-rw-r--r--  Android.bp | 2
-rw-r--r--  Android.mk | 14
-rw-r--r--  adbconnection/adbconnection.cc | 379
-rw-r--r--  adbconnection/adbconnection.h | 31
-rw-r--r--  build/Android.common_test.mk | 4
-rw-r--r--  build/Android.gtest.mk | 52
-rw-r--r--  build/art.go | 2
-rw-r--r--  cmdline/cmdline_parser_test.cc | 4
-rw-r--r--  cmdline/cmdline_types.h | 7
-rw-r--r--  compiler/Android.bp | 8
-rw-r--r--  compiler/common_compiler_test.h | 2
-rw-r--r--  compiler/debug/debug_info.h | 46
-rw-r--r--  compiler/debug/elf_debug_info_writer.h | 4
-rw-r--r--  compiler/debug/elf_debug_line_writer.h | 2
-rw-r--r--  compiler/debug/elf_debug_writer.cc | 68
-rw-r--r--  compiler/debug/elf_debug_writer.h | 9
-rw-r--r--  compiler/debug/elf_gnu_debugdata_writer.h | 13
-rw-r--r--  compiler/debug/elf_symtab_writer.h | 47
-rw-r--r--  compiler/debug/method_debug_info.h | 2
-rw-r--r--  compiler/dex/dex_to_dex_compiler.cc | 520
-rw-r--r--  compiler/dex/dex_to_dex_compiler.h | 96
-rw-r--r--  compiler/dex/inline_method_analyser.cc | 8
-rw-r--r--  compiler/dex/quick_compiler_callbacks.cc | 15
-rw-r--r--  compiler/dex/quick_compiler_callbacks.h | 11
-rw-r--r--  compiler/driver/compiled_method_storage.cc | 11
-rw-r--r--  compiler/driver/compiler_driver.cc | 610
-rw-r--r--  compiler/driver/compiler_driver.h | 45
-rw-r--r--  compiler/driver/compiler_options.cc | 1
-rw-r--r--  compiler/driver/compiler_options.h | 8
-rw-r--r--  compiler/driver/compiler_options_map-inl.h | 6
-rw-r--r--  compiler/driver/compiler_options_map.def | 1
-rw-r--r--  compiler/jit/jit_compiler.cc | 1
-rw-r--r--  compiler/linker/arm/relative_patcher_arm_base.cc | 4
-rw-r--r--  compiler/linker/elf_builder.h | 74
-rw-r--r--  compiler/optimizing/bounds_check_elimination.cc | 65
-rw-r--r--  compiler/optimizing/builder.cc | 3
-rw-r--r--  compiler/optimizing/builder.h | 5
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc | 41
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc | 66
-rw-r--r--  compiler/optimizing/code_generator_mips.cc | 98
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc | 96
-rw-r--r--  compiler/optimizing/code_generator_vector_arm64.cc | 24
-rw-r--r--  compiler/optimizing/code_generator_vector_arm_vixl.cc | 20
-rw-r--r--  compiler/optimizing/code_generator_vector_mips.cc | 44
-rw-r--r--  compiler/optimizing/code_generator_vector_mips64.cc | 44
-rw-r--r--  compiler/optimizing/code_generator_vector_x86.cc | 34
-rw-r--r--  compiler/optimizing/code_generator_vector_x86_64.cc | 24
-rw-r--r--  compiler/optimizing/code_generator_x86.cc | 37
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc | 37
-rw-r--r--  compiler/optimizing/code_sinking.cc | 4
-rw-r--r--  compiler/optimizing/codegen_test.cc | 87
-rw-r--r--  compiler/optimizing/constant_folding_test.cc | 20
-rw-r--r--  compiler/optimizing/data_type-inl.h | 2
-rw-r--r--  compiler/optimizing/data_type.cc | 2
-rw-r--r--  compiler/optimizing/data_type.h | 27
-rw-r--r--  compiler/optimizing/dead_code_elimination.cc | 136
-rw-r--r--  compiler/optimizing/dead_code_elimination.h | 1
-rw-r--r--  compiler/optimizing/dead_code_elimination_test.cc | 8
-rw-r--r--  compiler/optimizing/dominator_test.cc | 34
-rw-r--r--  compiler/optimizing/find_loops_test.cc | 28
-rw-r--r--  compiler/optimizing/graph_checker.cc | 10
-rw-r--r--  compiler/optimizing/graph_checker_test.cc | 14
-rw-r--r--  compiler/optimizing/graph_visualizer.cc | 11
-rw-r--r--  compiler/optimizing/inliner.cc | 58
-rw-r--r--  compiler/optimizing/inliner.h | 2
-rw-r--r--  compiler/optimizing/instruction_builder.cc | 2
-rw-r--r--  compiler/optimizing/instruction_builder.h | 3
-rw-r--r--  compiler/optimizing/intrinsics.cc | 2
-rw-r--r--  compiler/optimizing/intrinsics_arm64.cc | 8
-rw-r--r--  compiler/optimizing/intrinsics_arm_vixl.cc | 8
-rw-r--r--  compiler/optimizing/intrinsics_mips.cc | 9
-rw-r--r--  compiler/optimizing/intrinsics_mips64.cc | 9
-rw-r--r--  compiler/optimizing/intrinsics_x86.cc | 8
-rw-r--r--  compiler/optimizing/intrinsics_x86_64.cc | 8
-rw-r--r--  compiler/optimizing/linearize_test.cc | 19
-rw-r--r--  compiler/optimizing/live_ranges_test.cc | 16
-rw-r--r--  compiler/optimizing/liveness_test.cc | 28
-rw-r--r--  compiler/optimizing/load_store_elimination.cc | 381
-rw-r--r--  compiler/optimizing/loop_optimization.cc | 68
-rw-r--r--  compiler/optimizing/nodes.cc | 9
-rw-r--r--  compiler/optimizing/nodes.h | 45
-rw-r--r--  compiler/optimizing/nodes_vector.h | 72
-rw-r--r--  compiler/optimizing/nodes_vector_test.cc | 168
-rw-r--r--  compiler/optimizing/optimizing_cfi_test.cc | 4
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc | 54
-rw-r--r--  compiler/optimizing/optimizing_compiler_stats.h | 1
-rw-r--r--  compiler/optimizing/optimizing_unit_test.h | 60
-rw-r--r--  compiler/optimizing/pretty_printer_test.cc | 28
-rw-r--r--  compiler/optimizing/register_allocation_resolver.cc | 2
-rw-r--r--  compiler/optimizing/register_allocator_graph_color.cc | 2
-rw-r--r--  compiler/optimizing/register_allocator_linear_scan.cc | 2
-rw-r--r--  compiler/optimizing/register_allocator_test.cc | 18
-rw-r--r--  compiler/optimizing/scheduler_test.cc | 6
-rw-r--r--  compiler/optimizing/ssa_test.cc | 30
-rw-r--r--  compiler/optimizing/superblock_cloner.cc | 704
-rw-r--r--  compiler/optimizing/superblock_cloner.h | 323
-rw-r--r--  compiler/optimizing/superblock_cloner_test.cc | 121
-rw-r--r--  compiler/optimizing/suspend_check_test.cc | 16
-rw-r--r--  compiler/utils/assembler_thumb_test_expected.cc.inc | 2
-rw-r--r--  compiler/utils/atomic_dex_ref_map-inl.h | 12
-rw-r--r--  compiler/utils/atomic_dex_ref_map.h | 3
-rw-r--r--  compiler/utils/test_dex_file_builder.h | 4
-rw-r--r--  compiler/utils/x86/assembler_x86.cc | 24
-rw-r--r--  compiler/utils/x86/assembler_x86.h | 6
-rw-r--r--  compiler/utils/x86/assembler_x86_test.cc | 4
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64.cc | 23
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64.h | 6
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64_test.cc | 5
-rw-r--r--  dex2oat/Android.bp | 18
-rw-r--r--  dex2oat/dex2oat.cc | 83
-rw-r--r--  dex2oat/dex2oat_image_test.cc | 14
-rw-r--r--  dex2oat/dex2oat_options.cc | 9
-rw-r--r--  dex2oat/dex2oat_options.def | 2
-rw-r--r--  dex2oat/dex2oat_test.cc | 61
-rw-r--r--  dex2oat/linker/elf_writer.h | 13
-rw-r--r--  dex2oat/linker/elf_writer_quick.cc | 59
-rw-r--r--  dex2oat/linker/image_test.h | 17
-rw-r--r--  dex2oat/linker/oat_writer.cc | 526
-rw-r--r--  dex2oat/linker/oat_writer.h | 28
-rw-r--r--  dex2oat/linker/oat_writer_test.cc | 22
-rw-r--r--  dexdump/Android.bp | 26
-rw-r--r--  dexdump/dexdump.cc | 72
-rw-r--r--  dexdump/dexdump_cfg.cc | 2
-rw-r--r--  dexdump/dexdump_main.cc | 43
-rw-r--r--  dexlayout/Android.bp | 12
-rw-r--r--  dexlayout/compact_dex_writer.cc | 421
-rw-r--r--  dexlayout/compact_dex_writer.h | 146
-rw-r--r--  dexlayout/dex_container.h | 87
-rw-r--r--  dexlayout/dex_ir.cc | 92
-rw-r--r--  dexlayout/dex_ir.h | 50
-rw-r--r--  dexlayout/dex_ir_builder.cc | 36
-rw-r--r--  dexlayout/dex_ir_builder.h | 11
-rw-r--r--  dexlayout/dex_visualize.cc | 4
-rw-r--r--  dexlayout/dex_writer.cc | 706
-rw-r--r--  dexlayout/dex_writer.h | 255
-rw-r--r--  dexlayout/dexdiag.cc | 5
-rw-r--r--  dexlayout/dexlayout.cc | 119
-rw-r--r--  dexlayout/dexlayout.h | 39
-rw-r--r--  dexlayout/dexlayout_main.cc | 67
-rw-r--r--  dexlayout/dexlayout_test.cc | 162
-rw-r--r--  dexlist/Android.bp | 6
-rw-r--r--  dexlist/dexlist.cc | 76
-rw-r--r--  dexoptanalyzer/dexoptanalyzer.cc | 1
-rw-r--r--  dt_fd_forward/dt_fd_forward.cc | 77
-rw-r--r--  dt_fd_forward/dt_fd_forward.h | 4
-rw-r--r--  dt_fd_forward/export/fd_transport.h | 6
-rw-r--r--  oatdump/Android.bp | 2
-rw-r--r--  oatdump/oatdump.cc | 94
-rw-r--r--  openjdkjvmti/Android.bp | 1
-rw-r--r--  openjdkjvmti/OpenjdkJvmTi.cc | 180
-rw-r--r--  openjdkjvmti/art_jvmti.h | 68
-rw-r--r--  openjdkjvmti/deopt_manager.cc | 33
-rw-r--r--  openjdkjvmti/deopt_manager.h | 7
-rw-r--r--  openjdkjvmti/events-inl.h | 4
-rw-r--r--  openjdkjvmti/events.cc | 6
-rw-r--r--  openjdkjvmti/events.h | 5
-rw-r--r--  openjdkjvmti/fixed_up_dex_file.cc | 97
-rw-r--r--  openjdkjvmti/fixed_up_dex_file.h | 4
-rw-r--r--  openjdkjvmti/ti_breakpoint.cc | 2
-rw-r--r--  openjdkjvmti/ti_class.cc | 102
-rw-r--r--  openjdkjvmti/ti_class_definition.cc | 311
-rw-r--r--  openjdkjvmti/ti_class_definition.h | 106
-rw-r--r--  openjdkjvmti/ti_class_loader.h | 2
-rw-r--r--  openjdkjvmti/ti_field.cc | 2
-rw-r--r--  openjdkjvmti/ti_method.cc | 16
-rw-r--r--  openjdkjvmti/ti_redefine.cc | 19
-rw-r--r--  openjdkjvmti/ti_redefine.h | 2
-rw-r--r--  openjdkjvmti/ti_search.cc | 4
-rw-r--r--  openjdkjvmti/ti_stack.cc | 2
-rw-r--r--  openjdkjvmti/transform.cc | 239
-rw-r--r--  openjdkjvmti/transform.h | 9
-rw-r--r--  patchoat/Android.bp | 2
-rw-r--r--  patchoat/patchoat.cc | 558
-rw-r--r--  patchoat/patchoat.h | 19
-rw-r--r--  patchoat/patchoat_test.cc | 141
-rw-r--r--  profman/Android.bp | 1
-rw-r--r--  profman/boot_image_profile.cc | 1
-rw-r--r--  profman/boot_image_profile.h | 3
-rw-r--r--  profman/profile_assistant.cc | 21
-rw-r--r--  profman/profile_assistant.h | 11
-rw-r--r--  profman/profile_assistant_test.cc | 137
-rw-r--r--  profman/profman.cc | 215
-rw-r--r--  runtime/Android.bp | 98
-rw-r--r--  runtime/arch/arm/entrypoints_init_arm.cc | 1
-rw-r--r--  runtime/arch/arm/quick_entrypoints_arm.S | 23
-rw-r--r--  runtime/arch/arm64/entrypoints_init_arm64.cc | 1
-rw-r--r--  runtime/arch/arm64/quick_entrypoints_arm64.S | 5
-rw-r--r--  runtime/arch/mips/entrypoints_direct_mips.h | 1
-rw-r--r--  runtime/arch/mips/entrypoints_init_mips.cc | 2
-rw-r--r--  runtime/arch/mips64/entrypoints_init_mips64.cc | 1
-rw-r--r--  runtime/arch/mips64/quick_entrypoints_mips64.S | 3
-rw-r--r--  runtime/arch/stub_test.cc | 10
-rw-r--r--  runtime/arch/x86/entrypoints_init_x86.cc | 1
-rw-r--r--  runtime/arch/x86/quick_entrypoints_x86.S | 7
-rw-r--r--  runtime/arch/x86_64/entrypoints_init_x86_64.cc | 1
-rw-r--r--  runtime/arch/x86_64/quick_entrypoints_x86_64.S | 10
-rw-r--r--  runtime/art_field.h | 2
-rw-r--r--  runtime/art_method-inl.h | 39
-rw-r--r--  runtime/art_method.cc | 10
-rw-r--r--  runtime/art_method.h | 26
-rw-r--r--  runtime/asm_support.h | 2
-rw-r--r--  runtime/base/arena_allocator.cc | 1
-rw-r--r--  runtime/base/arena_allocator.h | 1
-rw-r--r--  runtime/base/bit_string.h | 38
-rw-r--r--  runtime/base/bit_string_test.cc | 26
-rw-r--r--  runtime/base/bit_utils.h | 16
-rw-r--r--  runtime/base/file_utils.cc | 7
-rw-r--r--  runtime/base/file_utils.h | 3
-rw-r--r--  runtime/base/mutex.cc | 6
-rw-r--r--  runtime/base/mutex.h | 8
-rw-r--r--  runtime/check_reference_map_visitor.h | 2
-rw-r--r--  runtime/class_linker.cc | 50
-rw-r--r--  runtime/class_linker.h | 1
-rw-r--r--  runtime/class_loader_context.cc | 14
-rw-r--r--  runtime/class_loader_context_test.cc | 38
-rw-r--r--  runtime/common_runtime_test.cc | 15
-rw-r--r--  runtime/common_runtime_test.h | 6
-rw-r--r--  runtime/common_throws.cc | 4
-rw-r--r--  runtime/compiler_callbacks.h | 11
-rw-r--r--  runtime/debugger.cc | 14
-rw-r--r--  runtime/dex/art_dex_file_loader.cc | 480
-rw-r--r--  runtime/dex/art_dex_file_loader.h | 126
-rw-r--r--  runtime/dex/code_item_accessors-inl.h | 179
-rw-r--r--  runtime/dex/code_item_accessors-no_art-inl.h | 166
-rw-r--r--  runtime/dex/code_item_accessors.h | 15
-rw-r--r--  runtime/dex/code_item_accessors_test.cc | 44
-rw-r--r--  runtime/dex/compact_dex_debug_info.cc | 117
-rw-r--r--  runtime/dex/compact_dex_debug_info.h | 65
-rw-r--r--  runtime/dex/compact_dex_debug_info_test.cc | 95
-rw-r--r--  runtime/dex/compact_dex_file.cc | 50
-rw-r--r--  runtime/dex/compact_dex_file.h | 213
-rw-r--r--  runtime/dex/compact_dex_file_test.cc | 61
-rw-r--r--  runtime/dex/compact_dex_utils.h | 37
-rw-r--r--  runtime/dex/descriptors_names.cc | 426
-rw-r--r--  runtime/dex/descriptors_names.h | 63
-rw-r--r--  runtime/dex/dex_file-inl.h | 6
-rw-r--r--  runtime/dex/dex_file.cc | 25
-rw-r--r--  runtime/dex/dex_file.h | 176
-rw-r--r--  runtime/dex/dex_file_annotations.cc | 2
-rw-r--r--  runtime/dex/dex_file_exception_helpers.cc | 2
-rw-r--r--  runtime/dex/dex_file_layout.cc | 2
-rw-r--r--  runtime/dex/dex_file_layout.h | 2
-rw-r--r--  runtime/dex/dex_file_loader.cc | 568
-rw-r--r--  runtime/dex/dex_file_loader.h | 153
-rw-r--r--  runtime/dex/dex_file_test.cc | 48
-rw-r--r--  runtime/dex/dex_file_verifier.cc | 15
-rw-r--r--  runtime/dex/dex_file_verifier_test.cc | 6
-rw-r--r--  runtime/dex/dex_instruction.cc | 2
-rw-r--r--  runtime/dex/invoke_type.h (renamed from runtime/invoke_type.h) | 6
-rw-r--r--  runtime/dex/modifiers.cc | 58
-rw-r--r--  runtime/dex/modifiers.h (renamed from runtime/modifiers.h) | 27
-rw-r--r--  runtime/dex/standard_dex_file.cc | 7
-rw-r--r--  runtime/dex/standard_dex_file.h | 31
-rw-r--r--  runtime/dex/utf-inl.h (renamed from runtime/utf-inl.h) | 6
-rw-r--r--  runtime/dex/utf.cc (renamed from runtime/utf.cc) | 59
-rw-r--r--  runtime/dex/utf.h (renamed from runtime/utf.h) | 15
-rw-r--r--  runtime/dex/utf_test.cc (renamed from runtime/utf_test.cc) | 0
-rw-r--r--  runtime/dex2oat_environment_test.h | 28
-rw-r--r--  runtime/dex_to_dex_decompiler.cc | 12
-rw-r--r--  runtime/dexopt_test.cc | 4
-rw-r--r--  runtime/elf.h | 5
-rw-r--r--  runtime/entrypoints/entrypoint_utils-inl.h | 2
-rw-r--r--  runtime/entrypoints/quick/quick_entrypoints_list.h | 1
-rw-r--r--  runtime/entrypoints/quick/quick_throw_entrypoints.cc | 1
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints.cc | 4
-rw-r--r--  runtime/entrypoints_order_test.cc | 3
-rw-r--r--  runtime/fault_handler.cc | 40
-rw-r--r--  runtime/gc/allocator/dlmalloc.cc | 1
-rw-r--r--  runtime/gc/allocator/dlmalloc.h | 1
-rw-r--r--  runtime/gc/collector/concurrent_copying.cc | 16
-rw-r--r--  runtime/gc/collector/concurrent_copying.h | 5
-rw-r--r--  runtime/gc/collector/semi_space.cc | 1
-rw-r--r--  runtime/gc/gc_cause.cc | 1
-rw-r--r--  runtime/gc/gc_cause.h | 3
-rw-r--r--  runtime/gc/heap-inl.h | 11
-rw-r--r--  runtime/gc/heap.cc | 73
-rw-r--r--  runtime/gc/heap.h | 33
-rw-r--r--  runtime/gc/space/image_space.cc | 4
-rw-r--r--  runtime/gc/space/region_space-inl.h | 54
-rw-r--r--  runtime/gc/space/region_space.cc | 53
-rw-r--r--  runtime/gc/space/region_space.h | 20
-rw-r--r--  runtime/globals.h | 6
-rw-r--r--  runtime/hidden_api.h | 142
-rw-r--r--  runtime/hidden_api_access_flags.h | 152
-rw-r--r--  runtime/image.cc | 2
-rw-r--r--  runtime/imtable-inl.h | 2
-rw-r--r--  runtime/instrumentation.cc | 15
-rw-r--r--  runtime/instrumentation.h | 4
-rw-r--r--  runtime/intern_table.cc | 2
-rw-r--r--  runtime/intern_table_test.cc | 2
-rw-r--r--  runtime/interpreter/interpreter.cc | 4
-rw-r--r--  runtime/interpreter/interpreter_common.cc | 9
-rw-r--r--  runtime/interpreter/interpreter_intrinsics.cc | 1
-rw-r--r--  runtime/interpreter/shadow_frame.cc | 2
-rw-r--r--  runtime/interpreter/unstarted_runtime.cc | 23
-rw-r--r--  runtime/intrinsics_list.h | 1
-rw-r--r--  runtime/jdwp/jdwp_handler.cc | 2
-rw-r--r--  runtime/jdwp_provider.h | 3
-rw-r--r--  runtime/jit/debugger_interface.cc | 121
-rw-r--r--  runtime/jit/debugger_interface.h | 41
-rw-r--r--  runtime/jit/jit.cc | 2
-rw-r--r--  runtime/jit/jit.h | 1
-rw-r--r--  runtime/jit/jit_code_cache.cc | 12
-rw-r--r--  runtime/jit/profile_compilation_info.cc | 296
-rw-r--r--  runtime/jit/profile_compilation_info.h | 149
-rw-r--r--  runtime/jit/profile_compilation_info_test.cc | 359
-rw-r--r--  runtime/jni_internal.cc | 22
-rw-r--r--  runtime/leb128.h | 34
-rw-r--r--  runtime/mem_map.cc | 118
-rw-r--r--  runtime/mem_map.h | 33
-rw-r--r--  runtime/mem_map_test.cc | 200
-rw-r--r--  runtime/method_handles.cc | 23
-rw-r--r--  runtime/method_handles_test.cc | 382
-rw-r--r--  runtime/mirror/class-inl.h | 9
-rw-r--r--  runtime/mirror/class.h | 6
-rw-r--r--  runtime/mirror/field.h | 2
-rw-r--r--  runtime/mirror/method_handles_lookup.cc | 2
-rw-r--r--  runtime/mirror/string-inl.h | 2
-rw-r--r--  runtime/mirror/string.cc | 2
-rw-r--r--  runtime/monitor.cc | 2
-rw-r--r--  runtime/native/dalvik_system_DexFile.cc | 16
-rw-r--r--  runtime/native/dalvik_system_VMRuntime.cc | 5
-rw-r--r--  runtime/native/dalvik_system_VMStack.cc | 10
-rw-r--r--  runtime/native/dalvik_system_ZygoteHooks.cc | 36
-rw-r--r--  runtime/native/java_lang_Class.cc | 150
-rw-r--r--  runtime/native_stack_dump.cc | 16
-rw-r--r--  runtime/oat.h | 4
-rw-r--r--  runtime/oat_file.cc | 191
-rw-r--r--  runtime/oat_file.h | 28
-rw-r--r--  runtime/oat_file_assistant.cc | 27
-rw-r--r--  runtime/oat_file_assistant.h | 10
-rw-r--r--  runtime/oat_file_assistant_test.cc | 58
-rw-r--r--  runtime/oat_file_manager.cc | 45
-rw-r--r--  runtime/oat_file_manager.h | 3
-rw-r--r--  runtime/oat_file_test.cc | 28
-rw-r--r--  runtime/obj_ptr.h | 2
-rw-r--r--  runtime/parsed_options.cc | 5
-rw-r--r--  runtime/primitive.h | 58
-rw-r--r--  runtime/primitive_test.cc | 123
-rw-r--r--  runtime/quick_exception_handler.cc | 6
-rw-r--r--  runtime/quicken_info.h | 96
-rw-r--r--  runtime/runtime.cc | 65
-rw-r--r--  runtime/runtime.h | 55
-rw-r--r--  runtime/runtime_intrinsics.cc | 2
-rw-r--r--  runtime/runtime_options.def | 4
-rw-r--r--  runtime/stack.cc | 8
-rw-r--r--  runtime/string_reference.h | 2
-rw-r--r--  runtime/subtype_check_bits.h | 30
-rw-r--r--  runtime/subtype_check_bits_and_status.h | 38
-rw-r--r--  runtime/subtype_check_info.h | 15
-rw-r--r--  runtime/subtype_check_info_test.cc | 56
-rw-r--r--  runtime/thread.cc | 233
-rw-r--r--  runtime/thread.h | 9
-rw-r--r--  runtime/ti/agent.cc | 7
-rw-r--r--  runtime/type_lookup_table.cc | 6
-rw-r--r--  runtime/type_lookup_table.h | 10
-rw-r--r--  runtime/type_lookup_table_test.cc | 2
-rw-r--r--  runtime/utils.cc | 94
-rw-r--r--  runtime/utils.h | 24
-rw-r--r--  runtime/vdex_file.cc | 260
-rw-r--r--  runtime/vdex_file.h | 120
-rw-r--r--  runtime/verifier/method_verifier.cc | 11
-rw-r--r--  runtime/verifier/method_verifier.h | 4
-rw-r--r--  runtime/verifier/reg_type-inl.h | 3
-rw-r--r--  runtime/well_known_classes.cc | 3
-rw-r--r--  runtime/well_known_classes.h | 1
-rw-r--r--  runtime/zip_archive.cc | 7
-rw-r--r--  runtime/zip_archive.h | 3
-rw-r--r--  test/004-NativeAllocations/src-art/Main.java | 147
-rw-r--r--  test/004-ThreadStress/src-art/Main.java (renamed from test/004-ThreadStress/src/Main.java) | 104
-rw-r--r--  test/044-proxy/src/Main.java | 4
-rw-r--r--  test/044-proxy/src/OOMEOnDispatch.java | 18
-rwxr-xr-x  test/071-dexfile-get-static-size/build | 12
-rw-r--r--  test/071-dexfile-get-static-size/res/test1.dex (renamed from test/071-dexfile-get-static-size/test1.dex) | bin 1864 -> 1864 bytes
-rw-r--r--  test/071-dexfile-get-static-size/res/test2.dex (renamed from test/071-dexfile-get-static-size/test2.dex) | bin 1264 -> 1264 bytes
-rw-r--r--  test/071-dexfile-get-static-size/src/Main.java | 20
-rw-r--r--  test/137-cfi/cfi.cc | 33
-rw-r--r--  test/137-cfi/src-multidex/Base.java | 22
-rw-r--r--  test/137-cfi/src/Main.java | 6
-rw-r--r--  test/141-class-unload/jni_unload.cc | 14
-rw-r--r--  test/168-vmstack-annotated/expected.txt | 0
-rw-r--r--  test/168-vmstack-annotated/info.txt | 1
-rw-r--r--  test/168-vmstack-annotated/run | 18
-rw-r--r--  test/168-vmstack-annotated/src/Main.java | 225
-rw-r--r--  test/169-threadgroup-jni/expected.txt | 1
-rw-r--r--  test/169-threadgroup-jni/info.txt | 1
-rw-r--r--  test/169-threadgroup-jni/jni_daemon_thread.cc | 65
-rw-r--r--  test/169-threadgroup-jni/src/Main.java | 39
-rw-r--r--  test/305-other-fault-handler/expected.txt | 2
-rw-r--r--  test/305-other-fault-handler/fault_handler.cc | 102
-rw-r--r--  test/305-other-fault-handler/info.txt | 3
-rw-r--r--  test/305-other-fault-handler/src/Main.java | 25
-rw-r--r--  test/449-checker-bce/src/Main.java | 122
-rw-r--r--  test/466-get-live-vreg/get_live_vreg_jni.cc | 3
-rw-r--r--  test/530-checker-lse/src/Main.java | 175
-rw-r--r--  test/530-regression-lse/expected.txt | 0
-rw-r--r--  test/530-regression-lse/info.txt | 2
-rw-r--r--  test/530-regression-lse/src/Main.java | 55
-rw-r--r--  test/603-checker-instanceof/src/Main.java | 2
-rw-r--r--  test/608-checker-unresolved-lse/src/Main.java | 1
-rw-r--r--  test/639-checker-code-sinking/expected.txt | 2
-rw-r--r--  test/639-checker-code-sinking/src/Main.java | 3
-rw-r--r--  test/651-checker-int-simd-minmax/src/Main.java | 16
-rw-r--r--  test/672-checker-throw-method/expected.txt | 1
-rw-r--r--  test/672-checker-throw-method/info.txt | 1
-rw-r--r--  test/672-checker-throw-method/src/Main.java | 316
-rw-r--r--  test/673-checker-throw-vmethod/expected.txt | 1
-rw-r--r--  test/673-checker-throw-vmethod/info.txt | 1
-rw-r--r--  test/673-checker-throw-vmethod/src/Main.java | 219
-rw-r--r--  test/674-HelloWorld-Dm/expected.txt | 1
-rw-r--r--  test/674-HelloWorld-Dm/info.txt | 1
-rw-r--r--  test/674-HelloWorld-Dm/run | 17
-rw-r--r--  test/674-HelloWorld-Dm/src/Main.java | 21
-rw-r--r--  test/674-hiddenapi/api-blacklist.txt | 25
-rw-r--r--  test/674-hiddenapi/api-dark-greylist.txt | 25
-rw-r--r--  test/674-hiddenapi/api-light-greylist.txt | 25
-rw-r--r--  test/674-hiddenapi/build | 38
-rw-r--r--  test/674-hiddenapi/check | 23
-rw-r--r--  test/674-hiddenapi/expected.txt | 0
-rw-r--r--  test/674-hiddenapi/hiddenapi.cc | 298
-rw-r--r--  test/674-hiddenapi/info.txt | 15
-rw-r--r--  test/674-hiddenapi/src-art/Main.java | 157
-rw-r--r--  test/674-hiddenapi/src-ex/ChildClass.java | 438
-rw-r--r--  test/674-hiddenapi/src-ex/JNI.java | 29
-rw-r--r--  test/674-hiddenapi/src-ex/Linking.java | 193
-rw-r--r--  test/674-hiddenapi/src-ex/Reflection.java | 205
-rw-r--r--  test/674-hiddenapi/src/NullaryConstructorBlacklist.java | 21
-rw-r--r--  test/674-hiddenapi/src/NullaryConstructorDarkGreylist.java | 21
-rw-r--r--  test/674-hiddenapi/src/NullaryConstructorLightGreylist.java | 21
-rw-r--r--  test/674-hiddenapi/src/NullaryConstructorWhitelist.java | 21
-rw-r--r--  test/674-hiddenapi/src/ParentClass.java | 133
-rw-r--r--  test/674-hiddenapi/src/ParentInterface.java | 41
-rw-r--r--  test/674-hotness-compiled/expected.txt | 1
-rw-r--r--  test/674-hotness-compiled/info.txt | 1
-rwxr-xr-x  test/674-hotness-compiled/run | 17
-rw-r--r--  test/674-hotness-compiled/src/Main.java | 46
-rwxr-xr-x  test/674-vdex-uncompress/build | 19
-rw-r--r--  test/674-vdex-uncompress/expected.txt | 2
-rw-r--r--  test/674-vdex-uncompress/info.txt | 2
-rw-r--r--  test/674-vdex-uncompress/run | 17
-rw-r--r--  test/674-vdex-uncompress/src/Main.java | 37
-rw-r--r--  test/710-varhandle-creation/src-art/Main.java | 92
-rw-r--r--  test/714-invoke-custom-lambda-metafactory/build | 22
-rw-r--r--  test/714-invoke-custom-lambda-metafactory/expected.txt | 4
-rw-r--r--  test/714-invoke-custom-lambda-metafactory/info.txt | 1
-rw-r--r--  test/714-invoke-custom-lambda-metafactory/src/Main.java | 32
-rw-r--r--  test/913-heaps/expected_d8.diff | 20
-rw-r--r--  test/959-invoke-polymorphic-accessors/src/Main.java | 4
-rw-r--r--  test/983-source-transform-verify/expected.txt | 1
-rw-r--r--  test/983-source-transform-verify/source_transform.cc | 26
-rw-r--r--  test/983-source-transform-verify/src/art/Test983.java | 4
-rw-r--r--  test/Android.bp | 9
-rw-r--r--  test/Android.run-test.mk | 1
-rw-r--r--  test/HiddenApi/Main.java | 26
-rw-r--r--  test/ManyMethods/ManyMethods.java | 18
-rw-r--r--  test/README.md | 2
-rw-r--r--  test/common/runtime_state.cc | 38
-rwxr-xr-x  test/etc/default-build | 47
-rwxr-xr-x  test/etc/run-test-jar | 38
-rw-r--r--  test/knownfailures.json | 30
-rwxr-xr-x  test/run-test | 9
-rw-r--r--  test/testrunner/target_config.py | 34
-rwxr-xr-x  test/testrunner/testrunner.py | 6
-rw-r--r--  test/ti-agent/common_helper.h | 2
-rw-r--r--  test/ti-stress/stress.cc | 2
-rw-r--r--  test/valgrind-suppressions.txt | 10
-rw-r--r--  tools/ahat/Android.mk | 6
-rwxr-xr-x  tools/buildbot-build.sh | 2
-rw-r--r--  tools/cpp-define-generator/constant_class.def | 2
-rw-r--r--  tools/cpp-define-generator/constant_globals.def | 2
-rwxr-xr-x  tools/dt_fds_forward.py | 11
-rw-r--r--  tools/external_oj_libjdwp_art_failures.txt | 7
-rwxr-xr-x  tools/generate-boot-image-profile.sh | 4
-rw-r--r--  tools/hiddenapi/Android.bp | 65
-rw-r--r--  tools/hiddenapi/README.md | 54
-rw-r--r--  tools/hiddenapi/hiddenapi.cc | 378
-rw-r--r--  tools/hiddenapi/hiddenapi_test.cc | 601
-rwxr-xr-x  tools/jfuzz/run_dex_fuzz_test.py | 4
-rwxr-xr-x  tools/jfuzz/run_jfuzz_test.py | 4
-rw-r--r--  tools/prebuilt_libjdwp_art_failures.txt | 7
-rw-r--r--  tools/public.libraries.buildbot.txt | 1
-rwxr-xr-x  tools/run-jdwp-tests.sh | 4
482 files changed, 19946 insertions, 5104 deletions
diff --git a/Android.bp b/Android.bp
index 197860694b..4bcceffcd1 100644
--- a/Android.bp
+++ b/Android.bp
@@ -1,6 +1,7 @@
// TODO: These should be handled with transitive static library dependencies
art_static_dependencies = [
// Note: the order is important because of static linking resolution.
+ "libdexfile",
"libziparchive",
"libnativehelper",
"libnativebridge",
@@ -47,6 +48,7 @@ subdirs = [
"tools/breakpoint-logger",
"tools/cpp-define-generator",
"tools/dmtracedump",
+ "tools/hiddenapi",
"tools/titrace",
"tools/wrapagentproperties",
]
diff --git a/Android.mk b/Android.mk
index 0a90a0bb24..361ceecc2f 100644
--- a/Android.mk
+++ b/Android.mk
@@ -447,6 +447,19 @@ LOCAL_REQUIRED_MODULES := libopenjdkd
include $(BUILD_PHONY_PACKAGE)
endif
+# Create dummy hidden API lists which are normally generated by the framework
+# but which we do not have in the master-art manifest.
+# We need to execute this now to ensure Makefile rules depending on these files can
+# be constructed.
+define build-art-hiddenapi
+$(shell if [ ! -d frameworks/base ]; then \
+ mkdir -p ${TARGET_OUT_COMMON_INTERMEDIATES}/PACKAGING; \
+ touch ${TARGET_OUT_COMMON_INTERMEDIATES}/PACKAGING/hiddenapi-{blacklist,dark-greylist,light-greylist}.txt; \
+ fi;)
+endef
+
+$(eval $(call build-art-hiddenapi))
+
########################################################################
# "m build-art" for quick minimal build
.PHONY: build-art
@@ -460,6 +473,7 @@ build-art-target: $(TARGET_OUT_EXECUTABLES)/art $(ART_TARGET_DEPENDENCIES) $(TAR
########################################################################
# Phony target for only building what go/lem requires for pushing ART on /data.
+
.PHONY: build-art-target-golem
# Also include libartbenchmark, we always include it when running golem.
# libstdc++ is needed when building for ART_TARGET_LINUX.
diff --git a/adbconnection/adbconnection.cc b/adbconnection/adbconnection.cc
index 127792f6b4..a0c99663b4 100644
--- a/adbconnection/adbconnection.cc
+++ b/adbconnection/adbconnection.cc
@@ -42,6 +42,7 @@
#include "cutils/sockets.h"
#endif
+#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/eventfd.h>
@@ -49,23 +50,39 @@
namespace adbconnection {
+// Messages sent from the transport
using dt_fd_forward::kListenStartMessage;
using dt_fd_forward::kListenEndMessage;
using dt_fd_forward::kAcceptMessage;
using dt_fd_forward::kCloseMessage;
+// Messages sent to the transport
+using dt_fd_forward::kPerformHandshakeMessage;
+using dt_fd_forward::kSkipHandshakeMessage;
+
using android::base::StringPrintf;
+static constexpr const char kJdwpHandshake[14] = {
+ 'J', 'D', 'W', 'P', '-', 'H', 'a', 'n', 'd', 's', 'h', 'a', 'k', 'e'
+};
+
static constexpr int kEventfdLocked = 0;
static constexpr int kEventfdUnlocked = 1;
static constexpr int kControlSockSendTimeout = 10;
+static constexpr size_t kPacketHeaderLen = 11;
+static constexpr off_t kPacketSizeOff = 0;
+static constexpr off_t kPacketIdOff = 4;
+static constexpr off_t kPacketCommandSetOff = 9;
+static constexpr off_t kPacketCommandOff = 10;
+
+static constexpr uint8_t kDdmCommandSet = 199;
+static constexpr uint8_t kDdmChunkCommand = 1;
+
static AdbConnectionState* gState;
static bool IsDebuggingPossible() {
- // TODO We need to do this on IsJdwpAllowed not IsDebuggable in order to support userdebug
- // workloads. For now we will only allow it when we are debuggable so that testing is easier.
- return art::Runtime::Current()->IsJavaDebuggable() && art::Dbg::IsJdwpAllowed();
+ return art::Dbg::IsJdwpAllowed();
}
// Begin running the debugger.
@@ -118,7 +135,12 @@ AdbConnectionState::AdbConnectionState(const std::string& agent_name)
shutting_down_(false),
agent_loaded_(false),
agent_listening_(false),
- next_ddm_id_(1) {
+ agent_has_socket_(false),
+ sent_agent_fds_(false),
+ performed_handshake_(false),
+ notified_ddm_active_(false),
+ next_ddm_id_(1),
+ started_debugger_threads_(false) {
// Setup the addr.
control_addr_.controlAddrUn.sun_family = AF_UNIX;
control_addr_len_ = sizeof(control_addr_.controlAddrUn.sun_family) + sizeof(kJdwpControlName) - 1;
@@ -153,6 +175,7 @@ struct CallbackData {
static void* CallbackFunction(void* vdata) {
std::unique_ptr<CallbackData> data(reinterpret_cast<CallbackData*>(vdata));
+ CHECK(data->this_ == gState);
art::Thread* self = art::Thread::Attach(kAdbConnectionThreadName,
true,
data->thr_);
@@ -178,6 +201,10 @@ static void* CallbackFunction(void* vdata) {
int detach_result = art::Runtime::Current()->GetJavaVM()->DetachCurrentThread();
CHECK_EQ(detach_result, 0);
+ // Get rid of the connection
+ gState = nullptr;
+ delete data->this_;
+
return nullptr;
}
@@ -226,11 +253,13 @@ void AdbConnectionState::StartDebuggerThreads() {
ScopedLocalRef<jobject> thr(soa.Env(), CreateAdbConnectionThread(soa.Self()));
pthread_t pthread;
std::unique_ptr<CallbackData> data(new CallbackData { this, soa.Env()->NewGlobalRef(thr.get()) });
+ started_debugger_threads_ = true;
int pthread_create_result = pthread_create(&pthread,
nullptr,
&CallbackFunction,
data.get());
if (pthread_create_result != 0) {
+ started_debugger_threads_ = false;
// If the create succeeded the other thread will call EndThreadBirth.
art::Runtime* runtime = art::Runtime::Current();
soa.Env()->DeleteGlobalRef(data->thr_);
@@ -247,10 +276,32 @@ static bool FlagsSet(int16_t data, int16_t flags) {
}
void AdbConnectionState::CloseFds() {
- // Lock the write_event_fd so that concurrent PublishDdms will see that the connection is closed.
- ScopedEventFdLock lk(adb_write_event_fd_);
- // shutdown(adb_connection_socket_, SHUT_RDWR);
- adb_connection_socket_.reset();
+ {
+ // Lock the write_event_fd so that concurrent PublishDdms will see that the connection is
+ // closed.
+ ScopedEventFdLock lk(adb_write_event_fd_);
+ // shutdown(adb_connection_socket_, SHUT_RDWR);
+ adb_connection_socket_.reset();
+ }
+
+ // If we didn't load anything we will need to do the handshake again.
+ performed_handshake_ = false;
+
+ // If the agent isn't loaded we might need to tell ddms code the connection is closed.
+ if (!agent_loaded_ && notified_ddm_active_) {
+ NotifyDdms(/*active*/false);
+ }
+}
+
+void AdbConnectionState::NotifyDdms(bool active) {
+ art::ScopedObjectAccess soa(art::Thread::Current());
+ DCHECK_NE(notified_ddm_active_, active);
+ notified_ddm_active_ = active;
+ if (active) {
+ art::Dbg::DdmConnected();
+ } else {
+ art::Dbg::DdmDisconnected();
+ }
}
uint32_t AdbConnectionState::NextDdmId() {
@@ -259,15 +310,22 @@ uint32_t AdbConnectionState::NextDdmId() {
}
void AdbConnectionState::PublishDdmData(uint32_t type, const art::ArrayRef<const uint8_t>& data) {
+ SendDdmPacket(NextDdmId(), DdmPacketType::kCmd, type, data);
+}
+
+void AdbConnectionState::SendDdmPacket(uint32_t id,
+ DdmPacketType packet_type,
+ uint32_t type,
+ art::ArrayRef<const uint8_t> data) {
// Get the write_event early to fail fast.
ScopedEventFdLock lk(adb_write_event_fd_);
if (adb_connection_socket_ == -1) {
- LOG(WARNING) << "Not sending ddms data of type "
- << StringPrintf("%c%c%c%c",
- static_cast<char>(type >> 24),
- static_cast<char>(type >> 16),
- static_cast<char>(type >> 8),
- static_cast<char>(type)) << " due to no connection!";
+ VLOG(jdwp) << "Not sending ddms data of type "
+ << StringPrintf("%c%c%c%c",
+ static_cast<char>(type >> 24),
+ static_cast<char>(type >> 16),
+ static_cast<char>(type >> 8),
+ static_cast<char>(type)) << " due to no connection!";
// Adb is not connected.
return;
}
@@ -278,7 +336,7 @@ void AdbConnectionState::PublishDdmData(uint32_t type, const art::ArrayRef<const
kJDWPHeaderLen // jdwp command packet size
+ sizeof(uint32_t) // Type
+ sizeof(uint32_t); // length
- std::array<uint8_t, kDdmPacketHeaderSize> pkt;
+ alignas(sizeof(uint32_t)) std::array<uint8_t, kDdmPacketHeaderSize> pkt;
uint8_t* pkt_data = pkt.data();
// Write the length first.
@@ -286,22 +344,35 @@ void AdbConnectionState::PublishDdmData(uint32_t type, const art::ArrayRef<const
pkt_data += sizeof(uint32_t);
// Write the id next;
- *reinterpret_cast<uint32_t*>(pkt_data) = htonl(NextDdmId());
+ *reinterpret_cast<uint32_t*>(pkt_data) = htonl(id);
pkt_data += sizeof(uint32_t);
// next the flags. (0 for cmd packet because DDMS).
- *(pkt_data++) = 0;
- // Now the cmd-set
- *(pkt_data++) = kJDWPDdmCmdSet;
- // Now the command
- *(pkt_data++) = kJDWPDdmCmd;
+ *(pkt_data++) = static_cast<uint8_t>(packet_type);
+ switch (packet_type) {
+ case DdmPacketType::kCmd: {
+ // Now the cmd-set
+ *(pkt_data++) = kJDWPDdmCmdSet;
+ // Now the command
+ *(pkt_data++) = kJDWPDdmCmd;
+ break;
+ }
+ case DdmPacketType::kReply: {
+ // This is the error code bytes which are all 0
+ *(pkt_data++) = 0;
+ *(pkt_data++) = 0;
+ }
+ }
+ // These are at unaligned addresses so we need to do them manually.
// now the type.
- *reinterpret_cast<uint32_t*>(pkt_data) = htonl(type);
+ uint32_t net_type = htonl(type);
+ memcpy(pkt_data, &net_type, sizeof(net_type));
pkt_data += sizeof(uint32_t);
// Now the data.size()
- *reinterpret_cast<uint32_t*>(pkt_data) = htonl(data.size());
+ uint32_t net_len = htonl(data.size());
+ memcpy(pkt_data, &net_len, sizeof(net_len));
pkt_data += sizeof(uint32_t);
static uint32_t constexpr kIovSize = 2;
@@ -329,17 +400,16 @@ void AdbConnectionState::PublishDdmData(uint32_t type, const art::ArrayRef<const
}
}
-void AdbConnectionState::SendAgentFds() {
- // TODO
+void AdbConnectionState::SendAgentFds(bool require_handshake) {
DCHECK(!sent_agent_fds_);
- char dummy = '!';
+ const char* message = require_handshake ? kPerformHandshakeMessage : kSkipHandshakeMessage;
union {
cmsghdr cm;
char buffer[CMSG_SPACE(dt_fd_forward::FdSet::kDataLength)];
} cm_un;
iovec iov;
- iov.iov_base = &dummy;
- iov.iov_len = 1;
+ iov.iov_base = const_cast<char*>(message);
+ iov.iov_len = strlen(message) + 1;
msghdr msg;
msg.msg_name = nullptr;
@@ -461,7 +531,7 @@ bool AdbConnectionState::SetupAdbConnection() {
/* now try to send our pid to the ADB daemon */
ret = TEMP_FAILURE_RETRY(send(sock, buff, sizeof(pid_t), 0));
if (ret == sizeof(pid_t)) {
- LOG(INFO) << "PID " << getpid() << " send to adb";
+ VLOG(jdwp) << "PID " << getpid() << " send to adb";
control_sock_ = std::move(sock);
return true;
} else {
@@ -483,6 +553,7 @@ bool AdbConnectionState::SetupAdbConnection() {
}
void AdbConnectionState::RunPollLoop(art::Thread* self) {
+ CHECK_NE(agent_name_, "");
CHECK_EQ(self->GetState(), art::kNative);
// TODO: Clang prebuilt for r316199 produces bogus thread safety analysis warning for holding both
// exclusive and shared lock in the same scope. Remove the assertion as a temporary workaround.
@@ -497,6 +568,7 @@ void AdbConnectionState::RunPollLoop(art::Thread* self) {
return;
}
while (!shutting_down_ && control_sock_ != -1) {
+ bool should_listen_on_connection = !agent_has_socket_ && !sent_agent_fds_;
struct pollfd pollfds[4] = {
{ sleep_event_fd_, POLLIN, 0 },
// -1 as an fd causes it to be ignored by poll
@@ -506,8 +578,8 @@ void AdbConnectionState::RunPollLoop(art::Thread* self) {
{ (adb_connection_socket_ == -1 ? control_sock_ : -1), POLLIN | POLLRDHUP, 0 },
// if we have not loaded the agent either the adb_connection_socket_ is -1 meaning we don't
// have a real connection yet or the socket through adb needs to be listened to for incoming
- // data that the agent can handle.
- { ((!agent_has_socket_ && !sent_agent_fds_) ? adb_connection_socket_ : -1), POLLIN, 0 }
+ // data that the agent or this plugin can handle.
+ { should_listen_on_connection ? adb_connection_socket_ : -1, POLLIN | POLLRDHUP, 0 }
};
int res = TEMP_FAILURE_RETRY(poll(pollfds, 4, -1));
if (res < 0) {
@@ -533,7 +605,7 @@ void AdbConnectionState::RunPollLoop(art::Thread* self) {
if (memcmp(kListenStartMessage, buf, sizeof(kListenStartMessage)) == 0) {
agent_listening_ = true;
if (adb_connection_socket_ != -1) {
- SendAgentFds();
+ SendAgentFds(/*require_handshake*/ !performed_handshake_);
}
} else if (memcmp(kListenEndMessage, buf, sizeof(kListenEndMessage)) == 0) {
agent_listening_ = false;
@@ -543,6 +615,8 @@ void AdbConnectionState::RunPollLoop(art::Thread* self) {
} else if (memcmp(kAcceptMessage, buf, sizeof(kAcceptMessage)) == 0) {
agent_has_socket_ = true;
sent_agent_fds_ = false;
+ // We will only ever do the handshake once so reset this.
+ performed_handshake_ = false;
} else {
LOG(ERROR) << "Unknown message received from debugger! '" << std::string(buf) << "'";
}
@@ -571,7 +645,9 @@ void AdbConnectionState::RunPollLoop(art::Thread* self) {
}
if (maybe_send_fds && agent_loaded_ && agent_listening_) {
VLOG(jdwp) << "Sending fds as soon as we received them.";
- SendAgentFds();
+ // The agent was already loaded so this must be after a disconnection. Therefore have the
+ // transport perform the handshake.
+ SendAgentFds(/*require_handshake*/ true);
}
} else if (FlagsSet(control_sock_poll.revents, POLLRDHUP)) {
// The other end of the adb connection just dropped it.
@@ -583,24 +659,15 @@ void AdbConnectionState::RunPollLoop(art::Thread* self) {
} else if (FlagsSet(adb_socket_poll.revents, POLLIN)) {
DCHECK(!agent_has_socket_);
if (!agent_loaded_) {
- DCHECK(!agent_listening_);
- // Load the agent now!
- self->AssertNoPendingException();
- art::Runtime::Current()->AttachAgent(/* JNIEnv* */ nullptr,
- MakeAgentArg(),
- /* classloader */ nullptr);
- if (self->IsExceptionPending()) {
- LOG(ERROR) << "Failed to load agent " << agent_name_;
- art::ScopedObjectAccess soa(self);
- self->GetException()->Dump();
- self->ClearException();
- return;
- }
- agent_loaded_ = true;
+ HandleDataWithoutAgent(self);
} else if (agent_listening_ && !sent_agent_fds_) {
VLOG(jdwp) << "Sending agent fds again on data.";
- SendAgentFds();
+ // Agent was already loaded so it can deal with the handshake.
+ SendAgentFds(/*require_handshake*/ true);
}
+ } else if (FlagsSet(adb_socket_poll.revents, POLLRDHUP)) {
+ DCHECK(!agent_has_socket_);
+ CloseFds();
} else {
VLOG(jdwp) << "Woke up poll without anything to do!";
}
@@ -608,11 +675,205 @@ void AdbConnectionState::RunPollLoop(art::Thread* self) {
}
}
+static uint32_t ReadUint32AndAdvance(/*in-out*/uint8_t** in) {
+ uint32_t res;
+ memcpy(&res, *in, sizeof(uint32_t));
+ *in = (*in) + sizeof(uint32_t);
+ return ntohl(res);
+}
+
+void AdbConnectionState::HandleDataWithoutAgent(art::Thread* self) {
+ DCHECK(!agent_loaded_);
+ DCHECK(!agent_listening_);
+ // TODO Should we check in some other way if we are userdebug/eng?
+ CHECK(art::Dbg::IsJdwpAllowed());
+ // We try to avoid loading the agent which is expensive. First lets just perform the handshake.
+ if (!performed_handshake_) {
+ PerformHandshake();
+ return;
+ }
+ // Read the packet header to figure out if it is one we can handle. We only 'peek' into the stream
+ // to see if it's one we can handle. This doesn't change the state of the socket.
+ alignas(sizeof(uint32_t)) uint8_t packet_header[kPacketHeaderLen];
+ ssize_t res = TEMP_FAILURE_RETRY(recv(adb_connection_socket_.get(),
+ packet_header,
+ sizeof(packet_header),
+ MSG_PEEK));
+ // We want to be very careful not to change the socket state until we know we succeeded. This will
+ // let us fall-back to just loading the agent and letting it deal with everything.
+ if (res <= 0) {
+ // Close the socket. We either hit EOF or an error.
+ if (res < 0) {
+ PLOG(ERROR) << "Unable to peek into adb socket due to error. Closing socket.";
+ }
+ CloseFds();
+ return;
+ } else if (res < static_cast<int>(kPacketHeaderLen)) {
+ LOG(ERROR) << "Unable to peek into adb socket. Loading agent to handle this. Only read " << res;
+ AttachJdwpAgent(self);
+ return;
+ }
+ uint32_t full_len = ntohl(*reinterpret_cast<uint32_t*>(packet_header + kPacketSizeOff));
+ uint32_t pkt_id = ntohl(*reinterpret_cast<uint32_t*>(packet_header + kPacketIdOff));
+ uint8_t pkt_cmd_set = packet_header[kPacketCommandSetOff];
+ uint8_t pkt_cmd = packet_header[kPacketCommandOff];
+ if (pkt_cmd_set != kDdmCommandSet ||
+ pkt_cmd != kDdmChunkCommand ||
+ full_len < kPacketHeaderLen) {
+ VLOG(jdwp) << "Loading agent due to jdwp packet that cannot be handled by adbconnection.";
+ AttachJdwpAgent(self);
+ return;
+ }
+ uint32_t avail = -1;
+ res = TEMP_FAILURE_RETRY(ioctl(adb_connection_socket_.get(), FIONREAD, &avail));
+ if (res < 0) {
+ PLOG(ERROR) << "Failed to determine amount of readable data in socket! Closing connection";
+ CloseFds();
+ return;
+ } else if (avail < full_len) {
+ LOG(WARNING) << "Unable to handle ddm command in adbconnection due to insufficent data. "
+ << "Expected " << full_len << " bytes but only " << avail << " are readable. "
+ << "Loading jdwp agent to deal with this.";
+ AttachJdwpAgent(self);
+ return;
+ }
+ // Actually read the data.
+ std::vector<uint8_t> full_pkt;
+ full_pkt.resize(full_len);
+ res = TEMP_FAILURE_RETRY(recv(adb_connection_socket_.get(), full_pkt.data(), full_len, 0));
+ if (res < 0) {
+ PLOG(ERROR) << "Failed to recv data from adb connection. Closing connection";
+ CloseFds();
+ return;
+ }
+ DCHECK_EQ(memcmp(full_pkt.data(), packet_header, sizeof(packet_header)), 0);
+ size_t data_size = full_len - kPacketHeaderLen;
+ if (data_size < (sizeof(uint32_t) * 2)) {
+ // This is an error (the data isn't long enough) but to match historical behavior we need to
+ // ignore it.
+ return;
+ }
+ uint8_t* ddm_data = full_pkt.data() + kPacketHeaderLen;
+ uint32_t ddm_type = ReadUint32AndAdvance(&ddm_data);
+ uint32_t ddm_len = ReadUint32AndAdvance(&ddm_data);
+ if (ddm_len > data_size - (2 * sizeof(uint32_t))) {
+ // This is an error (the data isn't long enough) but to match historical behavior we need to
+ // ignore it.
+ return;
+ }
+
+ if (!notified_ddm_active_) {
+ NotifyDdms(/*active*/ true);
+ }
+ uint32_t reply_type;
+ std::vector<uint8_t> reply;
+ if (!art::Dbg::DdmHandleChunk(self->GetJniEnv(),
+ ddm_type,
+ art::ArrayRef<const jbyte>(reinterpret_cast<const jbyte*>(ddm_data),
+ ddm_len),
+ /*out*/&reply_type,
+ /*out*/&reply)) {
+ // To match historical behavior we don't send any response when there is no data to reply with.
+ return;
+ }
+ SendDdmPacket(pkt_id,
+ DdmPacketType::kReply,
+ reply_type,
+ art::ArrayRef<const uint8_t>(reply));
+}
+
+void AdbConnectionState::PerformHandshake() {
+ CHECK(!performed_handshake_);
+ // Check to make sure we are able to read the whole handshake.
+ uint32_t avail = -1;
+ int res = TEMP_FAILURE_RETRY(ioctl(adb_connection_socket_.get(), FIONREAD, &avail));
+ if (res < 0 || avail < sizeof(kJdwpHandshake)) {
+ if (res < 0) {
+ PLOG(ERROR) << "Failed to determine amount of readable data for handshake!";
+ }
+ LOG(WARNING) << "Closing connection to broken client.";
+ CloseFds();
+ return;
+ }
+ // Perform the handshake.
+ char handshake_msg[sizeof(kJdwpHandshake)];
+ res = TEMP_FAILURE_RETRY(recv(adb_connection_socket_.get(),
+ handshake_msg,
+ sizeof(handshake_msg),
+ MSG_DONTWAIT));
+ if (res < static_cast<int>(sizeof(kJdwpHandshake)) ||
+ strncmp(handshake_msg, kJdwpHandshake, sizeof(kJdwpHandshake)) != 0) {
+ if (res < 0) {
+ PLOG(ERROR) << "Failed to read handshake!";
+ }
+ LOG(WARNING) << "Handshake failed!";
+ CloseFds();
+ return;
+ }
+ // Send the handshake back.
+ res = TEMP_FAILURE_RETRY(send(adb_connection_socket_.get(),
+ kJdwpHandshake,
+ sizeof(kJdwpHandshake),
+ 0));
+ if (res < static_cast<int>(sizeof(kJdwpHandshake))) {
+ PLOG(ERROR) << "Failed to send jdwp-handshake response.";
+ CloseFds();
+ return;
+ }
+ performed_handshake_ = true;
+}
+
+void AdbConnectionState::AttachJdwpAgent(art::Thread* self) {
+ self->AssertNoPendingException();
+ art::Runtime::Current()->AttachAgent(/* JNIEnv */ nullptr,
+ MakeAgentArg(),
+ /* classloader */ nullptr,
+ /*allow_non_debuggable_tooling*/ true);
+ if (self->IsExceptionPending()) {
+ LOG(ERROR) << "Failed to load agent " << agent_name_;
+ art::ScopedObjectAccess soa(self);
+ self->GetException()->Dump();
+ self->ClearException();
+ return;
+ }
+ agent_loaded_ = true;
+}
+
+bool ContainsArgument(const std::string& opts, const char* arg) {
+ return opts.find(arg) != std::string::npos;
+}
+
+bool ValidateJdwpOptions(const std::string& opts) {
+ bool res = true;
+ // The adbconnection plugin requires that the jdwp agent be configured as a 'server' because that
+ // is what adb expects and otherwise we will hit a deadlock as the poll loop thread stops waiting
+ // for the fd's to be passed down.
+ if (ContainsArgument(opts, "server=n")) {
+ res = false;
+ LOG(ERROR) << "Cannot start jdwp debugging with server=n from adbconnection.";
+ }
+ // We don't start the jdwp agent until threads are already running. It is far too late to suspend
+ // everything.
+ if (ContainsArgument(opts, "suspend=y")) {
+ res = false;
+ LOG(ERROR) << "Cannot use suspend=y with late-init jdwp.";
+ }
+ return res;
+}
+
std::string AdbConnectionState::MakeAgentArg() {
- // TODO Get this from something user settable?
const std::string& opts = art::Runtime::Current()->GetJdwpOptions();
- return agent_name_ + "=" + opts + (opts.empty() ? "" : ",")
- + "transport=dt_fd_forward,address=" + std::to_string(remote_agent_control_sock_);
+ DCHECK(ValidateJdwpOptions(opts));
+ // TODO Get agent_name_ from something user settable?
+ return agent_name_ + "=" + opts + (opts.empty() ? "" : ",") +
+ "ddm_already_active=" + (notified_ddm_active_ ? "y" : "n") + "," +
+ // See the comment above for why we need to be server=y. Since the agent defaults to server=n
+ // we will add it if it wasn't already present for the convenience of the user.
+ (ContainsArgument(opts, "server=y") ? "" : "server=y,") +
+ // See the comment above for why we need to be suspend=n. Since the agent defaults to
+ // suspend=y we will add it if it wasn't already present.
+ (ContainsArgument(opts, "suspend=n") ? "" : "suspend=n") +
+ "transport=dt_fd_forward,address=" + std::to_string(remote_agent_control_sock_);
}
void AdbConnectionState::StopDebuggerThreads() {
@@ -620,24 +881,26 @@ void AdbConnectionState::StopDebuggerThreads() {
shutting_down_ = true;
// Wakeup the poll loop.
uint64_t data = 1;
- TEMP_FAILURE_RETRY(write(sleep_event_fd_, &data, sizeof(data)));
+ if (sleep_event_fd_ != -1) {
+ TEMP_FAILURE_RETRY(write(sleep_event_fd_, &data, sizeof(data)));
+ }
}
// The plugin initialization function.
extern "C" bool ArtPlugin_Initialize() REQUIRES_SHARED(art::Locks::mutator_lock_) {
DCHECK(art::Runtime::Current()->GetJdwpProvider() == art::JdwpProvider::kAdbConnection);
// TODO Provide some way for apps to set this maybe?
+ DCHECK(gState == nullptr);
gState = new AdbConnectionState(kDefaultJdwpAgentName);
- CHECK(gState != nullptr);
- return true;
+ return ValidateJdwpOptions(art::Runtime::Current()->GetJdwpOptions());
}
extern "C" bool ArtPlugin_Deinitialize() {
- CHECK(gState != nullptr);
- // Just do this a second time?
- // TODO I don't think this should be needed.
gState->StopDebuggerThreads();
- delete gState;
+ if (!gState->DebuggerThreadsStarted()) {
+ // If debugger threads were started then those threads will delete the state once they are done.
+ delete gState;
+ }
return true;
}
diff --git a/adbconnection/adbconnection.h b/adbconnection/adbconnection.h
index 28a5a05af3..04e39bf4ff 100644
--- a/adbconnection/adbconnection.h
+++ b/adbconnection/adbconnection.h
@@ -24,6 +24,7 @@
#include "android-base/unique_fd.h"
#include "base/mutex.h"
+#include "base/array_ref.h"
#include "runtime_callbacks.h"
#include <sys/socket.h>
@@ -56,6 +57,8 @@ struct AdbConnectionDebuggerController : public art::DebuggerControlCallback {
AdbConnectionState* connection_;
};
+enum class DdmPacketType : uint8_t { kReply = 0x80, kCmd = 0x00, };
+
struct AdbConnectionDdmCallback : public art::DdmCallback {
explicit AdbConnectionDdmCallback(AdbConnectionState* connection) : connection_(connection) {}
@@ -69,7 +72,7 @@ struct AdbConnectionDdmCallback : public art::DdmCallback {
class AdbConnectionState {
public:
- explicit AdbConnectionState(const std::string& agent_name);
+ explicit AdbConnectionState(const std::string& name);
// Called on the listening thread to start dealing with new input. thr is used to attach the new
// thread to the runtime.
@@ -82,6 +85,11 @@ class AdbConnectionState {
// Stops debugger threads during shutdown.
void StopDebuggerThreads();
+ // If StartDebuggerThreads was called successfully.
+ bool DebuggerThreadsStarted() {
+ return started_debugger_threads_;
+ }
+
private:
uint32_t NextDdmId();
@@ -94,10 +102,23 @@ class AdbConnectionState {
android::base::unique_fd ReadFdFromAdb();
- void SendAgentFds();
+ void SendAgentFds(bool require_handshake);
void CloseFds();
+ void HandleDataWithoutAgent(art::Thread* self);
+
+ void PerformHandshake();
+
+ void AttachJdwpAgent(art::Thread* self);
+
+ void NotifyDdms(bool active);
+
+ void SendDdmPacket(uint32_t id,
+ DdmPacketType type,
+ uint32_t ddm_type,
+ art::ArrayRef<const uint8_t> data);
+
std::string agent_name_;
AdbConnectionDebuggerController controller_;
@@ -139,8 +160,14 @@ class AdbConnectionState {
std::atomic<bool> sent_agent_fds_;
+ bool performed_handshake_;
+
+ bool notified_ddm_active_;
+
std::atomic<uint32_t> next_ddm_id_;
+ bool started_debugger_threads_;
+
socklen_t control_addr_len_;
union {
sockaddr_un controlAddrUn;
diff --git a/build/Android.common_test.mk b/build/Android.common_test.mk
index 3d1f4343f1..7fae7f6200 100644
--- a/build/Android.common_test.mk
+++ b/build/Android.common_test.mk
@@ -23,10 +23,10 @@ include art/build/Android.common_path.mk
ifneq ($(TMPDIR),)
ART_HOST_TEST_DIR := $(TMPDIR)/test-art-$(shell echo $$PPID)
else
-# Use a BSD checksum calculated from PPID and USER as one of the path
+# Use a BSD checksum calculated from CWD and USER as one of the path
# components for the test output. This should allow us to run tests from
# multiple repositories at the same time.
-ART_HOST_TEST_DIR := /tmp/test-art-$(shell echo $$PPID-${USER} | sum | cut -d ' ' -f1)
+ART_HOST_TEST_DIR := /tmp/test-art-$(shell echo $$CWD-${USER} | sum | cut -d ' ' -f1)
endif
# List of known broken tests that we won't attempt to execute. The test name must be the full
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 1f36cb4e46..c8f661561f 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -36,6 +36,7 @@ GTEST_DEX_DIRECTORIES := \
ForClassLoaderD \
ExceptionHandle \
GetMethodSignature \
+ HiddenApi \
ImageLayoutA \
ImageLayoutB \
IMTA \
@@ -72,6 +73,11 @@ $(foreach dir,$(GTEST_DEX_DIRECTORIES), $(eval $(call build-art-test-dex,art-gte
ART_TEST_HOST_GTEST_MainStripped_DEX := $(basename $(ART_TEST_HOST_GTEST_Main_DEX))Stripped$(suffix $(ART_TEST_HOST_GTEST_Main_DEX))
ART_TEST_TARGET_GTEST_MainStripped_DEX := $(basename $(ART_TEST_TARGET_GTEST_Main_DEX))Stripped$(suffix $(ART_TEST_TARGET_GTEST_Main_DEX))
+# Create rules for MainUncompressed, a copy of Main with the classes.dex uncompressed
+# for the dex2oat tests.
+ART_TEST_HOST_GTEST_MainUncompressed_DEX := $(basename $(ART_TEST_HOST_GTEST_Main_DEX))Uncompressed$(suffix $(ART_TEST_HOST_GTEST_Main_DEX))
+ART_TEST_TARGET_GTEST_MainUncompressed_DEX := $(basename $(ART_TEST_TARGET_GTEST_Main_DEX))Uncompressed$(suffix $(ART_TEST_TARGET_GTEST_Main_DEX))
+
$(ART_TEST_HOST_GTEST_MainStripped_DEX): $(ART_TEST_HOST_GTEST_Main_DEX)
cp $< $@
$(call dexpreopt-remove-classes.dex,$@)
@@ -80,6 +86,16 @@ $(ART_TEST_TARGET_GTEST_MainStripped_DEX): $(ART_TEST_TARGET_GTEST_Main_DEX)
cp $< $@
$(call dexpreopt-remove-classes.dex,$@)
+$(ART_TEST_HOST_GTEST_MainUncompressed_DEX): $(ART_TEST_HOST_GTEST_Main_DEX) $(ZIPALIGN)
+ cp $< $@
+ $(call uncompress-dexs, $@)
+ $(call align-package, $@)
+
+$(ART_TEST_TARGET_GTEST_MainUncompressed_DEX): $(ART_TEST_TARGET_GTEST_Main_DEX) $(ZIPALIGN)
+ cp $< $@
+ $(call uncompress-dexs, $@)
+ $(call align-package, $@)
+
ART_TEST_GTEST_VerifierDeps_SRC := $(abspath $(wildcard $(LOCAL_PATH)/VerifierDeps/*.smali))
ART_TEST_GTEST_VerifierDepsMulti_SRC := $(abspath $(wildcard $(LOCAL_PATH)/VerifierDepsMulti/*.smali))
ART_TEST_HOST_GTEST_VerifierDeps_DEX := $(dir $(ART_TEST_HOST_GTEST_Main_DEX))$(subst Main,VerifierDeps,$(basename $(notdir $(ART_TEST_HOST_GTEST_Main_DEX))))$(suffix $(ART_TEST_HOST_GTEST_Main_DEX))
@@ -110,9 +126,10 @@ ART_GTEST_compiler_driver_test_DEX_DEPS := AbstractMethod StaticLeafMethods Prof
ART_GTEST_dex_cache_test_DEX_DEPS := Main Packages MethodTypes
ART_GTEST_dex_file_test_DEX_DEPS := GetMethodSignature Main Nested MultiDex
ART_GTEST_dexlayout_test_DEX_DEPS := ManyMethods
-ART_GTEST_dex2oat_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS) ManyMethods Statics VerifierDeps
+ART_GTEST_dex2oat_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS) ManyMethods Statics VerifierDeps MainUncompressed
ART_GTEST_dex2oat_image_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS) Statics VerifierDeps
ART_GTEST_exception_test_DEX_DEPS := ExceptionHandle
+ART_GTEST_hiddenapi_test_DEX_DEPS := HiddenApi
ART_GTEST_image_test_DEX_DEPS := ImageLayoutA ImageLayoutB DefaultMethods
ART_GTEST_imtable_test_DEX_DEPS := IMTA IMTB
ART_GTEST_instrumentation_test_DEX_DEPS := Instrumentation
@@ -155,6 +172,11 @@ ART_GTEST_dex2oat_environment_tests_TARGET_DEPS := \
$(TARGET_CORE_IMAGE_interpreter_32) \
patchoatd-target
+ART_GTEST_oat_file_test_HOST_DEPS := \
+ $(ART_GTEST_dex2oat_environment_tests_HOST_DEPS)
+ART_GTEST_oat_file_test_TARGET_DEPS := \
+ $(ART_GTEST_dex2oat_environment_tests_TARGET_DEPS)
+
ART_GTEST_oat_file_assistant_test_HOST_DEPS := \
$(ART_GTEST_dex2oat_environment_tests_HOST_DEPS)
ART_GTEST_oat_file_assistant_test_TARGET_DEPS := \
@@ -261,6 +283,11 @@ ART_GTEST_patchoat_test_TARGET_DEPS := \
ART_GTEST_profile_assistant_test_HOST_DEPS := profmand-host
ART_GTEST_profile_assistant_test_TARGET_DEPS := profmand-target
+ART_GTEST_hiddenapi_test_HOST_DEPS := \
+ $(HOST_CORE_IMAGE_DEFAULT_64) \
+ $(HOST_CORE_IMAGE_DEFAULT_32) \
+ hiddenapid-host
+
# The path for which all the source files are relative, not actually the current directory.
LOCAL_PATH := art
@@ -274,6 +301,7 @@ ART_TEST_MODULES := \
art_dexlayout_tests \
art_dexlist_tests \
art_dexoptanalyzer_tests \
+ art_hiddenapi_tests \
art_imgdiag_tests \
art_oatdump_tests \
art_patchoat_tests \
@@ -438,13 +466,27 @@ define define-art-gtest-rule-host
ART_TEST_HOST_GTEST_DEPENDENCIES += $$(gtest_deps)
+.PHONY: $$(gtest_rule)
+ifeq (,$(SANITIZE_HOST))
+$$(gtest_rule): $$(gtest_exe) $$(gtest_deps)
+ $(hide) ($$(call ART_TEST_SKIP,$$@) && $$< && \
+ $$(call ART_TEST_PASSED,$$@)) || $$(call ART_TEST_FAILED,$$@)
+else
# Note: envsetup currently exports ASAN_OPTIONS=detect_leaks=0 to suppress leak detection, as some
# build tools (e.g., ninja) intentionally leak. We want leak checks when we run our tests, so
# override ASAN_OPTIONS. b/37751350
-.PHONY: $$(gtest_rule)
+# Note 2: Under sanitization, also capture the output, and run it through the stack tool on failure
+# (with the x86-64 ABI, as this allows symbolization of both x86 and x86-64). We don't do this in
+# general as it loses all the color output, and we have our own symbolization step when not running
+# under ASAN.
$$(gtest_rule): $$(gtest_exe) $$(gtest_deps)
- $(hide) ($$(call ART_TEST_SKIP,$$@) && ASAN_OPTIONS=detect_leaks=1 $$< && \
- $$(call ART_TEST_PASSED,$$@)) || $$(call ART_TEST_FAILED,$$@)
+ $(hide) ($$(call ART_TEST_SKIP,$$@) && set -o pipefail && \
+ ASAN_OPTIONS=detect_leaks=1 $$< 2>&1 | tee $$<.tmp.out >&2 && \
+ { $$(call ART_TEST_PASSED,$$@) ; rm $$<.tmp.out ; }) || \
+ ( grep -q AddressSanitizer $$<.tmp.out && export ANDROID_BUILD_TOP=`pwd` && \
+ { echo "ABI: 'x86_64'" | cat - $$<.tmp.out | development/scripts/stack | tail -n 3000 ; } ; \
+ rm $$<.tmp.out ; $$(call ART_TEST_FAILED,$$@))
+endif
ART_TEST_HOST_GTEST$$($(3)ART_PHONY_TEST_HOST_SUFFIX)_RULES += $$(gtest_rule)
ART_TEST_HOST_GTEST_RULES += $$(gtest_rule)
@@ -706,6 +748,8 @@ $(foreach dir,$(GTEST_DEX_DIRECTORIES), $(eval ART_TEST_TARGET_GTEST_$(dir)_DEX
$(foreach dir,$(GTEST_DEX_DIRECTORIES), $(eval ART_TEST_HOST_GTEST_$(dir)_DEX :=))
ART_TEST_HOST_GTEST_MainStripped_DEX :=
ART_TEST_TARGET_GTEST_MainStripped_DEX :=
+ART_TEST_HOST_GTEST_MainUncompressed_DEX :=
+ART_TEST_TARGET_GTEST_MainUncompressed_DEX :=
ART_TEST_GTEST_VerifierDeps_SRC :=
ART_TEST_HOST_GTEST_VerifierDeps_DEX :=
ART_TEST_TARGET_GTEST_VerifierDeps_DEX :=
diff --git a/build/art.go b/build/art.go
index bf6eee6c41..59480a0d0f 100644
--- a/build/art.go
+++ b/build/art.go
@@ -66,7 +66,7 @@ func globalFlags(ctx android.BaseContext) ([]string, []string) {
"-DART_READ_BARRIER_TYPE_IS_"+barrierType+"=1")
}
- cdexLevel := envDefault(ctx, "ART_DEFAULT_COMPACT_DEX_LEVEL", "none")
+ cdexLevel := envDefault(ctx, "ART_DEFAULT_COMPACT_DEX_LEVEL", "fast")
cflags = append(cflags, "-DART_DEFAULT_COMPACT_DEX_LEVEL="+cdexLevel)
// We need larger stack overflow guards for ASAN, as the compiled code will have
diff --git a/cmdline/cmdline_parser_test.cc b/cmdline/cmdline_parser_test.cc
index 5d672061df..3cb9731a17 100644
--- a/cmdline/cmdline_parser_test.cc
+++ b/cmdline/cmdline_parser_test.cc
@@ -369,13 +369,13 @@ TEST_F(CmdlineParserTest, DISABLED_TestXGcOption) {
*/
TEST_F(CmdlineParserTest, TestJdwpProviderEmpty) {
{
- EXPECT_SINGLE_PARSE_DEFAULT_VALUE(JdwpProvider::kInternal, "", M::JdwpProvider);
+ EXPECT_SINGLE_PARSE_DEFAULT_VALUE(JdwpProvider::kNone, "", M::JdwpProvider);
}
} // TEST_F
TEST_F(CmdlineParserTest, TestJdwpProviderDefault) {
const char* opt_args = "-XjdwpProvider:default";
- EXPECT_SINGLE_PARSE_VALUE(JdwpProvider::kInternal, opt_args, M::JdwpProvider);
+ EXPECT_SINGLE_PARSE_VALUE(JdwpProvider::kDefaultJdwpProvider, opt_args, M::JdwpProvider);
} // TEST_F
TEST_F(CmdlineParserTest, TestJdwpProviderInternal) {
diff --git a/cmdline/cmdline_types.h b/cmdline/cmdline_types.h
index d0d6bfd3ce..c8be69d922 100644
--- a/cmdline/cmdline_types.h
+++ b/cmdline/cmdline_types.h
@@ -76,9 +76,10 @@ struct CmdlineType<JdwpProvider> : CmdlineTypeParser<JdwpProvider> {
"Example: -XjdwpProvider:none to disable JDWP\n"
"Example: -XjdwpProvider:internal for internal jdwp implementation\n"
"Example: -XjdwpProvider:adbconnection for adb connection mediated jdwp implementation\n"
- "Example: -XjdwpProvider:default for the default jdwp implementation"
- " (currently internal)\n");
- } else if (option == "internal" || option == "default") {
+ "Example: -XjdwpProvider:default for the default jdwp implementation\n");
+ } else if (option == "default") {
+ return Result::Success(JdwpProvider::kDefaultJdwpProvider);
+ } else if (option == "internal") {
return Result::Success(JdwpProvider::kInternal);
} else if (option == "adbconnection") {
return Result::Success(JdwpProvider::kAdbConnection);
diff --git a/compiler/Android.bp b/compiler/Android.bp
index 164f9c1e8f..453965947d 100644
--- a/compiler/Android.bp
+++ b/compiler/Android.bp
@@ -89,6 +89,7 @@ art_cc_defaults {
"optimizing/ssa_liveness_analysis.cc",
"optimizing/ssa_phi_elimination.cc",
"optimizing/stack_map_stream.cc",
+ "optimizing/superblock_cloner.cc",
"trampolines/trampoline_compiler.cc",
"utils/assembler.cc",
"utils/jni_macro_assembler.cc",
@@ -183,6 +184,7 @@ art_cc_defaults {
},
generated_sources: ["art_compiler_operator_srcs"],
shared_libs: [
+ "libdexfile",
"libbase",
"libcutils", // for atrace.
"liblzma",
@@ -249,6 +251,12 @@ art_cc_library {
shared_libs: [
"libart",
],
+
+ pgo: {
+ instrumentation: true,
+ profile_file: "art/dex2oat.profdata",
+ benchmarks: ["dex2oat"],
+ }
}
art_cc_library {
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index 05fdc97e07..8af29d44f0 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -23,7 +23,6 @@
#include "common_runtime_test.h"
#include "compiler.h"
-#include "jit/profile_compilation_info.h"
#include "oat_file.h"
namespace art {
@@ -34,6 +33,7 @@ class ClassLoader;
class CompilerDriver;
class CompilerOptions;
class CumulativeLogger;
+class ProfileCompilationInfo;
class VerificationResults;
template<class T> class Handle;
diff --git a/compiler/debug/debug_info.h b/compiler/debug/debug_info.h
new file mode 100644
index 0000000000..04c6991ea3
--- /dev/null
+++ b/compiler/debug/debug_info.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEBUG_DEBUG_INFO_H_
+#define ART_COMPILER_DEBUG_DEBUG_INFO_H_
+
+#include <map>
+
+#include "base/array_ref.h"
+#include "method_debug_info.h"
+
+namespace art {
+class DexFile;
+
+namespace debug {
+
+// References inputs for all debug information which can be written into the ELF file.
+struct DebugInfo {
+ // Describes compiled code in the .text section.
+ ArrayRef<const MethodDebugInfo> compiled_methods;
+
+ // Describes dex-files in the .dex section.
+ std::map<uint32_t, const DexFile*> dex_files; // Offset in section -> dex file content.
+
+ bool Empty() const {
+ return compiled_methods.empty() && dex_files.empty();
+ }
+};
+
+} // namespace debug
+} // namespace art
+
+#endif // ART_COMPILER_DEBUG_DEBUG_INFO_H_
diff --git a/compiler/debug/elf_debug_info_writer.h b/compiler/debug/elf_debug_info_writer.h
index 713f8eb05d..893cad288b 100644
--- a/compiler/debug/elf_debug_info_writer.h
+++ b/compiler/debug/elf_debug_info_writer.h
@@ -49,7 +49,7 @@ static void LocalInfoCallback(void* ctx, const DexFile::LocalInfo& entry) {
static std::vector<const char*> GetParamNames(const MethodDebugInfo* mi) {
std::vector<const char*> names;
- CodeItemDebugInfoAccessor accessor(*mi->dex_file, mi->code_item);
+ CodeItemDebugInfoAccessor accessor(*mi->dex_file, mi->code_item, mi->dex_method_index);
if (accessor.HasCodeItem()) {
DCHECK(mi->dex_file != nullptr);
const uint8_t* stream = mi->dex_file->GetDebugInfoStream(accessor.DebugInfoOffset());
@@ -163,7 +163,7 @@ class ElfCompilationUnitWriter {
for (auto mi : compilation_unit.methods) {
DCHECK(mi->dex_file != nullptr);
const DexFile* dex = mi->dex_file;
- CodeItemDebugInfoAccessor accessor(*dex, mi->code_item);
+ CodeItemDebugInfoAccessor accessor(*dex, mi->code_item, mi->dex_method_index);
const DexFile::MethodId& dex_method = dex->GetMethodId(mi->dex_method_index);
const DexFile::ProtoId& dex_proto = dex->GetMethodPrototype(dex_method);
const DexFile::TypeList* dex_params = dex->GetProtoParameters(dex_proto);
diff --git a/compiler/debug/elf_debug_line_writer.h b/compiler/debug/elf_debug_line_writer.h
index 4e37f4e4ba..44504c1efb 100644
--- a/compiler/debug/elf_debug_line_writer.h
+++ b/compiler/debug/elf_debug_line_writer.h
@@ -159,7 +159,7 @@ class ElfDebugLineWriter {
PositionInfos dex2line_map;
DCHECK(mi->dex_file != nullptr);
const DexFile* dex = mi->dex_file;
- CodeItemDebugInfoAccessor accessor(*dex, mi->code_item);
+ CodeItemDebugInfoAccessor accessor(*dex, mi->code_item, mi->dex_method_index);
const uint32_t debug_info_offset = accessor.DebugInfoOffset();
if (!dex->DecodeDebugPositionInfo(debug_info_offset, PositionInfoCallback, &dex2line_map)) {
continue;
diff --git a/compiler/debug/elf_debug_writer.cc b/compiler/debug/elf_debug_writer.cc
index a6267292bf..df5bb37358 100644
--- a/compiler/debug/elf_debug_writer.cc
+++ b/compiler/debug/elf_debug_writer.cc
@@ -38,18 +38,18 @@ namespace debug {
template <typename ElfTypes>
void WriteDebugInfo(linker::ElfBuilder<ElfTypes>* builder,
- const ArrayRef<const MethodDebugInfo>& method_infos,
+ const DebugInfo& debug_info,
dwarf::CFIFormat cfi_format,
bool write_oat_patches) {
// Write .strtab and .symtab.
- WriteDebugSymbols(builder, method_infos, true /* with_signature */);
+ WriteDebugSymbols(builder, false /* mini-debug-info */, debug_info);
// Write .debug_frame.
- WriteCFISection(builder, method_infos, cfi_format, write_oat_patches);
+ WriteCFISection(builder, debug_info.compiled_methods, cfi_format, write_oat_patches);
// Group the methods into compilation units based on class.
std::unordered_map<const DexFile::ClassDef*, ElfCompilationUnit> class_to_compilation_unit;
- for (const MethodDebugInfo& mi : method_infos) {
+ for (const MethodDebugInfo& mi : debug_info.compiled_methods) {
if (mi.dex_file != nullptr) {
auto& dex_class_def = mi.dex_file->GetClassDef(mi.class_def_index);
ElfCompilationUnit& cu = class_to_compilation_unit[&dex_class_def];
@@ -108,21 +108,27 @@ void WriteDebugInfo(linker::ElfBuilder<ElfTypes>* builder,
std::vector<uint8_t> MakeMiniDebugInfo(
InstructionSet isa,
const InstructionSetFeatures* features,
- uint64_t text_address,
- size_t text_size,
- const ArrayRef<const MethodDebugInfo>& method_infos) {
+ uint64_t text_section_address,
+ size_t text_section_size,
+ uint64_t dex_section_address,
+ size_t dex_section_size,
+ const DebugInfo& debug_info) {
if (Is64BitInstructionSet(isa)) {
return MakeMiniDebugInfoInternal<ElfTypes64>(isa,
features,
- text_address,
- text_size,
- method_infos);
+ text_section_address,
+ text_section_size,
+ dex_section_address,
+ dex_section_size,
+ debug_info);
} else {
return MakeMiniDebugInfoInternal<ElfTypes32>(isa,
features,
- text_address,
- text_size,
- method_infos);
+ text_section_address,
+ text_section_size,
+ dex_section_address,
+ dex_section_size,
+ debug_info);
}
}
@@ -131,9 +137,17 @@ static std::vector<uint8_t> MakeElfFileForJITInternal(
InstructionSet isa,
const InstructionSetFeatures* features,
bool mini_debug_info,
- const MethodDebugInfo& mi) {
- CHECK_EQ(mi.is_code_address_text_relative, false);
- ArrayRef<const MethodDebugInfo> method_infos(&mi, 1);
+ ArrayRef<const MethodDebugInfo> method_infos) {
+ CHECK_GT(method_infos.size(), 0u);
+ uint64_t min_address = std::numeric_limits<uint64_t>::max();
+ uint64_t max_address = 0;
+ for (const MethodDebugInfo& mi : method_infos) {
+ CHECK_EQ(mi.is_code_address_text_relative, false);
+ min_address = std::min(min_address, mi.code_address);
+ max_address = std::max(max_address, mi.code_address + mi.code_size);
+ }
+ DebugInfo debug_info{};
+ debug_info.compiled_methods = method_infos;
std::vector<uint8_t> buffer;
buffer.reserve(KB);
linker::VectorOutputStream out("Debug ELF file", &buffer);
@@ -144,14 +158,16 @@ static std::vector<uint8_t> MakeElfFileForJITInternal(
if (mini_debug_info) {
std::vector<uint8_t> mdi = MakeMiniDebugInfo(isa,
features,
- mi.code_address,
- mi.code_size,
- method_infos);
+ min_address,
+ max_address - min_address,
+ /* dex_section_address */ 0,
+ /* dex_section_size */ 0,
+ debug_info);
builder->WriteSection(".gnu_debugdata", &mdi);
} else {
- builder->GetText()->AllocateVirtualMemory(mi.code_address, mi.code_size);
+ builder->GetText()->AllocateVirtualMemory(min_address, max_address - min_address);
WriteDebugInfo(builder.get(),
- method_infos,
+ debug_info,
dwarf::DW_DEBUG_FRAME_FORMAT,
false /* write_oat_patches */);
}
@@ -164,11 +180,11 @@ std::vector<uint8_t> MakeElfFileForJIT(
InstructionSet isa,
const InstructionSetFeatures* features,
bool mini_debug_info,
- const MethodDebugInfo& method_info) {
+ ArrayRef<const MethodDebugInfo> method_infos) {
if (Is64BitInstructionSet(isa)) {
- return MakeElfFileForJITInternal<ElfTypes64>(isa, features, mini_debug_info, method_info);
+ return MakeElfFileForJITInternal<ElfTypes64>(isa, features, mini_debug_info, method_infos);
} else {
- return MakeElfFileForJITInternal<ElfTypes32>(isa, features, mini_debug_info, method_info);
+ return MakeElfFileForJITInternal<ElfTypes32>(isa, features, mini_debug_info, method_infos);
}
}
@@ -209,12 +225,12 @@ std::vector<uint8_t> WriteDebugElfFileForClasses(InstructionSet isa,
// Explicit instantiations
template void WriteDebugInfo<ElfTypes32>(
linker::ElfBuilder<ElfTypes32>* builder,
- const ArrayRef<const MethodDebugInfo>& method_infos,
+ const DebugInfo& debug_info,
dwarf::CFIFormat cfi_format,
bool write_oat_patches);
template void WriteDebugInfo<ElfTypes64>(
linker::ElfBuilder<ElfTypes64>* builder,
- const ArrayRef<const MethodDebugInfo>& method_infos,
+ const DebugInfo& debug_info,
dwarf::CFIFormat cfi_format,
bool write_oat_patches);
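
MakeElfFileForJIT now takes a batch of methods rather than a single MethodDebugInfo, so the writer first derives the .text range it must cover from the batch. A standalone sketch of that covering-range computation, mirroring the loop in MakeElfFileForJITInternal above (it assumes a non-empty batch with absolute code addresses, as the CHECKs in the hunk require):

    #include <algorithm>
    #include <cstdint>
    #include <limits>
    #include <utility>
    #include <vector>

    struct MethodRange { uint64_t code_address; uint64_t code_size; };  // stand-in fields

    // Returns {start address, size} covering every method in the batch.
    std::pair<uint64_t, uint64_t> CoveringTextRange(const std::vector<MethodRange>& methods) {
      uint64_t min_address = std::numeric_limits<uint64_t>::max();
      uint64_t max_address = 0;
      for (const MethodRange& m : methods) {
        min_address = std::min(min_address, m.code_address);
        max_address = std::max(max_address, m.code_address + m.code_size);
      }
      return {min_address, max_address - min_address};
    }
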
diff --git a/compiler/debug/elf_debug_writer.h b/compiler/debug/elf_debug_writer.h
index a47bf076b9..e442e0016c 100644
--- a/compiler/debug/elf_debug_writer.h
+++ b/compiler/debug/elf_debug_writer.h
@@ -23,6 +23,7 @@
#include "base/macros.h"
#include "base/mutex.h"
#include "debug/dwarf/dwarf_constants.h"
+#include "debug/debug_info.h"
#include "linker/elf_builder.h"
namespace art {
@@ -36,7 +37,7 @@ struct MethodDebugInfo;
template <typename ElfTypes>
void WriteDebugInfo(
linker::ElfBuilder<ElfTypes>* builder,
- const ArrayRef<const MethodDebugInfo>& method_infos,
+ const DebugInfo& debug_info,
dwarf::CFIFormat cfi_format,
bool write_oat_patches);
@@ -45,13 +46,15 @@ std::vector<uint8_t> MakeMiniDebugInfo(
const InstructionSetFeatures* features,
uint64_t text_section_address,
size_t text_section_size,
- const ArrayRef<const MethodDebugInfo>& method_infos);
+ uint64_t dex_section_address,
+ size_t dex_section_size,
+ const DebugInfo& debug_info);
std::vector<uint8_t> MakeElfFileForJIT(
InstructionSet isa,
const InstructionSetFeatures* features,
bool mini_debug_info,
- const MethodDebugInfo& method_info);
+ ArrayRef<const MethodDebugInfo> method_infos);
std::vector<uint8_t> WriteDebugElfFileForClasses(
InstructionSet isa,
diff --git a/compiler/debug/elf_gnu_debugdata_writer.h b/compiler/debug/elf_gnu_debugdata_writer.h
index 78b8e2780c..a88c5cb213 100644
--- a/compiler/debug/elf_gnu_debugdata_writer.h
+++ b/compiler/debug/elf_gnu_debugdata_writer.h
@@ -82,18 +82,23 @@ static std::vector<uint8_t> MakeMiniDebugInfoInternal(
const InstructionSetFeatures* features,
typename ElfTypes::Addr text_section_address,
size_t text_section_size,
- const ArrayRef<const MethodDebugInfo>& method_infos) {
+ typename ElfTypes::Addr dex_section_address,
+ size_t dex_section_size,
+ const DebugInfo& debug_info) {
std::vector<uint8_t> buffer;
buffer.reserve(KB);
linker::VectorOutputStream out("Mini-debug-info ELF file", &buffer);
std::unique_ptr<linker::ElfBuilder<ElfTypes>> builder(
new linker::ElfBuilder<ElfTypes>(isa, features, &out));
builder->Start(false /* write_program_headers */);
- // Mirror .text as NOBITS section since the added symbols will reference it.
+ // Mirror ELF sections as NOBITS since the added symbols will reference them.
builder->GetText()->AllocateVirtualMemory(text_section_address, text_section_size);
- WriteDebugSymbols(builder.get(), method_infos, false /* with_signature */);
+ if (dex_section_size != 0) {
+ builder->GetDex()->AllocateVirtualMemory(dex_section_address, dex_section_size);
+ }
+ WriteDebugSymbols(builder.get(), true /* mini-debug-info */, debug_info);
WriteCFISection(builder.get(),
- method_infos,
+ debug_info.compiled_methods,
dwarf::DW_DEBUG_FRAME_FORMAT,
false /* write_oat_patches */);
builder->End();
diff --git a/compiler/debug/elf_symtab_writer.h b/compiler/debug/elf_symtab_writer.h
index 57e010f232..4b19547d28 100644
--- a/compiler/debug/elf_symtab_writer.h
+++ b/compiler/debug/elf_symtab_writer.h
@@ -17,9 +17,13 @@
#ifndef ART_COMPILER_DEBUG_ELF_SYMTAB_WRITER_H_
#define ART_COMPILER_DEBUG_ELF_SYMTAB_WRITER_H_
+#include <map>
#include <unordered_set>
+#include "debug/debug_info.h"
#include "debug/method_debug_info.h"
+#include "dex/dex_file-inl.h"
+#include "dex/code_item_accessors.h"
#include "linker/elf_builder.h"
#include "utils.h"
@@ -35,22 +39,26 @@ namespace debug {
// one symbol which marks the whole .text section as code.
constexpr bool kGenerateSingleArmMappingSymbol = true;
+// Magic name for .symtab symbols which enumerate dex files used
+// by this ELF file (currently mmapped inside the .dex section).
+constexpr const char* kDexFileSymbolName = "$dexfile";
+
template <typename ElfTypes>
static void WriteDebugSymbols(linker::ElfBuilder<ElfTypes>* builder,
- const ArrayRef<const MethodDebugInfo>& method_infos,
- bool with_signature) {
+ bool mini_debug_info,
+ const DebugInfo& debug_info) {
uint64_t mapping_symbol_address = std::numeric_limits<uint64_t>::max();
auto* strtab = builder->GetStrTab();
auto* symtab = builder->GetSymTab();
- if (method_infos.empty()) {
+ if (debug_info.Empty()) {
return;
}
// Find all addresses which contain deduped methods.
// The first instance of method is not marked deduped_, but the rest is.
std::unordered_set<uint64_t> deduped_addresses;
- for (const MethodDebugInfo& info : method_infos) {
+ for (const MethodDebugInfo& info : debug_info.compiled_methods) {
if (info.deduped) {
deduped_addresses.insert(info.code_address);
}
@@ -58,25 +66,21 @@ static void WriteDebugSymbols(linker::ElfBuilder<ElfTypes>* builder,
strtab->Start();
strtab->Write(""); // strtab should start with empty string.
- std::string last_name;
- size_t last_name_offset = 0;
- for (const MethodDebugInfo& info : method_infos) {
+ // Add symbols for compiled methods.
+ for (const MethodDebugInfo& info : debug_info.compiled_methods) {
if (info.deduped) {
continue; // Add symbol only for the first instance.
}
size_t name_offset;
- if (!info.trampoline_name.empty()) {
- name_offset = strtab->Write(info.trampoline_name);
+ if (!info.custom_name.empty()) {
+ name_offset = strtab->Write(info.custom_name);
} else {
DCHECK(info.dex_file != nullptr);
- std::string name = info.dex_file->PrettyMethod(info.dex_method_index, with_signature);
+ std::string name = info.dex_file->PrettyMethod(info.dex_method_index, !mini_debug_info);
if (deduped_addresses.find(info.code_address) != deduped_addresses.end()) {
name += " [DEDUPED]";
}
- // If we write method names without signature, we might see the same name multiple times.
- name_offset = (name == last_name ? last_name_offset : strtab->Write(name));
- last_name = std::move(name);
- last_name_offset = name_offset;
+ name_offset = strtab->Write(name);
}
const auto* text = builder->GetText();
@@ -97,13 +101,20 @@ static void WriteDebugSymbols(linker::ElfBuilder<ElfTypes>* builder,
}
}
}
+ // Add symbols for dex files.
+ if (!debug_info.dex_files.empty() && builder->GetDex()->Exists()) {
+ auto dex = builder->GetDex();
+ for (auto it : debug_info.dex_files) {
+ uint64_t dex_address = dex->GetAddress() + it.first /* offset within the section */;
+ const DexFile* dex_file = it.second;
+ typename ElfTypes::Word dex_name = strtab->Write(kDexFileSymbolName);
+ symtab->Add(dex_name, dex, dex_address, dex_file->Size(), STB_GLOBAL, STT_FUNC);
+ }
+ }
strtab->End();
// Symbols are buffered and written after names (because they are smaller).
- // We could also do two passes in this function to avoid the buffering.
- symtab->Start();
- symtab->Write();
- symtab->End();
+ symtab->WriteCachedSection();
}
} // namespace debug
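
With kDexFileSymbolName, WriteDebugSymbols now also emits one global "$dexfile" symbol per dex file mapped into the .dex section, placed at the section address plus the recorded offset and sized to the dex file contents. A standalone sketch of the records that loop produces (the struct below is illustrative only; the real code calls symtab->Add() with STB_GLOBAL/STT_FUNC as shown above):

    #include <cstdint>
    #include <map>
    #include <string>
    #include <vector>

    struct DexSymbolSketch {
      std::string name;   // always "$dexfile"
      uint64_t address;   // .dex section address + offset within the section
      uint64_t size;      // size of the dex file contents
    };

    std::vector<DexSymbolSketch> MakeDexSymbols(uint64_t dex_section_address,
                                                const std::map<uint32_t, uint64_t>& offset_to_size) {
      std::vector<DexSymbolSketch> symbols;
      for (const auto& entry : offset_to_size) {
        symbols.push_back({"$dexfile", dex_section_address + entry.first, entry.second});
      }
      return symbols;
    }
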
diff --git a/compiler/debug/method_debug_info.h b/compiler/debug/method_debug_info.h
index 43c8de26aa..d0b03ec441 100644
--- a/compiler/debug/method_debug_info.h
+++ b/compiler/debug/method_debug_info.h
@@ -27,7 +27,7 @@ namespace art {
namespace debug {
struct MethodDebugInfo {
- std::string trampoline_name;
+ std::string custom_name;
const DexFile* dex_file; // Native methods (trampolines) do not reference dex file.
size_t class_def_index;
uint32_t dex_method_index;
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index 52cb217980..9f0aaa4e10 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -28,6 +28,7 @@
#include "compiled_method.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_instruction-inl.h"
+#include "dex_to_dex_decompiler.h"
#include "driver/compiler_driver.h"
#include "driver/dex_compilation_unit.h"
#include "mirror/dex_cache.h"
@@ -44,34 +45,30 @@ const bool kEnableQuickening = true;
// Control check-cast elision.
const bool kEnableCheckCastEllision = true;
-struct QuickenedInfo {
- QuickenedInfo(uint32_t pc, uint16_t index) : dex_pc(pc), dex_member_index(index) {}
+// Holds the state for compiling a single method.
+struct DexToDexCompiler::CompilationState {
+ struct QuickenedInfo {
+ QuickenedInfo(uint32_t pc, uint16_t index) : dex_pc(pc), dex_member_index(index) {}
- uint32_t dex_pc;
- uint16_t dex_member_index;
-};
-
-class DexCompiler {
- public:
- DexCompiler(art::CompilerDriver& compiler,
- const DexCompilationUnit& unit,
- DexToDexCompilationLevel dex_to_dex_compilation_level)
- : driver_(compiler),
- unit_(unit),
- dex_to_dex_compilation_level_(dex_to_dex_compilation_level) {}
+ uint32_t dex_pc;
+ uint16_t dex_member_index;
+ };
- ~DexCompiler() {}
-
- void Compile();
+ CompilationState(DexToDexCompiler* compiler,
+ const DexCompilationUnit& unit,
+ const CompilationLevel compilation_level,
+ const std::vector<uint8_t>* quicken_data);
const std::vector<QuickenedInfo>& GetQuickenedInfo() const {
return quickened_info_;
}
- private:
- const DexFile& GetDexFile() const {
- return *unit_.GetDexFile();
- }
+ // Returns the quickening info, or an empty array if it was not quickened.
+ // If already_quickened is true, then don't change anything but still return what the quicken
+ // data would have been.
+ std::vector<uint8_t> Compile();
+
+ const DexFile& GetDexFile() const;
// Compiles a RETURN-VOID into a RETURN-VOID-BARRIER within a constructor where
// a barrier is required.
@@ -93,32 +90,133 @@ class DexCompiler {
// Compiles a virtual method invocation into a quick virtual method invocation.
// The method index is replaced by the vtable index where the corresponding
- // Executable can be found. Therefore, this does not involve any resolution
+ // executable can be found. Therefore, this does not involve any resolution
// at runtime.
// Since the method index is encoded with 16 bits, we can replace it only if the
// vtable index can be encoded with 16 bits too.
void CompileInvokeVirtual(Instruction* inst, uint32_t dex_pc,
Instruction::Code new_opcode, bool is_range);
+ // Return the next index.
+ uint16_t NextIndex();
+
+ // Returns the dequickened index if an instruction is quickened, otherwise return index.
+ uint16_t GetIndexForInstruction(const Instruction* inst, uint32_t index);
+
+ DexToDexCompiler* const compiler_;
CompilerDriver& driver_;
const DexCompilationUnit& unit_;
- const DexToDexCompilationLevel dex_to_dex_compilation_level_;
+ const CompilationLevel compilation_level_;
// Filled by the compiler when quickening, in order to encode that information
// in the .oat file. The runtime will use that information to get to the original
// opcodes.
std::vector<QuickenedInfo> quickened_info_;
- DISALLOW_COPY_AND_ASSIGN(DexCompiler);
+ // True if we optimized a return void to a return void no barrier.
+ bool optimized_return_void_ = false;
+
+ // If the code item was already quickened previously.
+ const bool already_quickened_;
+ const QuickenInfoTable existing_quicken_info_;
+ uint32_t quicken_index_ = 0u;
+
+ DISALLOW_COPY_AND_ASSIGN(CompilationState);
};
-void DexCompiler::Compile() {
- DCHECK_EQ(dex_to_dex_compilation_level_, DexToDexCompilationLevel::kOptimize);
- IterationRange<DexInstructionIterator> instructions(unit_.GetCodeItemAccessor().begin(),
- unit_.GetCodeItemAccessor().end());
+DexToDexCompiler::DexToDexCompiler(CompilerDriver* driver)
+ : driver_(driver),
+ lock_("Quicken lock", kDexToDexCompilerLock) {
+ DCHECK(driver != nullptr);
+}
+
+void DexToDexCompiler::ClearState() {
+ MutexLock lock(Thread::Current(), lock_);
+ active_dex_file_ = nullptr;
+ active_bit_vector_ = nullptr;
+ should_quicken_.clear();
+ shared_code_item_quicken_info_.clear();
+}
+
+size_t DexToDexCompiler::NumCodeItemsToQuicken(Thread* self) const {
+ MutexLock lock(self, lock_);
+ return num_code_items_;
+}
+
+BitVector* DexToDexCompiler::GetOrAddBitVectorForDex(const DexFile* dex_file) {
+ if (active_dex_file_ != dex_file) {
+ active_dex_file_ = dex_file;
+ auto inserted = should_quicken_.emplace(dex_file,
+ BitVector(dex_file->NumMethodIds(),
+ /*expandable*/ false,
+ Allocator::GetMallocAllocator()));
+ active_bit_vector_ = &inserted.first->second;
+ }
+ return active_bit_vector_;
+}
+
+void DexToDexCompiler::MarkForCompilation(Thread* self,
+ const MethodReference& method_ref) {
+ MutexLock lock(self, lock_);
+ BitVector* const bitmap = GetOrAddBitVectorForDex(method_ref.dex_file);
+ DCHECK(bitmap != nullptr);
+ DCHECK(!bitmap->IsBitSet(method_ref.index));
+ bitmap->SetBit(method_ref.index);
+ ++num_code_items_;
+}
+
+DexToDexCompiler::CompilationState::CompilationState(DexToDexCompiler* compiler,
+ const DexCompilationUnit& unit,
+ const CompilationLevel compilation_level,
+ const std::vector<uint8_t>* quicken_data)
+ : compiler_(compiler),
+ driver_(*compiler->GetDriver()),
+ unit_(unit),
+ compilation_level_(compilation_level),
+ already_quickened_(quicken_data != nullptr),
+ existing_quicken_info_(already_quickened_
+ ? ArrayRef<const uint8_t>(*quicken_data) : ArrayRef<const uint8_t>()) {}
+
+uint16_t DexToDexCompiler::CompilationState::NextIndex() {
+ DCHECK(already_quickened_);
+ if (kIsDebugBuild && quicken_index_ >= existing_quicken_info_.NumIndices()) {
+ for (const DexInstructionPcPair& pair : unit_.GetCodeItemAccessor()) {
+ LOG(ERROR) << pair->DumpString(nullptr);
+ }
+ LOG(FATAL) << "Mismatched number of quicken slots.";
+ }
+ const uint16_t ret = existing_quicken_info_.GetData(quicken_index_);
+ quicken_index_++;
+ return ret;
+}
+
+uint16_t DexToDexCompiler::CompilationState::GetIndexForInstruction(const Instruction* inst,
+ uint32_t index) {
+ if (UNLIKELY(already_quickened_)) {
+ return inst->IsQuickened() ? NextIndex() : index;
+ }
+ DCHECK(!inst->IsQuickened());
+ return index;
+}
+
+bool DexToDexCompiler::ShouldCompileMethod(const MethodReference& ref) {
+ // TODO: It's probably safe to avoid the lock here if the active_dex_file_ matches since we only
+ // call ShouldCompileMethod on one dex at a time.
+ MutexLock lock(Thread::Current(), lock_);
+ return GetOrAddBitVectorForDex(ref.dex_file)->IsBitSet(ref.index);
+}
+
+std::vector<uint8_t> DexToDexCompiler::CompilationState::Compile() {
+ DCHECK_EQ(compilation_level_, CompilationLevel::kOptimize);
+ const CodeItemDataAccessor& instructions = unit_.GetCodeItemAccessor();
for (DexInstructionIterator it = instructions.begin(); it != instructions.end(); ++it) {
const uint32_t dex_pc = it.DexPc();
Instruction* inst = const_cast<Instruction*>(&it.Inst());
+
+ if (!already_quickened_) {
+ DCHECK(!inst->IsQuickened());
+ }
+
switch (inst->Opcode()) {
case Instruction::RETURN_VOID:
CompileReturnVoid(inst, dex_pc);
@@ -134,84 +232,147 @@ void DexCompiler::Compile() {
break;
case Instruction::IGET:
+ case Instruction::IGET_QUICK:
CompileInstanceFieldAccess(inst, dex_pc, Instruction::IGET_QUICK, false);
break;
case Instruction::IGET_WIDE:
+ case Instruction::IGET_WIDE_QUICK:
CompileInstanceFieldAccess(inst, dex_pc, Instruction::IGET_WIDE_QUICK, false);
break;
case Instruction::IGET_OBJECT:
+ case Instruction::IGET_OBJECT_QUICK:
CompileInstanceFieldAccess(inst, dex_pc, Instruction::IGET_OBJECT_QUICK, false);
break;
case Instruction::IGET_BOOLEAN:
+ case Instruction::IGET_BOOLEAN_QUICK:
CompileInstanceFieldAccess(inst, dex_pc, Instruction::IGET_BOOLEAN_QUICK, false);
break;
case Instruction::IGET_BYTE:
+ case Instruction::IGET_BYTE_QUICK:
CompileInstanceFieldAccess(inst, dex_pc, Instruction::IGET_BYTE_QUICK, false);
break;
case Instruction::IGET_CHAR:
+ case Instruction::IGET_CHAR_QUICK:
CompileInstanceFieldAccess(inst, dex_pc, Instruction::IGET_CHAR_QUICK, false);
break;
case Instruction::IGET_SHORT:
+ case Instruction::IGET_SHORT_QUICK:
CompileInstanceFieldAccess(inst, dex_pc, Instruction::IGET_SHORT_QUICK, false);
break;
case Instruction::IPUT:
+ case Instruction::IPUT_QUICK:
CompileInstanceFieldAccess(inst, dex_pc, Instruction::IPUT_QUICK, true);
break;
case Instruction::IPUT_BOOLEAN:
+ case Instruction::IPUT_BOOLEAN_QUICK:
CompileInstanceFieldAccess(inst, dex_pc, Instruction::IPUT_BOOLEAN_QUICK, true);
break;
case Instruction::IPUT_BYTE:
+ case Instruction::IPUT_BYTE_QUICK:
CompileInstanceFieldAccess(inst, dex_pc, Instruction::IPUT_BYTE_QUICK, true);
break;
case Instruction::IPUT_CHAR:
+ case Instruction::IPUT_CHAR_QUICK:
CompileInstanceFieldAccess(inst, dex_pc, Instruction::IPUT_CHAR_QUICK, true);
break;
case Instruction::IPUT_SHORT:
+ case Instruction::IPUT_SHORT_QUICK:
CompileInstanceFieldAccess(inst, dex_pc, Instruction::IPUT_SHORT_QUICK, true);
break;
case Instruction::IPUT_WIDE:
+ case Instruction::IPUT_WIDE_QUICK:
CompileInstanceFieldAccess(inst, dex_pc, Instruction::IPUT_WIDE_QUICK, true);
break;
case Instruction::IPUT_OBJECT:
+ case Instruction::IPUT_OBJECT_QUICK:
CompileInstanceFieldAccess(inst, dex_pc, Instruction::IPUT_OBJECT_QUICK, true);
break;
case Instruction::INVOKE_VIRTUAL:
+ case Instruction::INVOKE_VIRTUAL_QUICK:
CompileInvokeVirtual(inst, dex_pc, Instruction::INVOKE_VIRTUAL_QUICK, false);
break;
case Instruction::INVOKE_VIRTUAL_RANGE:
+ case Instruction::INVOKE_VIRTUAL_RANGE_QUICK:
CompileInvokeVirtual(inst, dex_pc, Instruction::INVOKE_VIRTUAL_RANGE_QUICK, true);
break;
case Instruction::NOP:
- // We need to differentiate between check cast inserted NOP and normal NOP, put an invalid
- // index in the map for normal nops. This should be rare in real code.
- quickened_info_.push_back(QuickenedInfo(dex_pc, DexFile::kDexNoIndex16));
+ if (already_quickened_) {
+ const uint16_t reference_index = NextIndex();
+ quickened_info_.push_back(QuickenedInfo(dex_pc, reference_index));
+ if (reference_index == DexFile::kDexNoIndex16) {
+ // This means it was a normal nop and not a check-cast.
+ break;
+ }
+ const uint16_t type_index = NextIndex();
+ if (driver_.IsSafeCast(&unit_, dex_pc)) {
+ quickened_info_.push_back(QuickenedInfo(dex_pc, type_index));
+ }
+ ++it;
+ } else {
+ // We need to differentiate between check cast inserted NOP and normal NOP, put an invalid
+ // index in the map for normal nops. This should be rare in real code.
+ quickened_info_.push_back(QuickenedInfo(dex_pc, DexFile::kDexNoIndex16));
+ }
break;
default:
- DCHECK(!inst->IsQuickened());
// Nothing to do.
break;
}
}
+
+ if (already_quickened_) {
+ DCHECK_EQ(quicken_index_, existing_quicken_info_.NumIndices());
+ }
+
+ if (GetQuickenedInfo().empty()) {
+ // No need to create a CompiledMethod if there are no quickened opcodes.
+ return std::vector<uint8_t>();
+ }
+
+ std::vector<uint8_t> quicken_data;
+ if (kIsDebugBuild) {
+ // Double check that the counts line up with the size of the quicken info.
+ size_t quicken_count = 0;
+ for (const DexInstructionPcPair& pair : instructions) {
+ if (QuickenInfoTable::NeedsIndexForInstruction(&pair.Inst())) {
+ ++quicken_count;
+ }
+ }
+ CHECK_EQ(quicken_count, GetQuickenedInfo().size());
+ }
+
+ QuickenInfoTable::Builder builder(&quicken_data, GetQuickenedInfo().size());
+ // Length is encoded by the constructor.
+ for (const CompilationState::QuickenedInfo& info : GetQuickenedInfo()) {
+ // Dex pc is not serialized, only used for checking the instructions. Since we access the
+ // array based on the index of the quickened instruction, the indexes must line up perfectly.
+ // The reader side uses the NeedsIndexForInstruction function too.
+ const Instruction& inst = instructions.InstructionAt(info.dex_pc);
+ CHECK(QuickenInfoTable::NeedsIndexForInstruction(&inst)) << inst.Opcode();
+ builder.AddIndex(info.dex_member_index);
+ }
+ DCHECK(!quicken_data.empty());
+ return quicken_data;
}
-void DexCompiler::CompileReturnVoid(Instruction* inst, uint32_t dex_pc) {
+void DexToDexCompiler::CompilationState::CompileReturnVoid(Instruction* inst, uint32_t dex_pc) {
DCHECK_EQ(inst->Opcode(), Instruction::RETURN_VOID);
if (unit_.IsConstructor()) {
// Are we compiling a non clinit constructor which needs a barrier ?
@@ -227,9 +388,11 @@ void DexCompiler::CompileReturnVoid(Instruction* inst, uint32_t dex_pc) {
<< " at dex pc " << StringPrintf("0x%x", dex_pc) << " in method "
<< GetDexFile().PrettyMethod(unit_.GetDexMethodIndex(), true);
inst->SetOpcode(Instruction::RETURN_VOID_NO_BARRIER);
+ optimized_return_void_ = true;
}
-Instruction* DexCompiler::CompileCheckCast(Instruction* inst, uint32_t dex_pc) {
+Instruction* DexToDexCompiler::CompilationState::CompileCheckCast(Instruction* inst,
+ uint32_t dex_pc) {
if (!kEnableCheckCastEllision) {
return inst;
}
@@ -246,27 +409,30 @@ Instruction* DexCompiler::CompileCheckCast(Instruction* inst, uint32_t dex_pc) {
<< " by replacing it with 2 NOPs at dex pc "
<< StringPrintf("0x%x", dex_pc) << " in method "
<< GetDexFile().PrettyMethod(unit_.GetDexMethodIndex(), true);
- quickened_info_.push_back(QuickenedInfo(dex_pc, inst->VRegA_21c()));
- quickened_info_.push_back(QuickenedInfo(dex_pc, inst->VRegB_21c()));
- // We are modifying 4 consecutive bytes.
- inst->SetOpcode(Instruction::NOP);
- inst->SetVRegA_10x(0u); // keep compliant with verifier.
- // Get to next instruction which is the second half of check-cast and replace
- // it by a NOP.
- inst = const_cast<Instruction*>(inst->Next());
- inst->SetOpcode(Instruction::NOP);
- inst->SetVRegA_10x(0u); // keep compliant with verifier.
+ if (!already_quickened_) {
+ quickened_info_.push_back(QuickenedInfo(dex_pc, inst->VRegA_21c()));
+ quickened_info_.push_back(QuickenedInfo(dex_pc, inst->VRegB_21c()));
+
+ // We are modifying 4 consecutive bytes.
+ inst->SetOpcode(Instruction::NOP);
+ inst->SetVRegA_10x(0u); // keep compliant with verifier.
+ // Get to next instruction which is the second half of check-cast and replace
+ // it by a NOP.
+ inst = const_cast<Instruction*>(inst->Next());
+ inst->SetOpcode(Instruction::NOP);
+ inst->SetVRegA_10x(0u); // keep compliant with verifier.
+ }
return inst;
}
-void DexCompiler::CompileInstanceFieldAccess(Instruction* inst,
- uint32_t dex_pc,
- Instruction::Code new_opcode,
- bool is_put) {
+void DexToDexCompiler::CompilationState::CompileInstanceFieldAccess(Instruction* inst,
+ uint32_t dex_pc,
+ Instruction::Code new_opcode,
+ bool is_put) {
if (!kEnableQuickening) {
return;
}
- uint32_t field_idx = inst->VRegC_22c();
+ uint32_t field_idx = GetIndexForInstruction(inst, inst->VRegC_22c());
MemberOffset field_offset(0u);
bool is_volatile;
bool fast_path = driver_.ComputeInstanceFieldInfo(field_idx, &unit_, is_put,
@@ -278,20 +444,29 @@ void DexCompiler::CompileInstanceFieldAccess(Instruction* inst,
<< " by field offset " << field_offset.Int32Value()
<< " at dex pc " << StringPrintf("0x%x", dex_pc) << " in method "
<< GetDexFile().PrettyMethod(unit_.GetDexMethodIndex(), true);
- // We are modifying 4 consecutive bytes.
- inst->SetOpcode(new_opcode);
- // Replace field index by field offset.
- inst->SetVRegC_22c(static_cast<uint16_t>(field_offset.Int32Value()));
+ if (!already_quickened_) {
+ // We are modifying 4 consecutive bytes.
+ inst->SetOpcode(new_opcode);
+ // Replace field index by field offset.
+ inst->SetVRegC_22c(static_cast<uint16_t>(field_offset.Int32Value()));
+ }
quickened_info_.push_back(QuickenedInfo(dex_pc, field_idx));
}
}
-void DexCompiler::CompileInvokeVirtual(Instruction* inst, uint32_t dex_pc,
- Instruction::Code new_opcode, bool is_range) {
+const DexFile& DexToDexCompiler::CompilationState::GetDexFile() const {
+ return *unit_.GetDexFile();
+}
+
+void DexToDexCompiler::CompilationState::CompileInvokeVirtual(Instruction* inst,
+ uint32_t dex_pc,
+ Instruction::Code new_opcode,
+ bool is_range) {
if (!kEnableQuickening) {
return;
}
- uint32_t method_idx = is_range ? inst->VRegB_3rc() : inst->VRegB_35c();
+ uint32_t method_idx = GetIndexForInstruction(inst,
+ is_range ? inst->VRegB_3rc() : inst->VRegB_35c());
ScopedObjectAccess soa(Thread::Current());
ClassLinker* class_linker = unit_.GetClassLinker();
@@ -318,19 +493,20 @@ void DexCompiler::CompileInvokeVirtual(Instruction* inst, uint32_t dex_pc,
<< " by vtable index " << vtable_idx
<< " at dex pc " << StringPrintf("0x%x", dex_pc) << " in method "
<< GetDexFile().PrettyMethod(unit_.GetDexMethodIndex(), true);
- // We are modifying 4 consecutive bytes.
- inst->SetOpcode(new_opcode);
- // Replace method index by vtable index.
- if (is_range) {
- inst->SetVRegB_3rc(static_cast<uint16_t>(vtable_idx));
- } else {
- inst->SetVRegB_35c(static_cast<uint16_t>(vtable_idx));
+ if (!already_quickened_) {
+ // We are modifying 4 consecutive bytes.
+ inst->SetOpcode(new_opcode);
+ // Replace method index by vtable index.
+ if (is_range) {
+ inst->SetVRegB_3rc(static_cast<uint16_t>(vtable_idx));
+ } else {
+ inst->SetVRegB_35c(static_cast<uint16_t>(vtable_idx));
+ }
}
quickened_info_.push_back(QuickenedInfo(dex_pc, method_idx));
}
-CompiledMethod* ArtCompileDEX(
- CompilerDriver* driver,
+CompiledMethod* DexToDexCompiler::CompileMethod(
const DexFile::CodeItem* code_item,
uint32_t access_flags,
InvokeType invoke_type ATTRIBUTE_UNUSED,
@@ -338,69 +514,175 @@ CompiledMethod* ArtCompileDEX(
uint32_t method_idx,
Handle<mirror::ClassLoader> class_loader,
const DexFile& dex_file,
- DexToDexCompilationLevel dex_to_dex_compilation_level) {
- DCHECK(driver != nullptr);
- if (dex_to_dex_compilation_level != DexToDexCompilationLevel::kDontDexToDexCompile) {
- ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<1> hs(soa.Self());
- ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
- art::DexCompilationUnit unit(
- class_loader,
- class_linker,
- dex_file,
- code_item,
- class_def_idx,
- method_idx,
- access_flags,
- driver->GetVerifiedMethod(&dex_file, method_idx),
- hs.NewHandle(class_linker->FindDexCache(soa.Self(), dex_file)));
- art::optimizer::DexCompiler dex_compiler(*driver, unit, dex_to_dex_compilation_level);
- dex_compiler.Compile();
- if (dex_compiler.GetQuickenedInfo().empty()) {
- // No need to create a CompiledMethod if there are no quickened opcodes.
+ CompilationLevel compilation_level) {
+ if (compilation_level == CompilationLevel::kDontDexToDexCompile) {
+ return nullptr;
+ }
+
+ ScopedObjectAccess soa(Thread::Current());
+ StackHandleScope<1> hs(soa.Self());
+ ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
+ art::DexCompilationUnit unit(
+ class_loader,
+ class_linker,
+ dex_file,
+ code_item,
+ class_def_idx,
+ method_idx,
+ access_flags,
+ driver_->GetVerifiedMethod(&dex_file, method_idx),
+ hs.NewHandle(class_linker->FindDexCache(soa.Self(), dex_file)));
+
+ std::vector<uint8_t> quicken_data;
+ // If the code item is shared with multiple different method ids, make sure that we quicken only
+ // once and verify that all the dequicken maps match.
+ if (UNLIKELY(shared_code_items_.find(code_item) != shared_code_items_.end())) {
+ // Avoid quickening the shared code items for now because the existing conflict detection logic
+ // does not currently handle cases where the code item is quickened in one place but
+ // compiled in another.
+ static constexpr bool kAvoidQuickeningSharedCodeItems = true;
+ if (kAvoidQuickeningSharedCodeItems) {
return nullptr;
}
+ // For shared code items, use a lock to prevent races.
+ MutexLock mu(soa.Self(), lock_);
+ auto existing = shared_code_item_quicken_info_.find(code_item);
+ QuickenState* existing_data = nullptr;
+ std::vector<uint8_t>* existing_quicken_data = nullptr;
+ if (existing != shared_code_item_quicken_info_.end()) {
+ existing_data = &existing->second;
+ if (existing_data->conflict_) {
+ return nullptr;
+ }
+ existing_quicken_data = &existing_data->quicken_data_;
+ }
+ bool optimized_return_void;
+ {
+ CompilationState state(this, unit, compilation_level, existing_quicken_data);
+ quicken_data = state.Compile();
+ optimized_return_void = state.optimized_return_void_;
+ }
- // Create a `CompiledMethod`, with the quickened information in the vmap table.
+ // Already quickened, check that the data matches what was previously seen.
+ MethodReference method_ref(&dex_file, method_idx);
+ if (existing_data != nullptr) {
+ if (*existing_quicken_data != quicken_data ||
+ existing_data->optimized_return_void_ != optimized_return_void) {
+ VLOG(compiler) << "Quicken data mismatch, for method "
+ << dex_file.PrettyMethod(method_idx);
+ // Mark the method as a conflict to never attempt to quicken it in the future.
+ existing_data->conflict_ = true;
+ }
+ existing_data->methods_.push_back(method_ref);
+ } else {
+ QuickenState new_state;
+ new_state.methods_.push_back(method_ref);
+ new_state.quicken_data_ = quicken_data;
+ new_state.optimized_return_void_ = optimized_return_void;
+ bool inserted = shared_code_item_quicken_info_.emplace(code_item, new_state).second;
+ CHECK(inserted) << "Failed to insert " << dex_file.PrettyMethod(method_idx);
+ }
+
+ // Sanity check: re-run the compilation using the newly produced quicken data and verify
+ // that it reproduces the same data.
+ // Note that this needs to be behind the lock for this case since we may unquicken in another
+ // thread.
if (kIsDebugBuild) {
- // Double check that the counts line up with the size of the quicken info.
- size_t quicken_count = 0;
- for (const DexInstructionPcPair& pair : unit.GetCodeItemAccessor()) {
- if (QuickenInfoTable::NeedsIndexForInstruction(&pair.Inst())) {
- ++quicken_count;
+ CompilationState state2(this, unit, compilation_level, &quicken_data);
+ std::vector<uint8_t> new_data = state2.Compile();
+ CHECK(new_data == quicken_data) << "Mismatch producing new quicken data";
+ }
+ } else {
+ CompilationState state(this, unit, compilation_level, /*quicken_data*/ nullptr);
+ quicken_data = state.Compile();
+
+ // Sanity check: re-run the compilation using the newly produced quicken data and verify
+ // that it reproduces the same data.
+ if (kIsDebugBuild) {
+ CompilationState state2(this, unit, compilation_level, &quicken_data);
+ std::vector<uint8_t> new_data = state2.Compile();
+ CHECK(new_data == quicken_data) << "Mismatch producing new quicken data";
+ }
+ }
+
+ if (quicken_data.empty()) {
+ return nullptr;
+ }
+
+ // Create a `CompiledMethod`, with the quickened information in the vmap table.
+ InstructionSet instruction_set = driver_->GetInstructionSet();
+ if (instruction_set == InstructionSet::kThumb2) {
+ // Don't use the thumb2 instruction set to avoid the one off code delta.
+ instruction_set = InstructionSet::kArm;
+ }
+ CompiledMethod* ret = CompiledMethod::SwapAllocCompiledMethod(
+ driver_,
+ instruction_set,
+ ArrayRef<const uint8_t>(), // no code
+ 0,
+ 0,
+ 0,
+ ArrayRef<const uint8_t>(), // method_info
+ ArrayRef<const uint8_t>(quicken_data), // vmap_table
+ ArrayRef<const uint8_t>(), // cfi data
+ ArrayRef<const linker::LinkerPatch>());
+ DCHECK(ret != nullptr);
+ return ret;
+}
+
+void DexToDexCompiler::SetDexFiles(const std::vector<const DexFile*>& dex_files) {
+ // Record what code items are already seen to detect when multiple methods have the same code
+ // item.
+ std::unordered_set<const DexFile::CodeItem*> seen_code_items;
+ for (const DexFile* dex_file : dex_files) {
+ for (size_t i = 0; i < dex_file->NumClassDefs(); ++i) {
+ const DexFile::ClassDef& class_def = dex_file->GetClassDef(i);
+ const uint8_t* class_data = dex_file->GetClassData(class_def);
+ if (class_data == nullptr) {
+ continue;
+ }
+ ClassDataItemIterator it(*dex_file, class_data);
+ it.SkipAllFields();
+ for (; it.HasNextMethod(); it.Next()) {
+ const DexFile::CodeItem* code_item = it.GetMethodCodeItem();
+ // Detect the shared code items.
+ if (!seen_code_items.insert(code_item).second) {
+ shared_code_items_.insert(code_item);
}
}
- CHECK_EQ(quicken_count, dex_compiler.GetQuickenedInfo().size());
- }
- std::vector<uint8_t> quicken_data;
- for (QuickenedInfo info : dex_compiler.GetQuickenedInfo()) {
- // Dex pc is not serialized, only used for checking the instructions. Since we access the
- // array based on the index of the quickened instruction, the indexes must line up perfectly.
- // The reader side uses the NeedsIndexForInstruction function too.
- const Instruction& inst = unit.GetCodeItemAccessor().InstructionAt(info.dex_pc);
- CHECK(QuickenInfoTable::NeedsIndexForInstruction(&inst)) << inst.Opcode();
- // Add the index.
- quicken_data.push_back(static_cast<uint8_t>(info.dex_member_index >> 0));
- quicken_data.push_back(static_cast<uint8_t>(info.dex_member_index >> 8));
}
- InstructionSet instruction_set = driver->GetInstructionSet();
- if (instruction_set == InstructionSet::kThumb2) {
- // Don't use the thumb2 instruction set to avoid the one off code delta.
- instruction_set = InstructionSet::kArm;
+ }
+ VLOG(compiler) << "Shared code items " << shared_code_items_.size();
+}
+
+void DexToDexCompiler::UnquickenConflictingMethods() {
+ MutexLock mu(Thread::Current(), lock_);
+ size_t unquicken_count = 0;
+ for (const auto& pair : shared_code_item_quicken_info_) {
+ const DexFile::CodeItem* code_item = pair.first;
+ const QuickenState& state = pair.second;
+ CHECK_GE(state.methods_.size(), 1u);
+ if (state.conflict_) {
+ // Unquicken using the existing quicken data.
+ // TODO: Do we really need to pass a dex file in?
+ optimizer::ArtDecompileDEX(*state.methods_[0].dex_file,
+ *code_item,
+ ArrayRef<const uint8_t>(state.quicken_data_),
+ /* decompile_return_instruction */ true);
+ ++unquicken_count;
+ // Go clear the vmaps for all the methods that were already quickened to avoid writing them
+ // out during oat writing.
+ for (const MethodReference& ref : state.methods_) {
+ CompiledMethod* method = driver_->RemoveCompiledMethod(ref);
+ if (method != nullptr) {
+ // There is up to one compiled method for each method ref. Releasing it leaves the
+ // deduped data intact, so it is safe to do even when other threads might be
+ // compiling.
+ CompiledMethod::ReleaseSwapAllocatedCompiledMethod(driver_, method);
+ }
+ }
}
- return CompiledMethod::SwapAllocCompiledMethod(
- driver,
- instruction_set,
- ArrayRef<const uint8_t>(), // no code
- 0,
- 0,
- 0,
- ArrayRef<const uint8_t>(), // method_info
- ArrayRef<const uint8_t>(quicken_data), // vmap_table
- ArrayRef<const uint8_t>(), // cfi data
- ArrayRef<const linker::LinkerPatch>());
}
- return nullptr;
}
} // namespace optimizer
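
The quickening output is now serialized through QuickenInfoTable rather than raw byte pairs, and the same table is replayed via NextIndex() when a code item arrives already quickened. A short sketch of that round trip, restricted to the QuickenInfoTable calls visible in this diff (Builder(&vector, count), AddIndex(), NumIndices(), GetData()); it assumes the in-tree headers and a hypothetical 'indices' vector, so it is not standalone:

    // Writing: one entry per instruction for which NeedsIndexForInstruction() is true.
    std::vector<uint8_t> quicken_data;
    QuickenInfoTable::Builder builder(&quicken_data, indices.size());  // length encoded by ctor
    for (uint16_t index : indices) {
      builder.AddIndex(index);
    }

    // Reading back, as CompilationState::NextIndex() does for already-quickened code items.
    QuickenInfoTable table(ArrayRef<const uint8_t>(quicken_data));
    for (uint32_t i = 0; i < table.NumIndices(); ++i) {
      uint16_t original_index = table.GetData(i);  // dequickened field/method/type index
      static_cast<void>(original_index);
    }
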
diff --git a/compiler/dex/dex_to_dex_compiler.h b/compiler/dex/dex_to_dex_compiler.h
index 80b94d2dc3..7df09f140c 100644
--- a/compiler/dex/dex_to_dex_compiler.h
+++ b/compiler/dex/dex_to_dex_compiler.h
@@ -17,14 +17,22 @@
#ifndef ART_COMPILER_DEX_DEX_TO_DEX_COMPILER_H_
#define ART_COMPILER_DEX_DEX_TO_DEX_COMPILER_H_
+#include <set>
+#include <unordered_map>
+#include <unordered_set>
+
+#include "base/bit_vector.h"
#include "dex/dex_file.h"
+#include "dex/invoke_type.h"
#include "handle.h"
-#include "invoke_type.h"
+#include "method_reference.h"
+#include "quicken_info.h"
namespace art {
class CompiledMethod;
class CompilerDriver;
+class DexCompilationUnit;
namespace mirror {
class ClassLoader;
@@ -32,21 +40,79 @@ class ClassLoader;
namespace optimizer {
-enum class DexToDexCompilationLevel {
- kDontDexToDexCompile, // Only meaning wrt image time interpretation.
- kOptimize // Perform peep-hole optimizations.
+class DexToDexCompiler {
+ public:
+ enum class CompilationLevel {
+ kDontDexToDexCompile, // Only meaning wrt image time interpretation.
+ kOptimize // Perform peep-hole optimizations.
+ };
+
+ explicit DexToDexCompiler(CompilerDriver* driver);
+
+ CompiledMethod* CompileMethod(const DexFile::CodeItem* code_item,
+ uint32_t access_flags,
+ InvokeType invoke_type,
+ uint16_t class_def_idx,
+ uint32_t method_idx,
+ Handle<mirror::ClassLoader> class_loader,
+ const DexFile& dex_file,
+ const CompilationLevel compilation_level) WARN_UNUSED;
+
+ void MarkForCompilation(Thread* self,
+ const MethodReference& method_ref);
+
+ void ClearState();
+
+ // Unquicken all methods that have conflicting quicken info. This is not done during the
+ // quickening process to avoid race conditions.
+ void UnquickenConflictingMethods();
+
+ CompilerDriver* GetDriver() {
+ return driver_;
+ }
+
+ bool ShouldCompileMethod(const MethodReference& ref);
+
+ // Return the number of code items to quicken.
+ size_t NumCodeItemsToQuicken(Thread* self) const;
+
+ void SetDexFiles(const std::vector<const DexFile*>& dex_files);
+
+ private:
+ // Holds the state for compiling a single method.
+ struct CompilationState;
+
+ // Quicken state for a code item, may be referenced by multiple methods.
+ struct QuickenState {
+ std::vector<MethodReference> methods_;
+ std::vector<uint8_t> quicken_data_;
+ bool optimized_return_void_ = false;
+ bool conflict_ = false;
+ };
+
+ BitVector* GetOrAddBitVectorForDex(const DexFile* dex_file) REQUIRES(lock_);
+
+ CompilerDriver* const driver_;
+
+ // State for adding methods (should this be in its own class?).
+ const DexFile* active_dex_file_ = nullptr;
+ BitVector* active_bit_vector_ = nullptr;
+
+ // Lock that guards duplicate code items and the bitmap.
+ mutable Mutex lock_;
+ // Record what method references are going to get quickened.
+ std::unordered_map<const DexFile*, BitVector> should_quicken_;
+ // Guarded by lock_ during writing, accessed without a lock during quickening.
+ // This is safe because no thread is adding to the shared code items during the quickening phase.
+ std::unordered_set<const DexFile::CodeItem*> shared_code_items_;
+ // Blacklisted code items are unquickened in UnquickenConflictingMethods.
+ std::unordered_map<const DexFile::CodeItem*, QuickenState> shared_code_item_quicken_info_
+ GUARDED_BY(lock_);
+ // Number of added code items.
+ size_t num_code_items_ GUARDED_BY(lock_) = 0u;
};
-std::ostream& operator<<(std::ostream& os, const DexToDexCompilationLevel& rhs);
-
-CompiledMethod* ArtCompileDEX(CompilerDriver* driver,
- const DexFile::CodeItem* code_item,
- uint32_t access_flags,
- InvokeType invoke_type,
- uint16_t class_def_idx,
- uint32_t method_idx,
- Handle<mirror::ClassLoader> class_loader,
- const DexFile& dex_file,
- DexToDexCompilationLevel dex_to_dex_compilation_level);
+
+std::ostream& operator<<(std::ostream& os, const DexToDexCompiler::CompilationLevel& rhs);
} // namespace optimizer
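
Taken together, the header replaces the free ArtCompileDEX() entry point with a two-phase flow owned by the compiler driver. A hedged sketch of the call sequence, using only methods declared above; driver, self, dex_files and the per-method values (code_item, access_flags, and so on) are stand-ins for what the surrounding compiler driver code already has in scope:

    optimizer::DexToDexCompiler compiler(driver);
    compiler.SetDexFiles(dex_files);  // records code items shared by multiple methods

    // Pass 1: while walking classes, mark the methods that should be quickened.
    compiler.MarkForCompilation(self, MethodReference(dex_file, method_idx));

    // Pass 2: compile the marked methods, then resolve conflicting shared code items.
    if (compiler.ShouldCompileMethod(MethodReference(dex_file, method_idx))) {
      CompiledMethod* compiled = compiler.CompileMethod(code_item,
                                                        access_flags,
                                                        invoke_type,
                                                        class_def_idx,
                                                        method_idx,
                                                        class_loader,
                                                        *dex_file,
                                                        compilation_level);
      static_cast<void>(compiled);  // nullptr when nothing was quickened
    }
    compiler.UnquickenConflictingMethods();
    compiler.ClearState();
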
diff --git a/compiler/dex/inline_method_analyser.cc b/compiler/dex/inline_method_analyser.cc
index ce67b85b99..dc044c1210 100644
--- a/compiler/dex/inline_method_analyser.cc
+++ b/compiler/dex/inline_method_analyser.cc
@@ -142,7 +142,7 @@ ArtMethod* GetTargetConstructor(ArtMethod* method, const Instruction* invoke_dir
REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK_EQ(invoke_direct->Opcode(), Instruction::INVOKE_DIRECT);
if (kIsDebugBuild) {
- CodeItemDataAccessor accessor(method);
+ CodeItemDataAccessor accessor(method->DexInstructionData());
DCHECK_EQ(invoke_direct->VRegC_35c(),
accessor.RegistersSize() - accessor.InsSize());
}
@@ -324,9 +324,9 @@ bool DoAnalyseConstructor(const CodeItemDataAccessor* code_item,
return false;
}
if (target_method->GetDeclaringClass()->IsObjectClass()) {
- DCHECK_EQ(CodeItemDataAccessor(target_method).begin()->Opcode(), Instruction::RETURN_VOID);
+ DCHECK_EQ(target_method->DexInstructionData().begin()->Opcode(), Instruction::RETURN_VOID);
} else {
- CodeItemDataAccessor target_code_item(target_method);
+ CodeItemDataAccessor target_code_item(target_method->DexInstructionData());
if (!target_code_item.HasCodeItem()) {
return false; // Native constructor?
}
@@ -430,7 +430,7 @@ static_assert(InlineMethodAnalyser::IGetVariant(Instruction::IGET_SHORT) ==
InlineMethodAnalyser::IPutVariant(Instruction::IPUT_SHORT), "iget/iput_short variant");
bool InlineMethodAnalyser::AnalyseMethodCode(ArtMethod* method, InlineMethod* result) {
- CodeItemDataAccessor code_item(method);
+ CodeItemDataAccessor code_item(method->DexInstructionData());
if (!code_item.HasCodeItem()) {
// Native or abstract.
return false;
diff --git a/compiler/dex/quick_compiler_callbacks.cc b/compiler/dex/quick_compiler_callbacks.cc
index 540bd0ce45..baf97a852e 100644
--- a/compiler/dex/quick_compiler_callbacks.cc
+++ b/compiler/dex/quick_compiler_callbacks.cc
@@ -17,6 +17,10 @@
#include "quick_compiler_callbacks.h"
#include "driver/compiler_driver.h"
+#include "mirror/class-inl.h"
+#include "mirror/object.h"
+#include "obj_ptr-inl.h"
+#include "thread-current-inl.h"
#include "verification_results.h"
#include "verifier/method_verifier-inl.h"
@@ -54,4 +58,15 @@ void QuickCompilerCallbacks::UpdateClassState(ClassReference ref, ClassStatus st
}
}
+bool QuickCompilerCallbacks::CanUseOatStatusForVerification(mirror::Class* klass) {
+ // No dex files: conservatively false.
+ if (dex_files_ == nullptr) {
+ return false;
+ }
+
+ // If the class isn't from one of the dex files, accept oat file data.
+ const DexFile* dex_file = &klass->GetDexFile();
+ return std::find(dex_files_->begin(), dex_files_->end(), dex_file) == dex_files_->end();
+}
+
} // namespace art
diff --git a/compiler/dex/quick_compiler_callbacks.h b/compiler/dex/quick_compiler_callbacks.h
index 6d22f955a3..8a07e9c12c 100644
--- a/compiler/dex/quick_compiler_callbacks.h
+++ b/compiler/dex/quick_compiler_callbacks.h
@@ -23,12 +23,13 @@
namespace art {
class CompilerDriver;
+class DexFile;
class VerificationResults;
class QuickCompilerCallbacks FINAL : public CompilerCallbacks {
public:
explicit QuickCompilerCallbacks(CompilerCallbacks::CallbackMode mode)
- : CompilerCallbacks(mode) {}
+ : CompilerCallbacks(mode), dex_files_(nullptr) {}
~QuickCompilerCallbacks() { }
@@ -65,11 +66,19 @@ class QuickCompilerCallbacks FINAL : public CompilerCallbacks {
void UpdateClassState(ClassReference ref, ClassStatus state) OVERRIDE;
+ bool CanUseOatStatusForVerification(mirror::Class* klass) OVERRIDE
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ void SetDexFiles(const std::vector<const DexFile*>* dex_files) {
+ dex_files_ = dex_files;
+ }
+
private:
VerificationResults* verification_results_ = nullptr;
bool does_class_unloading_ = false;
CompilerDriver* compiler_driver_ = nullptr;
std::unique_ptr<verifier::VerifierDeps> verifier_deps_;
+ const std::vector<const DexFile*>* dex_files_;
};
} // namespace art
diff --git a/compiler/driver/compiled_method_storage.cc b/compiler/driver/compiled_method_storage.cc
index c8c2b6998f..48477abe5b 100644
--- a/compiler/driver/compiled_method_storage.cc
+++ b/compiler/driver/compiled_method_storage.cc
@@ -137,16 +137,7 @@ class CompiledMethodStorage::DedupeHashFunc {
return hash;
} else {
- size_t hash = 0x811c9dc5;
- for (uint32_t i = 0; i < len; ++i) {
- hash = (hash * 16777619) ^ data[i];
- }
- hash += hash << 13;
- hash ^= hash >> 7;
- hash += hash << 3;
- hash ^= hash >> 17;
- hash += hash << 5;
- return hash;
+ return HashBytes(data, len);
}
}
};
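
For reference, the removed inline byte hash shown above, as a standalone function: an FNV-1a-style mix over the bytes followed by a finalization scramble. The change routes this through the shared HashBytes() helper instead, so the dedupe storage no longer carries its own copy (the helper's exact algorithm is not part of this diff):

    #include <cstddef>
    #include <cstdint>

    // The inline hash deleted from DedupeHashFunc, reproduced in function form.
    size_t RemovedDedupeHash(const uint8_t* data, size_t len) {
      size_t hash = 0x811c9dc5;
      for (size_t i = 0; i < len; ++i) {
        hash = (hash * 16777619) ^ data[i];
      }
      hash += hash << 13;
      hash ^= hash >> 7;
      hash += hash << 3;
      hash ^= hash >> 17;
      hash += hash << 5;
      return hash;
    }
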
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index c0886d0185..fb428b8d9a 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -56,6 +56,7 @@
#include "gc/space/space.h"
#include "handle_scope-inl.h"
#include "intrinsics_enum.h"
+#include "jit/profile_compilation_info.h"
#include "jni_internal.h"
#include "linker/linker_patch.h"
#include "mirror/class-inl.h"
@@ -255,24 +256,6 @@ class CompilerDriver::AOTCompilationStats {
DISALLOW_COPY_AND_ASSIGN(AOTCompilationStats);
};
-class CompilerDriver::DexFileMethodSet {
- public:
- explicit DexFileMethodSet(const DexFile& dex_file)
- : dex_file_(dex_file),
- method_indexes_(dex_file.NumMethodIds(), false, Allocator::GetMallocAllocator()) {
- }
- DexFileMethodSet(DexFileMethodSet&& other) = default;
-
- const DexFile& GetDexFile() const { return dex_file_; }
-
- BitVector& GetMethodIndexes() { return method_indexes_; }
- const BitVector& GetMethodIndexes() const { return method_indexes_; }
-
- private:
- const DexFile& dex_file_;
- BitVector method_indexes_;
-};
-
CompilerDriver::CompilerDriver(
const CompilerOptions* compiler_options,
VerificationResults* verification_results,
@@ -306,9 +289,7 @@ CompilerDriver::CompilerDriver(
compiled_method_storage_(swap_fd),
profile_compilation_info_(profile_compilation_info),
max_arena_alloc_(0),
- dex_to_dex_references_lock_("dex-to-dex references lock"),
- dex_to_dex_references_(),
- current_dex_to_dex_methods_(nullptr) {
+ dex_to_dex_compiler_(this) {
DCHECK(compiler_options_ != nullptr);
compiler_->Init();
@@ -398,10 +379,16 @@ void CompilerDriver::CompileAll(jobject class_loader,
FreeThreadPools();
}
-static optimizer::DexToDexCompilationLevel GetDexToDexCompilationLevel(
+static optimizer::DexToDexCompiler::CompilationLevel GetDexToDexCompilationLevel(
Thread* self, const CompilerDriver& driver, Handle<mirror::ClassLoader> class_loader,
const DexFile& dex_file, const DexFile::ClassDef& class_def)
REQUIRES_SHARED(Locks::mutator_lock_) {
+ // When the dex file is uncompressed in the APK, we do not generate a copy in the .vdex
+ // file. As a result, dex2oat will map the dex file read-only, and we only need to check
+ // that to know if we can do quickening.
+ if (dex_file.GetContainer() != nullptr && dex_file.GetContainer()->IsReadOnly()) {
+ return optimizer::DexToDexCompiler::CompilationLevel::kDontDexToDexCompile;
+ }
auto* const runtime = Runtime::Current();
DCHECK(driver.GetCompilerOptions().IsQuickeningCompilationEnabled());
const char* descriptor = dex_file.GetClassDescriptor(class_def);
@@ -410,7 +397,7 @@ static optimizer::DexToDexCompilationLevel GetDexToDexCompilationLevel(
if (klass == nullptr) {
CHECK(self->IsExceptionPending());
self->ClearException();
- return optimizer::DexToDexCompilationLevel::kDontDexToDexCompile;
+ return optimizer::DexToDexCompiler::CompilationLevel::kDontDexToDexCompile;
}
// DexToDex at the kOptimize level may introduce quickened opcodes, which replace symbolic
// references with actual offsets. We cannot re-verify such instructions.
@@ -418,26 +405,23 @@ static optimizer::DexToDexCompilationLevel GetDexToDexCompilationLevel(
// We store the verification information in the class status in the oat file, which the linker
// can validate (checksums) and use to skip load-time verification. It is thus safe to
// optimize when a class has been fully verified before.
- optimizer::DexToDexCompilationLevel max_level = optimizer::DexToDexCompilationLevel::kOptimize;
+ optimizer::DexToDexCompiler::CompilationLevel max_level =
+ optimizer::DexToDexCompiler::CompilationLevel::kOptimize;
if (driver.GetCompilerOptions().GetDebuggable()) {
// We are debuggable so definitions of classes might be changed. We don't want to do any
// optimizations that could break that.
- max_level = optimizer::DexToDexCompilationLevel::kDontDexToDexCompile;
- }
- if (!VdexFile::CanEncodeQuickenedData(dex_file)) {
- // Don't do any dex level optimizations if we cannot encode the quickening.
- return optimizer::DexToDexCompilationLevel::kDontDexToDexCompile;
+ max_level = optimizer::DexToDexCompiler::CompilationLevel::kDontDexToDexCompile;
}
if (klass->IsVerified()) {
// Class is verified so we can enable DEX-to-DEX compilation for performance.
return max_level;
} else {
// Class verification has failed: do not run DEX-to-DEX optimizations.
- return optimizer::DexToDexCompilationLevel::kDontDexToDexCompile;
+ return optimizer::DexToDexCompiler::CompilationLevel::kDontDexToDexCompile;
}
}
-static optimizer::DexToDexCompilationLevel GetDexToDexCompilationLevel(
+static optimizer::DexToDexCompiler::CompilationLevel GetDexToDexCompilationLevel(
Thread* self,
const CompilerDriver& driver,
jobject jclass_loader,
@@ -465,97 +449,39 @@ static bool InstructionSetHasGenericJniStub(InstructionSet isa) {
}
}
-static void CompileMethod(Thread* self,
- CompilerDriver* driver,
- const DexFile::CodeItem* code_item,
- uint32_t access_flags,
- InvokeType invoke_type,
- uint16_t class_def_idx,
- uint32_t method_idx,
- Handle<mirror::ClassLoader> class_loader,
- const DexFile& dex_file,
- optimizer::DexToDexCompilationLevel dex_to_dex_compilation_level,
- bool compilation_enabled,
- Handle<mirror::DexCache> dex_cache) {
+template <typename CompileFn>
+static void CompileMethodHarness(
+ Thread* self,
+ CompilerDriver* driver,
+ const DexFile::CodeItem* code_item,
+ uint32_t access_flags,
+ InvokeType invoke_type,
+ uint16_t class_def_idx,
+ uint32_t method_idx,
+ Handle<mirror::ClassLoader> class_loader,
+ const DexFile& dex_file,
+ optimizer::DexToDexCompiler::CompilationLevel dex_to_dex_compilation_level,
+ bool compilation_enabled,
+ Handle<mirror::DexCache> dex_cache,
+ CompileFn compile_fn) {
DCHECK(driver != nullptr);
- CompiledMethod* compiled_method = nullptr;
+ CompiledMethod* compiled_method;
uint64_t start_ns = kTimeCompileMethod ? NanoTime() : 0;
MethodReference method_ref(&dex_file, method_idx);
- if (driver->GetCurrentDexToDexMethods() != nullptr) {
- // This is the second pass when we dex-to-dex compile previously marked methods.
- // TODO: Refactor the compilation to avoid having to distinguish the two passes
- // here. That should be done on a higher level. http://b/29089975
- if (driver->GetCurrentDexToDexMethods()->IsBitSet(method_idx)) {
- VerificationResults* results = driver->GetVerificationResults();
- DCHECK(results != nullptr);
- const VerifiedMethod* verified_method = results->GetVerifiedMethod(method_ref);
- // Do not optimize if a VerifiedMethod is missing. SafeCast elision,
- // for example, relies on it.
- compiled_method = optimizer::ArtCompileDEX(
- driver,
- code_item,
- access_flags,
- invoke_type,
- class_def_idx,
- method_idx,
- class_loader,
- dex_file,
- (verified_method != nullptr)
- ? dex_to_dex_compilation_level
- : optimizer::DexToDexCompilationLevel::kDontDexToDexCompile);
- }
- } else if ((access_flags & kAccNative) != 0) {
- // Are we extracting only and have support for generic JNI down calls?
- if (!driver->GetCompilerOptions().IsJniCompilationEnabled() &&
- InstructionSetHasGenericJniStub(driver->GetInstructionSet())) {
- // Leaving this empty will trigger the generic JNI version
- } else {
- // Query any JNI optimization annotations such as @FastNative or @CriticalNative.
- access_flags |= annotations::GetNativeMethodAnnotationAccessFlags(
- dex_file, dex_file.GetClassDef(class_def_idx), method_idx);
+ compiled_method = compile_fn(self,
+ driver,
+ code_item,
+ access_flags,
+ invoke_type,
+ class_def_idx,
+ method_idx,
+ class_loader,
+ dex_file,
+ dex_to_dex_compilation_level,
+ compilation_enabled,
+ dex_cache);
- compiled_method = driver->GetCompiler()->JniCompile(
- access_flags, method_idx, dex_file, dex_cache);
- CHECK(compiled_method != nullptr);
- }
- } else if ((access_flags & kAccAbstract) != 0) {
- // Abstract methods don't have code.
- } else {
- VerificationResults* results = driver->GetVerificationResults();
- DCHECK(results != nullptr);
- const VerifiedMethod* verified_method = results->GetVerifiedMethod(method_ref);
- bool compile = compilation_enabled &&
- // Basic checks, e.g., not <clinit>.
- results->IsCandidateForCompilation(method_ref, access_flags) &&
- // Did not fail to create VerifiedMethod metadata.
- verified_method != nullptr &&
- // Do not have failures that should punt to the interpreter.
- !verified_method->HasRuntimeThrow() &&
- (verified_method->GetEncounteredVerificationFailures() &
- (verifier::VERIFY_ERROR_FORCE_INTERPRETER | verifier::VERIFY_ERROR_LOCKING)) == 0 &&
- // Is eligable for compilation by methods-to-compile filter.
- driver->IsMethodToCompile(method_ref) &&
- driver->ShouldCompileBasedOnProfile(method_ref);
-
- if (compile) {
- // NOTE: if compiler declines to compile this method, it will return null.
- compiled_method = driver->GetCompiler()->Compile(code_item,
- access_flags,
- invoke_type,
- class_def_idx,
- method_idx,
- class_loader,
- dex_file,
- dex_cache);
- }
- if (compiled_method == nullptr &&
- dex_to_dex_compilation_level != optimizer::DexToDexCompilationLevel::kDontDexToDexCompile) {
- DCHECK(!Runtime::Current()->UseJitCompilation());
- // TODO: add a command-line option to disable DEX-to-DEX compilation ?
- driver->MarkForDexToDexCompilation(self, method_ref);
- }
- }
if (kTimeCompileMethod) {
uint64_t duration_ns = NanoTime() - start_ns;
if (duration_ns > MsToNs(driver->GetCompiler()->GetMaximumCompilationTimeBeforeWarning())) {
@@ -586,6 +512,170 @@ static void CompileMethod(Thread* self,
}
}
+static void CompileMethodDex2Dex(
+ Thread* self,
+ CompilerDriver* driver,
+ const DexFile::CodeItem* code_item,
+ uint32_t access_flags,
+ InvokeType invoke_type,
+ uint16_t class_def_idx,
+ uint32_t method_idx,
+ Handle<mirror::ClassLoader> class_loader,
+ const DexFile& dex_file,
+ optimizer::DexToDexCompiler::CompilationLevel dex_to_dex_compilation_level,
+ bool compilation_enabled,
+ Handle<mirror::DexCache> dex_cache) {
+ auto dex_2_dex_fn = [](Thread* self ATTRIBUTE_UNUSED,
+ CompilerDriver* driver,
+ const DexFile::CodeItem* code_item,
+ uint32_t access_flags,
+ InvokeType invoke_type,
+ uint16_t class_def_idx,
+ uint32_t method_idx,
+ Handle<mirror::ClassLoader> class_loader,
+ const DexFile& dex_file,
+ optimizer::DexToDexCompiler::CompilationLevel dex_to_dex_compilation_level,
+ bool compilation_enabled ATTRIBUTE_UNUSED,
+ Handle<mirror::DexCache> dex_cache ATTRIBUTE_UNUSED) -> CompiledMethod* {
+ DCHECK(driver != nullptr);
+ MethodReference method_ref(&dex_file, method_idx);
+
+ optimizer::DexToDexCompiler* const compiler = &driver->GetDexToDexCompiler();
+
+ if (compiler->ShouldCompileMethod(method_ref)) {
+ VerificationResults* results = driver->GetVerificationResults();
+ DCHECK(results != nullptr);
+ const VerifiedMethod* verified_method = results->GetVerifiedMethod(method_ref);
+ // Do not optimize if a VerifiedMethod is missing. SafeCast elision,
+ // for example, relies on it.
+ return compiler->CompileMethod(
+ code_item,
+ access_flags,
+ invoke_type,
+ class_def_idx,
+ method_idx,
+ class_loader,
+ dex_file,
+ (verified_method != nullptr)
+ ? dex_to_dex_compilation_level
+ : optimizer::DexToDexCompiler::CompilationLevel::kDontDexToDexCompile);
+ }
+ return nullptr;
+ };
+ CompileMethodHarness(self,
+ driver,
+ code_item,
+ access_flags,
+ invoke_type,
+ class_def_idx,
+ method_idx,
+ class_loader,
+ dex_file,
+ dex_to_dex_compilation_level,
+ compilation_enabled,
+ dex_cache,
+ dex_2_dex_fn);
+}
+
+static void CompileMethodQuick(
+ Thread* self,
+ CompilerDriver* driver,
+ const DexFile::CodeItem* code_item,
+ uint32_t access_flags,
+ InvokeType invoke_type,
+ uint16_t class_def_idx,
+ uint32_t method_idx,
+ Handle<mirror::ClassLoader> class_loader,
+ const DexFile& dex_file,
+ optimizer::DexToDexCompiler::CompilationLevel dex_to_dex_compilation_level,
+ bool compilation_enabled,
+ Handle<mirror::DexCache> dex_cache) {
+ auto quick_fn = [](
+ Thread* self,
+ CompilerDriver* driver,
+ const DexFile::CodeItem* code_item,
+ uint32_t access_flags,
+ InvokeType invoke_type,
+ uint16_t class_def_idx,
+ uint32_t method_idx,
+ Handle<mirror::ClassLoader> class_loader,
+ const DexFile& dex_file,
+ optimizer::DexToDexCompiler::CompilationLevel dex_to_dex_compilation_level,
+ bool compilation_enabled,
+ Handle<mirror::DexCache> dex_cache) {
+ DCHECK(driver != nullptr);
+ CompiledMethod* compiled_method = nullptr;
+ MethodReference method_ref(&dex_file, method_idx);
+
+ if ((access_flags & kAccNative) != 0) {
+ // Are we extracting only and have support for generic JNI down calls?
+ if (!driver->GetCompilerOptions().IsJniCompilationEnabled() &&
+ InstructionSetHasGenericJniStub(driver->GetInstructionSet())) {
+ // Leaving this empty will trigger the generic JNI version
+ } else {
+ // Query any JNI optimization annotations such as @FastNative or @CriticalNative.
+ access_flags |= annotations::GetNativeMethodAnnotationAccessFlags(
+ dex_file, dex_file.GetClassDef(class_def_idx), method_idx);
+
+ compiled_method = driver->GetCompiler()->JniCompile(
+ access_flags, method_idx, dex_file, dex_cache);
+ CHECK(compiled_method != nullptr);
+ }
+ } else if ((access_flags & kAccAbstract) != 0) {
+ // Abstract methods don't have code.
+ } else {
+ VerificationResults* results = driver->GetVerificationResults();
+ DCHECK(results != nullptr);
+ const VerifiedMethod* verified_method = results->GetVerifiedMethod(method_ref);
+ bool compile = compilation_enabled &&
+ // Basic checks, e.g., not <clinit>.
+ results->IsCandidateForCompilation(method_ref, access_flags) &&
+ // Did not fail to create VerifiedMethod metadata.
+ verified_method != nullptr &&
+ // Do not have failures that should punt to the interpreter.
+ !verified_method->HasRuntimeThrow() &&
+ (verified_method->GetEncounteredVerificationFailures() &
+ (verifier::VERIFY_ERROR_FORCE_INTERPRETER | verifier::VERIFY_ERROR_LOCKING)) == 0 &&
+ // Is eligible for compilation by methods-to-compile filter.
+ driver->IsMethodToCompile(method_ref) &&
+ driver->ShouldCompileBasedOnProfile(method_ref);
+
+ if (compile) {
+ // NOTE: if compiler declines to compile this method, it will return null.
+ compiled_method = driver->GetCompiler()->Compile(code_item,
+ access_flags,
+ invoke_type,
+ class_def_idx,
+ method_idx,
+ class_loader,
+ dex_file,
+ dex_cache);
+ }
+ if (compiled_method == nullptr &&
+ dex_to_dex_compilation_level !=
+ optimizer::DexToDexCompiler::CompilationLevel::kDontDexToDexCompile) {
+ DCHECK(!Runtime::Current()->UseJitCompilation());
+ // TODO: add a command-line option to disable DEX-to-DEX compilation ?
+ driver->GetDexToDexCompiler().MarkForCompilation(self, method_ref);
+ }
+ }
+ return compiled_method;
+ };
+ CompileMethodHarness(self,
+ driver,
+ code_item,
+ access_flags,
+ invoke_type,
+ class_def_idx,
+ method_idx,
+ class_loader,
+ dex_file,
+ dex_to_dex_compilation_level,
+ compilation_enabled,
+ dex_cache,
+ quick_fn);
+}
+
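Both CompileMethodQuick and CompileMethodDex2Dex above route through CompileMethodHarness and differ only in the lambda they hand it, so the timing and bookkeeping live in one place. A minimal standalone sketch of that shape, using hypothetical simplified types rather than ART's signatures:

    #include <cstdint>
    #include <iostream>
    #include <memory>

    // Illustrative stand-ins, not ART's types.
    struct Method { uint32_t idx; bool is_native; };
    struct Compiled { uint32_t idx; };

    // Shared harness: timing and result bookkeeping would live here once;
    // the pass-specific work is supplied as a callable.
    template <typename CompileFn>
    std::unique_ptr<Compiled> CompileWithHarness(const Method& m, CompileFn compile_fn) {
      // (the real harness also times the call and warns about slow compiles)
      return compile_fn(m);
    }

    // Pass 1: "quick" compilation, which may decline and return nullptr.
    std::unique_ptr<Compiled> CompileQuick(const Method& m) {
      return CompileWithHarness(m, [](const Method& mm) -> std::unique_ptr<Compiled> {
        if (mm.is_native) return nullptr;  // JNI/native handled separately
        return std::make_unique<Compiled>(Compiled{mm.idx});
      });
    }

    // Pass 2: dex-to-dex quickening of methods marked during pass 1.
    std::unique_ptr<Compiled> CompileDex2Dex(const Method& m) {
      return CompileWithHarness(m, [](const Method& mm) -> std::unique_ptr<Compiled> {
        return std::make_unique<Compiled>(Compiled{mm.idx});
      });
    }

    int main() {
      Method m{42, /*is_native=*/false};
      std::cout << (CompileQuick(m) != nullptr) << " " << (CompileDex2Dex(m) != nullptr) << "\n";
      return 0;
    }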
void CompilerDriver::CompileOne(Thread* self, ArtMethod* method, TimingLogger* timings) {
DCHECK(!Runtime::Current()->IsStarted());
jobject jclass_loader;
@@ -620,53 +710,42 @@ void CompilerDriver::CompileOne(Thread* self, ArtMethod* method, TimingLogger* t
PreCompile(jclass_loader, dex_files, timings);
// Can we run DEX-to-DEX compiler on this class ?
- optimizer::DexToDexCompilationLevel dex_to_dex_compilation_level =
+ optimizer::DexToDexCompiler::CompilationLevel dex_to_dex_compilation_level =
GetDexToDexCompilationLevel(self,
*this,
jclass_loader,
*dex_file,
dex_file->GetClassDef(class_def_idx));
- DCHECK(current_dex_to_dex_methods_ == nullptr);
- CompileMethod(self,
- this,
- code_item,
- access_flags,
- invoke_type,
- class_def_idx,
- method_idx,
- class_loader,
- *dex_file,
- dex_to_dex_compilation_level,
- true,
- dex_cache);
-
- ArrayRef<DexFileMethodSet> dex_to_dex_references;
- {
- // From this point on, we shall not modify dex_to_dex_references_, so
- // just grab a reference to it that we use without holding the mutex.
- MutexLock lock(Thread::Current(), dex_to_dex_references_lock_);
- dex_to_dex_references = ArrayRef<DexFileMethodSet>(dex_to_dex_references_);
- }
- if (!dex_to_dex_references.empty()) {
- DCHECK_EQ(dex_to_dex_references.size(), 1u);
- DCHECK(&dex_to_dex_references[0].GetDexFile() == dex_file);
- current_dex_to_dex_methods_ = &dex_to_dex_references.front().GetMethodIndexes();
- DCHECK(current_dex_to_dex_methods_->IsBitSet(method_idx));
- DCHECK_EQ(current_dex_to_dex_methods_->NumSetBits(), 1u);
- CompileMethod(self,
- this,
- code_item,
- access_flags,
- invoke_type,
- class_def_idx,
- method_idx,
- class_loader,
- *dex_file,
- dex_to_dex_compilation_level,
- true,
- dex_cache);
- current_dex_to_dex_methods_ = nullptr;
+ CompileMethodQuick(self,
+ this,
+ code_item,
+ access_flags,
+ invoke_type,
+ class_def_idx,
+ method_idx,
+ class_loader,
+ *dex_file,
+ dex_to_dex_compilation_level,
+ true,
+ dex_cache);
+
+ const size_t num_methods = dex_to_dex_compiler_.NumCodeItemsToQuicken(self);
+ if (num_methods != 0) {
+ DCHECK_EQ(num_methods, 1u);
+ CompileMethodDex2Dex(self,
+ this,
+ code_item,
+ access_flags,
+ invoke_type,
+ class_def_idx,
+ method_idx,
+ class_loader,
+ *dex_file,
+ dex_to_dex_compilation_level,
+ true,
+ dex_cache);
+ dex_to_dex_compiler_.ClearState();
}
FreeThreadPools();
@@ -953,7 +1032,7 @@ class ResolveCatchBlockExceptionsClassVisitor : public ClassVisitor {
if (method->GetCodeItem() == nullptr) {
return; // native or abstract method
}
- CodeItemDataAccessor accessor(method);
+ CodeItemDataAccessor accessor(method->DexInstructionData());
if (accessor.TriesSize() == 0) {
return; // nothing to process
}
@@ -1284,17 +1363,6 @@ bool CompilerDriver::CanAssumeClassIsLoaded(mirror::Class* klass) {
return IsImageClass(descriptor);
}
-void CompilerDriver::MarkForDexToDexCompilation(Thread* self, const MethodReference& method_ref) {
- MutexLock lock(self, dex_to_dex_references_lock_);
- // Since we're compiling one dex file at a time, we need to look for the
- // current dex file entry only at the end of dex_to_dex_references_.
- if (dex_to_dex_references_.empty() ||
- &dex_to_dex_references_.back().GetDexFile() != method_ref.dex_file) {
- dex_to_dex_references_.emplace_back(*method_ref.dex_file);
- }
- dex_to_dex_references_.back().GetMethodIndexes().SetBit(method_ref.index);
-}
-
bool CompilerDriver::CanAccessTypeWithoutChecks(ObjPtr<mirror::Class> referrer_class,
ObjPtr<mirror::Class> resolved_class) {
if (resolved_class == nullptr) {
@@ -1476,13 +1544,19 @@ class ParallelCompilationManager {
void ForAll(size_t begin, size_t end, CompilationVisitor* visitor, size_t work_units)
REQUIRES(!*Locks::mutator_lock_) {
+ ForAllLambda(begin, end, [visitor](size_t index) { visitor->Visit(index); }, work_units);
+ }
+
+ template <typename Fn>
+ void ForAllLambda(size_t begin, size_t end, Fn fn, size_t work_units)
+ REQUIRES(!*Locks::mutator_lock_) {
Thread* self = Thread::Current();
self->AssertNoPendingException();
CHECK_GT(work_units, 0U);
index_.StoreRelaxed(begin);
for (size_t i = 0; i < work_units; ++i) {
- thread_pool_->AddTask(self, new ForAllClosure(this, end, visitor));
+ thread_pool_->AddTask(self, new ForAllClosureLambda<Fn>(this, end, fn));
}
thread_pool_->StartWorkers(self);
@@ -1502,32 +1576,33 @@ class ParallelCompilationManager {
}
private:
- class ForAllClosure : public Task {
+ template <typename Fn>
+ class ForAllClosureLambda : public Task {
public:
- ForAllClosure(ParallelCompilationManager* manager, size_t end, CompilationVisitor* visitor)
+ ForAllClosureLambda(ParallelCompilationManager* manager, size_t end, Fn fn)
: manager_(manager),
end_(end),
- visitor_(visitor) {}
+ fn_(fn) {}
- virtual void Run(Thread* self) {
+ void Run(Thread* self) OVERRIDE {
while (true) {
const size_t index = manager_->NextIndex();
if (UNLIKELY(index >= end_)) {
break;
}
- visitor_->Visit(index);
+ fn_(index);
self->AssertNoPendingException();
}
}
- virtual void Finalize() {
+ void Finalize() OVERRIDE {
delete this;
}
private:
ParallelCompilationManager* const manager_;
const size_t end_;
- CompilationVisitor* const visitor_;
+ Fn fn_;
};
AtomicInteger index_;
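ForAllLambda above keeps the old work-distribution shape (a shared atomic index that each worker advances until it reaches end) but accepts any callable instead of a CompilationVisitor, which is what lets CompileDexFile pass a lambda. A rough standalone sketch of the pattern, using std::thread in place of ART's ThreadPool:

    #include <atomic>
    #include <cstddef>
    #include <thread>
    #include <vector>

    // Run fn(index) for every index in [begin, end) across work_units workers.
    // Each worker repeatedly claims the next unclaimed index, mirroring
    // ForAllClosureLambda::Run above.
    template <typename Fn>
    void ForAllLambda(size_t begin, size_t end, Fn fn, size_t work_units) {
      std::atomic<size_t> next{begin};
      std::vector<std::thread> workers;
      for (size_t i = 0; i < work_units; ++i) {
        workers.emplace_back([&next, end, fn]() {
          while (true) {
            const size_t index = next.fetch_add(1);
            if (index >= end) {
              break;
            }
            fn(index);
          }
        });
      }
      for (std::thread& worker : workers) {
        worker.join();
      }
    }

CompileDexFile below then drives the real version as context.ForAllLambda(0, dex_file.NumClassDefs(), compile, thread_count).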
@@ -2606,72 +2681,33 @@ void CompilerDriver::InitializeClasses(jobject class_loader,
}
}
-void CompilerDriver::Compile(jobject class_loader,
- const std::vector<const DexFile*>& dex_files,
- TimingLogger* timings) {
- if (kDebugProfileGuidedCompilation) {
- LOG(INFO) << "[ProfileGuidedCompilation] " <<
- ((profile_compilation_info_ == nullptr)
- ? "null"
- : profile_compilation_info_->DumpInfo(&dex_files));
- }
-
- current_dex_to_dex_methods_ = nullptr;
- Thread* const self = Thread::Current();
- {
- // Clear in case we aren't the first call to Compile.
- MutexLock mu(self, dex_to_dex_references_lock_);
- dex_to_dex_references_.clear();
- }
-
- for (const DexFile* dex_file : dex_files) {
- CHECK(dex_file != nullptr);
- CompileDexFile(class_loader,
- *dex_file,
- dex_files,
- parallel_thread_pool_.get(),
- parallel_thread_count_,
- timings);
- const ArenaPool* const arena_pool = Runtime::Current()->GetArenaPool();
- const size_t arena_alloc = arena_pool->GetBytesAllocated();
- max_arena_alloc_ = std::max(arena_alloc, max_arena_alloc_);
- Runtime::Current()->ReclaimArenaPoolMemory();
- }
-
- ArrayRef<DexFileMethodSet> dex_to_dex_references;
- {
- // From this point on, we shall not modify dex_to_dex_references_, so
- // just grab a reference to it that we use without holding the mutex.
- MutexLock lock(self, dex_to_dex_references_lock_);
- dex_to_dex_references = ArrayRef<DexFileMethodSet>(dex_to_dex_references_);
- }
- for (const auto& method_set : dex_to_dex_references) {
- current_dex_to_dex_methods_ = &method_set.GetMethodIndexes();
- CompileDexFile(class_loader,
- method_set.GetDexFile(),
- dex_files,
- parallel_thread_pool_.get(),
- parallel_thread_count_,
- timings);
- }
- current_dex_to_dex_methods_ = nullptr;
-
- VLOG(compiler) << "Compile: " << GetMemoryUsageString(false);
-}
-
-class CompileClassVisitor : public CompilationVisitor {
- public:
- explicit CompileClassVisitor(const ParallelCompilationManager* manager) : manager_(manager) {}
+template <typename CompileFn>
+static void CompileDexFile(CompilerDriver* driver,
+ jobject class_loader,
+ const DexFile& dex_file,
+ const std::vector<const DexFile*>& dex_files,
+ ThreadPool* thread_pool,
+ size_t thread_count,
+ TimingLogger* timings,
+ const char* timing_name,
+ CompileFn compile_fn) {
+ TimingLogger::ScopedTiming t(timing_name, timings);
+ ParallelCompilationManager context(Runtime::Current()->GetClassLinker(),
+ class_loader,
+ driver,
+ &dex_file,
+ dex_files,
+ thread_pool);
- virtual void Visit(size_t class_def_index) REQUIRES(!Locks::mutator_lock_) OVERRIDE {
+ auto compile = [&context, &compile_fn](size_t class_def_index) {
ScopedTrace trace(__FUNCTION__);
- const DexFile& dex_file = *manager_->GetDexFile();
+ const DexFile& dex_file = *context.GetDexFile();
const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
- ClassLinker* class_linker = manager_->GetClassLinker();
- jobject jclass_loader = manager_->GetClassLoader();
+ ClassLinker* class_linker = context.GetClassLinker();
+ jobject jclass_loader = context.GetClassLoader();
ClassReference ref(&dex_file, class_def_index);
// Skip compiling classes with generic verifier failures since they will still fail at runtime
- if (manager_->GetCompiler()->verification_results_->IsClassRejected(ref)) {
+ if (context.GetCompiler()->GetVerificationResults()->IsClassRejected(ref)) {
return;
}
// Use a scoped object access to perform to the quick SkipClass check.
@@ -2702,10 +2738,10 @@ class CompileClassVisitor : public CompilationVisitor {
// Go to native so that we don't block GC during compilation.
ScopedThreadSuspension sts(soa.Self(), kNative);
- CompilerDriver* const driver = manager_->GetCompiler();
+ CompilerDriver* const driver = context.GetCompiler();
// Can we run DEX-to-DEX compiler on this class ?
- optimizer::DexToDexCompilationLevel dex_to_dex_compilation_level =
+ optimizer::DexToDexCompiler::CompilationLevel dex_to_dex_compilation_level =
GetDexToDexCompilationLevel(soa.Self(), *driver, jclass_loader, dex_file, class_def);
ClassDataItemIterator it(dex_file, class_data);
@@ -2725,38 +2761,71 @@ class CompileClassVisitor : public CompilationVisitor {
continue;
}
previous_method_idx = method_idx;
- CompileMethod(soa.Self(),
- driver,
- it.GetMethodCodeItem(),
- it.GetMethodAccessFlags(),
- it.GetMethodInvokeType(class_def),
- class_def_index,
- method_idx,
- class_loader,
- dex_file,
- dex_to_dex_compilation_level,
- compilation_enabled,
- dex_cache);
+ compile_fn(soa.Self(),
+ driver,
+ it.GetMethodCodeItem(),
+ it.GetMethodAccessFlags(),
+ it.GetMethodInvokeType(class_def),
+ class_def_index,
+ method_idx,
+ class_loader,
+ dex_file,
+ dex_to_dex_compilation_level,
+ compilation_enabled,
+ dex_cache);
it.Next();
}
DCHECK(!it.HasNext());
+ };
+ context.ForAllLambda(0, dex_file.NumClassDefs(), compile, thread_count);
+}
+
+void CompilerDriver::Compile(jobject class_loader,
+ const std::vector<const DexFile*>& dex_files,
+ TimingLogger* timings) {
+ if (kDebugProfileGuidedCompilation) {
+ LOG(INFO) << "[ProfileGuidedCompilation] " <<
+ ((profile_compilation_info_ == nullptr)
+ ? "null"
+ : profile_compilation_info_->DumpInfo(&dex_files));
}
- private:
- const ParallelCompilationManager* const manager_;
-};
+ dex_to_dex_compiler_.ClearState();
+ for (const DexFile* dex_file : dex_files) {
+ CHECK(dex_file != nullptr);
+ CompileDexFile(this,
+ class_loader,
+ *dex_file,
+ dex_files,
+ parallel_thread_pool_.get(),
+ parallel_thread_count_,
+ timings,
+ "Compile Dex File Quick",
+ CompileMethodQuick);
+ const ArenaPool* const arena_pool = Runtime::Current()->GetArenaPool();
+ const size_t arena_alloc = arena_pool->GetBytesAllocated();
+ max_arena_alloc_ = std::max(arena_alloc, max_arena_alloc_);
+ Runtime::Current()->ReclaimArenaPoolMemory();
+ }
-void CompilerDriver::CompileDexFile(jobject class_loader,
- const DexFile& dex_file,
- const std::vector<const DexFile*>& dex_files,
- ThreadPool* thread_pool,
- size_t thread_count,
- TimingLogger* timings) {
- TimingLogger::ScopedTiming t("Compile Dex File", timings);
- ParallelCompilationManager context(Runtime::Current()->GetClassLinker(), class_loader, this,
- &dex_file, dex_files, thread_pool);
- CompileClassVisitor visitor(&context);
- context.ForAll(0, dex_file.NumClassDefs(), &visitor, thread_count);
+ if (dex_to_dex_compiler_.NumCodeItemsToQuicken(Thread::Current()) > 0u) {
+ // TODO: Avoid visiting all of the dex files; it's probably rare that only one of them would
+ // have quickened methods, though.
+ for (const DexFile* dex_file : dex_files) {
+ CompileDexFile(this,
+ class_loader,
+ *dex_file,
+ dex_files,
+ parallel_thread_pool_.get(),
+ parallel_thread_count_,
+ timings,
+ "Compile Dex File Dex2Dex",
+ CompileMethodDex2Dex);
+ }
+ dex_to_dex_compiler_.ClearState();
+ }
+
+ VLOG(compiler) << "Compile: " << GetMemoryUsageString(false);
}
void CompilerDriver::AddCompiledMethod(const MethodReference& method_ref,
@@ -2771,6 +2840,12 @@ void CompilerDriver::AddCompiledMethod(const MethodReference& method_ref,
DCHECK(GetCompiledMethod(method_ref) != nullptr) << method_ref.PrettyMethod();
}
+CompiledMethod* CompilerDriver::RemoveCompiledMethod(const MethodReference& method_ref) {
+ CompiledMethod* ret = nullptr;
+ CHECK(compiled_methods_.Remove(method_ref, &ret));
+ return ret;
+}
+
bool CompilerDriver::GetCompiledClass(const ClassReference& ref, ClassStatus* status) const {
DCHECK(status != nullptr);
// The table doesn't know if something wasn't inserted. For this case it will return
@@ -2944,6 +3019,7 @@ void CompilerDriver::FreeThreadPools() {
void CompilerDriver::SetDexFilesForOatFile(const std::vector<const DexFile*>& dex_files) {
dex_files_for_oat_file_ = dex_files;
compiled_classes_.AddDexFiles(dex_files);
+ dex_to_dex_compiler_.SetDexFiles(dex_files);
}
void CompilerDriver::SetClasspathDexFiles(const std::vector<const DexFile*>& dex_files) {
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index ef16212fb7..2b524a347d 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -35,8 +35,8 @@
#include "compiler.h"
#include "dex/dex_file.h"
#include "dex/dex_file_types.h"
+#include "dex/dex_to_dex_compiler.h"
#include "driver/compiled_method_storage.h"
-#include "jit/profile_compilation_info.h"
#include "method_reference.h"
#include "os.h"
#include "safe_map.h"
@@ -69,6 +69,7 @@ enum InvokeType : uint32_t;
class MemberOffset;
template<class MirrorType> class ObjPtr;
class ParallelCompilationManager;
+class ProfileCompilationInfo;
class ScopedObjectAccess;
template <class Allocator> class SrcMap;
class TimingLogger;
@@ -106,13 +107,13 @@ class CompilerDriver {
~CompilerDriver();
- // Set dex files that will be stored in the oat file after being compiled.
+ // Set dex files associated with the oat file being compiled.
void SetDexFilesForOatFile(const std::vector<const DexFile*>& dex_files);
// Set dex files classpath.
void SetClasspathDexFiles(const std::vector<const DexFile*>& dex_files);
- // Get dex file that will be stored in the oat file after being compiled.
+ // Get dex files associated with the oat file being compiled.
ArrayRef<const DexFile* const> GetDexFilesForOatFile() const {
return ArrayRef<const DexFile* const>(dex_files_for_oat_file_);
}
@@ -120,12 +121,11 @@ class CompilerDriver {
void CompileAll(jobject class_loader,
const std::vector<const DexFile*>& dex_files,
TimingLogger* timings)
- REQUIRES(!Locks::mutator_lock_, !dex_to_dex_references_lock_);
+ REQUIRES(!Locks::mutator_lock_);
// Compile a single Method.
void CompileOne(Thread* self, ArtMethod* method, TimingLogger* timings)
- REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!dex_to_dex_references_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
VerificationResults* GetVerificationResults() const;
@@ -165,6 +165,7 @@ class CompilerDriver {
void AddCompiledMethod(const MethodReference& method_ref,
CompiledMethod* const compiled_method,
size_t non_relative_linker_patch_count);
+ CompiledMethod* RemoveCompiledMethod(const MethodReference& method_ref);
void SetRequiresConstructorBarrier(Thread* self,
const DexFile* dex_file,
@@ -362,13 +363,6 @@ class CompilerDriver {
return true;
}
- void MarkForDexToDexCompilation(Thread* self, const MethodReference& method_ref)
- REQUIRES(!dex_to_dex_references_lock_);
-
- const BitVector* GetCurrentDexToDexMethods() const {
- return current_dex_to_dex_methods_;
- }
-
const ProfileCompilationInfo* GetProfileCompilationInfo() const {
return profile_compilation_info_;
}
@@ -381,6 +375,10 @@ class CompilerDriver {
|| android::base::EndsWith(boot_image_filename, "core-optimizing.art");
}
+ optimizer::DexToDexCompiler& GetDexToDexCompiler() {
+ return dex_to_dex_compiler_;
+ }
+
private:
void PreCompile(jobject class_loader,
const std::vector<const DexFile*>& dex_files,
@@ -447,14 +445,7 @@ class CompilerDriver {
void Compile(jobject class_loader,
const std::vector<const DexFile*>& dex_files,
- TimingLogger* timings) REQUIRES(!dex_to_dex_references_lock_);
- void CompileDexFile(jobject class_loader,
- const DexFile& dex_file,
- const std::vector<const DexFile*>& dex_files,
- ThreadPool* thread_pool,
- size_t thread_count,
- TimingLogger* timings)
- REQUIRES(!Locks::mutator_lock_);
+ TimingLogger* timings);
bool MayInlineInternal(const DexFile* inlined_from, const DexFile* inlined_into) const;
@@ -529,7 +520,7 @@ class CompilerDriver {
bool support_boot_image_fixup_;
- // List of dex files that will be stored in the oat file.
+ // List of dex files associated with the oat file.
std::vector<const DexFile*> dex_files_for_oat_file_;
CompiledMethodStorage compiled_method_storage_;
@@ -539,14 +530,8 @@ class CompilerDriver {
size_t max_arena_alloc_;
- // Data for delaying dex-to-dex compilation.
- Mutex dex_to_dex_references_lock_;
- // In the first phase, dex_to_dex_references_ collects methods for dex-to-dex compilation.
- class DexFileMethodSet;
- std::vector<DexFileMethodSet> dex_to_dex_references_ GUARDED_BY(dex_to_dex_references_lock_);
- // In the second phase, current_dex_to_dex_methods_ points to the BitVector with method
- // indexes for dex-to-dex compilation in the current dex file.
- const BitVector* current_dex_to_dex_methods_;
+ // Compiler for dex to dex (quickening).
+ optimizer::DexToDexCompiler dex_to_dex_compiler_;
friend class CompileClassVisitor;
friend class DexToDexDecompilerTest;
diff --git a/compiler/driver/compiler_options.cc b/compiler/driver/compiler_options.cc
index 1780b1d7ed..2d82d79c4a 100644
--- a/compiler/driver/compiler_options.cc
+++ b/compiler/driver/compiler_options.cc
@@ -60,6 +60,7 @@ CompilerOptions::CompilerOptions()
dump_cfg_append_(false),
force_determinism_(false),
deduplicate_code_(true),
+ count_hotness_in_compiled_code_(false),
register_allocation_strategy_(RegisterAllocator::kRegisterAllocatorDefault),
passes_to_run_(nullptr) {
}
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index 3f660293d2..18b0913430 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -274,6 +274,10 @@ class CompilerOptions FINAL {
return dump_stats_;
}
+ bool CountHotnessInCompiledCode() const {
+ return count_hotness_in_compiled_code_;
+ }
+
private:
bool ParseDumpInitFailures(const std::string& option, std::string* error_msg);
void ParseDumpCfgPasses(const StringPiece& option, UsageFn Usage);
@@ -336,6 +340,10 @@ class CompilerOptions FINAL {
// Whether code should be deduplicated.
bool deduplicate_code_;
+ // Whether compiled code should increment the hotness count of ArtMethod. Note that the increments
+ // won't be atomic for performance reasons, so we accept races, just like in the interpreter.
+ bool count_hotness_in_compiled_code_;
+
RegisterAllocator::Strategy register_allocation_strategy_;
// If not null, specifies optimization passes which will be run instead of defaults.
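The comment above is the key design note for --count-hotness-in-compiled-code: the generated prologue and back-edge code is a plain 16-bit load, add, store of ArtMethod's hotness counter, so concurrent increments can be lost. A small sketch of what that sequence amounts to (hypothetical standalone type, not ART's ArtMethod):

    #include <cstdint>

    struct MethodCounter {
      uint16_t hotness_count_ = 0;  // 16-bit counter, as in ArtMethod
    };

    // Equivalent of the emitted ldrh/add/strh: a plain read-modify-write,
    // deliberately not atomic, so simultaneous increments from different
    // threads may be dropped -- acceptable for a heuristic hotness signal.
    inline void BumpHotness(MethodCounter* method) {
      uint16_t count = method->hotness_count_;     // ldrh
      count = static_cast<uint16_t>(count + 1u);   // add
      method->hotness_count_ = count;              // strh
    }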
diff --git a/compiler/driver/compiler_options_map-inl.h b/compiler/driver/compiler_options_map-inl.h
index f97ab08600..3b18db09fc 100644
--- a/compiler/driver/compiler_options_map-inl.h
+++ b/compiler/driver/compiler_options_map-inl.h
@@ -77,6 +77,9 @@ inline bool ReadCompilerOptions(Base& map, CompilerOptions* options, std::string
}
map.AssignIfExists(Base::VerboseMethods, &options->verbose_methods_);
options->deduplicate_code_ = map.GetOrDefault(Base::DeduplicateCode);
+ if (map.Exists(Base::CountHotnessInCompiledCode)) {
+ options->count_hotness_in_compiled_code_ = true;
+ }
if (map.Exists(Base::DumpTimings)) {
options->dump_timings_ = true;
@@ -137,6 +140,9 @@ inline void AddCompilerOptionsArgumentParserOptions(Builder& b) {
.WithValueMap({{"false", false}, {"true", true}})
.IntoKey(Map::DeduplicateCode)
+ .Define({"--count-hotness-in-compiled-code"})
+ .IntoKey(Map::CountHotnessInCompiledCode)
+
.Define({"--dump-timings"})
.IntoKey(Map::DumpTimings)
diff --git a/compiler/driver/compiler_options_map.def b/compiler/driver/compiler_options_map.def
index 2c56fd7974..acddae7299 100644
--- a/compiler/driver/compiler_options_map.def
+++ b/compiler/driver/compiler_options_map.def
@@ -58,6 +58,7 @@ COMPILER_OPTIONS_KEY (Unit, DumpCFGAppend)
COMPILER_OPTIONS_KEY (std::string, RegisterAllocationStrategy)
COMPILER_OPTIONS_KEY (ParseStringList<','>, VerboseMethods)
COMPILER_OPTIONS_KEY (bool, DeduplicateCode, true)
+COMPILER_OPTIONS_KEY (Unit, CountHotnessInCompiledCode)
COMPILER_OPTIONS_KEY (Unit, DumpTimings)
COMPILER_OPTIONS_KEY (Unit, DumpStats)
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 88e3e5b230..17b94d3bdf 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -76,6 +76,7 @@ extern "C" void jit_types_loaded(void* handle, mirror::Class** types, size_t cou
const ArrayRef<mirror::Class*> types_array(types, count);
std::vector<uint8_t> elf_file = debug::WriteDebugElfFileForClasses(
kRuntimeISA, jit_compiler->GetCompilerDriver()->GetInstructionSetFeatures(), types_array);
+ MutexLock mu(Thread::Current(), *Locks::native_debug_interface_lock_);
CreateJITCodeEntry(std::move(elf_file));
}
}
diff --git a/compiler/linker/arm/relative_patcher_arm_base.cc b/compiler/linker/arm/relative_patcher_arm_base.cc
index cedbe5d97f..6e0286afac 100644
--- a/compiler/linker/arm/relative_patcher_arm_base.cc
+++ b/compiler/linker/arm/relative_patcher_arm_base.cc
@@ -250,12 +250,12 @@ std::vector<debug::MethodDebugInfo> ArmBaseRelativePatcher::GenerateThunkDebugIn
for (size_t i = start, num = data.NumberOfThunks(); i != num; ++i) {
debug::MethodDebugInfo info = {};
if (i == 0u) {
- info.trampoline_name = base_name;
+ info.custom_name = base_name;
} else {
// Add a disambiguating tag for subsequent identical thunks. Since the `thunks_`
// keeps records also for thunks in previous oat files, names based on the thunk
// index shall be unique across the whole multi-oat output.
- info.trampoline_name = base_name + "_" + std::to_string(i);
+ info.custom_name = base_name + "_" + std::to_string(i);
}
info.isa = instruction_set_;
info.is_code_address_text_relative = true;
diff --git a/compiler/linker/elf_builder.h b/compiler/linker/elf_builder.h
index aa3cd98595..3145497091 100644
--- a/compiler/linker/elf_builder.h
+++ b/compiler/linker/elf_builder.h
@@ -38,9 +38,10 @@ namespace linker {
// Elf_Ehdr - The ELF header.
// Elf_Phdr[] - Program headers for the linker.
// .note.gnu.build-id - Optional build ID section (SHA-1 digest).
-// .rodata - DEX files and oat metadata.
+// .rodata - Oat metadata.
// .text - Compiled code.
// .bss - Zero-initialized writeable section.
+// .dex - Reserved NOBITS space for dex-related data.
// .MIPS.abiflags - MIPS specific section.
// .dynstr - Names for .dynsym.
// .dynsym - A few oat-specific dynamic symbols.
@@ -195,6 +196,11 @@ class ElfBuilder FINAL {
return section_index_;
}
+ // Returns true if this section has been added.
+ bool Exists() const {
+ return section_index_ != 0;
+ }
+
private:
// Add this section to the list of generated ELF sections (if not there already).
// It also ensures the alignment is sufficient to generate valid program headers,
@@ -303,42 +309,46 @@ class ElfBuilder FINAL {
/* info */ 0,
align,
/* entsize */ 0),
- current_offset_(0) {
+ current_offset_(0),
+ last_offset_(0) {
}
Elf_Word Write(const std::string& name) {
if (current_offset_ == 0) {
DCHECK(name.empty());
+ } else if (name == last_name_) {
+ return last_offset_; // Very simple string de-duplication.
}
- Elf_Word offset = current_offset_;
+ last_name_ = name;
+ last_offset_ = current_offset_;
this->WriteFully(name.c_str(), name.length() + 1);
current_offset_ += name.length() + 1;
- return offset;
+ return last_offset_;
}
private:
Elf_Word current_offset_;
+ std::string last_name_;
+ Elf_Word last_offset_;
};
// Writer of .dynsym and .symtab sections.
- class SymbolSection FINAL : public CachedSection {
+ class SymbolSection FINAL : public Section {
public:
SymbolSection(ElfBuilder<ElfTypes>* owner,
const std::string& name,
Elf_Word type,
Elf_Word flags,
Section* strtab)
- : CachedSection(owner,
- name,
- type,
- flags,
- strtab,
- /* info */ 0,
- sizeof(Elf_Off),
- sizeof(Elf_Sym)) {
- // The symbol table always has to start with NULL symbol.
- Elf_Sym null_symbol = Elf_Sym();
- CachedSection::Add(&null_symbol, sizeof(null_symbol));
+ : Section(owner,
+ name,
+ type,
+ flags,
+ strtab,
+ /* info */ 0,
+ sizeof(Elf_Off),
+ sizeof(Elf_Sym)) {
+ syms_.push_back(Elf_Sym()); // The symbol table always has to start with NULL symbol.
}
// Buffer symbol for this section. It will be written later.
@@ -361,6 +371,7 @@ class ElfBuilder FINAL {
Add(name, section_index, addr, size, binding, type);
}
+ // Buffer symbol for this section. It will be written later.
void Add(Elf_Word name,
Elf_Word section_index,
Elf_Addr addr,
@@ -374,8 +385,19 @@ class ElfBuilder FINAL {
sym.st_other = 0;
sym.st_shndx = section_index;
sym.st_info = (binding << 4) + (type & 0xf);
- CachedSection::Add(&sym, sizeof(sym));
+ syms_.push_back(sym);
}
+
+ Elf_Word GetCacheSize() { return syms_.size() * sizeof(Elf_Sym); }
+
+ void WriteCachedSection() {
+ this->Start();
+ this->WriteFully(syms_.data(), syms_.size() * sizeof(Elf_Sym));
+ this->End();
+ }
+
+ private:
+ std::vector<Elf_Sym> syms_; // Buffered/cached content of the whole section.
};
class AbiflagsSection FINAL : public Section {
@@ -503,6 +525,7 @@ class ElfBuilder FINAL {
rodata_(this, ".rodata", SHT_PROGBITS, SHF_ALLOC, nullptr, 0, kPageSize, 0),
text_(this, ".text", SHT_PROGBITS, SHF_ALLOC | SHF_EXECINSTR, nullptr, 0, kPageSize, 0),
bss_(this, ".bss", SHT_NOBITS, SHF_ALLOC, nullptr, 0, kPageSize, 0),
+ dex_(this, ".dex", SHT_NOBITS, SHF_ALLOC, nullptr, 0, kPageSize, 0),
dynstr_(this, ".dynstr", SHF_ALLOC, kPageSize),
dynsym_(this, ".dynsym", SHT_DYNSYM, SHF_ALLOC, &dynstr_),
hash_(this, ".hash", SHT_HASH, SHF_ALLOC, &dynsym_, 0, sizeof(Elf_Word), sizeof(Elf_Word)),
@@ -525,6 +548,7 @@ class ElfBuilder FINAL {
virtual_address_(0) {
text_.phdr_flags_ = PF_R | PF_X;
bss_.phdr_flags_ = PF_R | PF_W;
+ dex_.phdr_flags_ = PF_R;
dynamic_.phdr_flags_ = PF_R | PF_W;
dynamic_.phdr_type_ = PT_DYNAMIC;
eh_frame_hdr_.phdr_type_ = PT_GNU_EH_FRAME;
@@ -538,6 +562,7 @@ class ElfBuilder FINAL {
Section* GetRoData() { return &rodata_; }
Section* GetText() { return &text_; }
Section* GetBss() { return &bss_; }
+ Section* GetDex() { return &dex_; }
StringSection* GetStrTab() { return &strtab_; }
SymbolSection* GetSymTab() { return &symtab_; }
Section* GetEhFrame() { return &eh_frame_; }
@@ -666,7 +691,8 @@ class ElfBuilder FINAL {
Elf_Word text_size,
Elf_Word bss_size,
Elf_Word bss_methods_offset,
- Elf_Word bss_roots_offset) {
+ Elf_Word bss_roots_offset,
+ Elf_Word dex_size) {
std::string soname(elf_file_path);
size_t directory_separator_pos = soname.rfind('/');
if (directory_separator_pos != std::string::npos) {
@@ -679,6 +705,9 @@ class ElfBuilder FINAL {
if (bss_size != 0) {
bss_.AllocateVirtualMemory(bss_size);
}
+ if (dex_size != 0) {
+ dex_.AllocateVirtualMemory(dex_size);
+ }
if (isa_ == InstructionSet::kMips || isa_ == InstructionSet::kMips64) {
abiflags_.AllocateVirtualMemory(abiflags_.GetSize());
}
@@ -725,6 +754,14 @@ class ElfBuilder FINAL {
Elf_Word bsslastword_address = bss_.GetAddress() + bss_size - 4;
dynsym_.Add(oatbsslastword, &bss_, bsslastword_address, 4, STB_GLOBAL, STT_OBJECT);
}
+ if (dex_size != 0u) {
+ Elf_Word oatdex = dynstr_.Add("oatdex");
+ dynsym_.Add(oatdex, &dex_, dex_.GetAddress(), dex_size, STB_GLOBAL, STT_OBJECT);
+ Elf_Word oatdexlastword = dynstr_.Add("oatdexlastword");
+ Elf_Word oatdexlastword_address = dex_.GetAddress() + dex_size - 4;
+ dynsym_.Add(oatdexlastword, &dex_, oatdexlastword_address, 4, STB_GLOBAL, STT_OBJECT);
+ }
+
Elf_Word soname_offset = dynstr_.Add(soname);
// We do not really need a hash-table since there are so few entries.
@@ -967,6 +1004,7 @@ class ElfBuilder FINAL {
Section rodata_;
Section text_;
Section bss_;
+ Section dex_;
CachedStringSection dynstr_;
SymbolSection dynsym_;
CachedSection hash_;
diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index 9c2068ec5e..d893cc88c4 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -302,7 +302,7 @@ class ValueRange : public ArenaObject<kArenaAllocBoundsCheckElimination> {
ValueBound GetLower() const { return lower_; }
ValueBound GetUpper() const { return upper_; }
- bool IsConstantValueRange() { return lower_.IsConstant() && upper_.IsConstant(); }
+ bool IsConstantValueRange() const { return lower_.IsConstant() && upper_.IsConstant(); }
// If it's certain that this value range fits in other_range.
virtual bool FitsIn(ValueRange* other_range) const {
@@ -789,24 +789,33 @@ class BCEVisitor : public HGraphVisitor {
ApplyRangeFromComparison(left, block, false_successor, new_range);
}
} else if (cond == kCondNE || cond == kCondEQ) {
- if (left->IsArrayLength() && lower.IsConstant() && upper.IsConstant()) {
- // Special case:
- // length == [c,d] yields [c, d] along true
- // length != [c,d] yields [c, d] along false
- if (!lower.Equals(ValueBound::Min()) || !upper.Equals(ValueBound::Max())) {
- ValueRange* new_range = new (&allocator_) ValueRange(&allocator_, lower, upper);
- ApplyRangeFromComparison(
- left, block, cond == kCondEQ ? true_successor : false_successor, new_range);
- }
- // In addition:
- // length == 0 yields [1, max] along false
- // length != 0 yields [1, max] along true
- if (lower.GetConstant() == 0 && upper.GetConstant() == 0) {
- ValueRange* new_range = new (&allocator_) ValueRange(
- &allocator_, ValueBound(nullptr, 1), ValueBound::Max());
- ApplyRangeFromComparison(
- left, block, cond == kCondEQ ? false_successor : true_successor, new_range);
+ if (left->IsArrayLength()) {
+ if (lower.IsConstant() && upper.IsConstant()) {
+ // Special case:
+ // length == [c,d] yields [c, d] along true
+ // length != [c,d] yields [c, d] along false
+ if (!lower.Equals(ValueBound::Min()) || !upper.Equals(ValueBound::Max())) {
+ ValueRange* new_range = new (&allocator_) ValueRange(&allocator_, lower, upper);
+ ApplyRangeFromComparison(
+ left, block, cond == kCondEQ ? true_successor : false_successor, new_range);
+ }
+ // In addition:
+ // length == 0 yields [1, max] along false
+ // length != 0 yields [1, max] along true
+ if (lower.GetConstant() == 0 && upper.GetConstant() == 0) {
+ ValueRange* new_range = new (&allocator_) ValueRange(
+ &allocator_, ValueBound(nullptr, 1), ValueBound::Max());
+ ApplyRangeFromComparison(
+ left, block, cond == kCondEQ ? false_successor : true_successor, new_range);
+ }
}
+ } else if (lower.IsRelatedToArrayLength() && lower.Equals(upper)) {
+ // Special aliasing case, with x not array length itself:
+ // x == [length,length] yields x == length along true
+ // x != [length,length] yields x == length along false
+ ValueRange* new_range = new (&allocator_) ValueRange(&allocator_, lower, upper);
+ ApplyRangeFromComparison(
+ left, block, cond == kCondEQ ? true_successor : false_successor, new_range);
}
}
}
@@ -827,9 +836,23 @@ class BCEVisitor : public HGraphVisitor {
ValueRange array_range(&allocator_, lower, upper);
// Try index range obtained by dominator-based analysis.
ValueRange* index_range = LookupValueRange(index, block);
- if (index_range != nullptr && index_range->FitsIn(&array_range)) {
- ReplaceInstruction(bounds_check, index);
- return;
+ if (index_range != nullptr) {
+ if (index_range->FitsIn(&array_range)) {
+ ReplaceInstruction(bounds_check, index);
+ return;
+ } else if (index_range->IsConstantValueRange()) {
+ // If the non-constant index turns out to have a constant range,
+ // make one more attempt to get a constant in the array range.
+ ValueRange* existing_range = LookupValueRange(array_length, block);
+ if (existing_range != nullptr &&
+ existing_range->IsConstantValueRange()) {
+ ValueRange constant_array_range(&allocator_, lower, existing_range->GetLower());
+ if (index_range->FitsIn(&constant_array_range)) {
+ ReplaceInstruction(bounds_check, index);
+ return;
+ }
+ }
+ }
}
// Try index range obtained by induction variable analysis.
// Disables dynamic bce if OOB is certain.
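The added IsConstantValueRange branch covers a non-constant index whose dominator-based range is nonetheless constant, paired with an array length whose range has a known constant lower bound. The underlying test is ordinary interval containment; a hedged sketch of the idea (not ART's ValueBound arithmetic, which tracks instruction-plus-constant bounds):

    #include <cassert>

    struct ConstantRange { int lo; int hi; };  // inclusive bounds

    // A bounds check on `index` against an array of length >= min_length can be
    // removed when the index range is provably inside [0, min_length).
    bool BoundsCheckRemovable(ConstantRange index, int min_length) {
      return index.lo >= 0 && index.hi < min_length;
    }

    int main() {
      // e.g. "if (a.length == 8) { int i = f() ? 3 : 5; a[i] = 0; }"
      assert(BoundsCheckRemovable({3, 5}, /*min_length=*/8));    // check removed
      assert(!BoundsCheckRemovable({3, 9}, /*min_length=*/8));   // check kept
      return 0;
    }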
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index af537dd653..a1a5692ef6 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -43,7 +43,7 @@ HGraphBuilder::HGraphBuilder(HGraph* graph,
CompilerDriver* driver,
CodeGenerator* code_generator,
OptimizingCompilerStats* compiler_stats,
- const uint8_t* interpreter_metadata,
+ ArrayRef<const uint8_t> interpreter_metadata,
VariableSizedHandleScope* handles)
: graph_(graph),
dex_file_(&graph->GetDexFile()),
@@ -70,7 +70,6 @@ HGraphBuilder::HGraphBuilder(HGraph* graph,
compiler_driver_(nullptr),
code_generator_(nullptr),
compilation_stats_(nullptr),
- interpreter_metadata_(nullptr),
handles_(handles),
return_type_(return_type) {}
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index c16a3a928d..5a1914ce08 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -18,6 +18,7 @@
#define ART_COMPILER_OPTIMIZING_BUILDER_H_
#include "base/arena_object.h"
+#include "base/array_ref.h"
#include "dex/code_item_accessors.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_file.h"
@@ -40,7 +41,7 @@ class HGraphBuilder : public ValueObject {
CompilerDriver* driver,
CodeGenerator* code_generator,
OptimizingCompilerStats* compiler_stats,
- const uint8_t* interpreter_metadata,
+ ArrayRef<const uint8_t> interpreter_metadata,
VariableSizedHandleScope* handles);
// Only for unit testing.
@@ -73,7 +74,7 @@ class HGraphBuilder : public ValueObject {
CodeGenerator* const code_generator_;
OptimizingCompilerStats* const compilation_stats_;
- const uint8_t* const interpreter_metadata_;
+ const ArrayRef<const uint8_t> interpreter_metadata_;
VariableSizedHandleScope* const handles_;
const DataType::Type return_type_;
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 13886b32b3..3fd88e3e18 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1488,6 +1488,14 @@ void CodeGeneratorARM64::GenerateFrameEntry() {
MacroAssembler* masm = GetVIXLAssembler();
__ Bind(&frame_entry_label_);
+ if (GetCompilerOptions().CountHotnessInCompiledCode()) {
+ UseScratchRegisterScope temps(masm);
+ Register temp = temps.AcquireX();
+ __ Ldrh(temp, MemOperand(kArtMethodRegister, ArtMethod::HotnessCountOffset().Int32Value()));
+ __ Add(temp, temp, 1);
+ __ Strh(temp, MemOperand(kArtMethodRegister, ArtMethod::HotnessCountOffset().Int32Value()));
+ }
+
bool do_overflow_check =
FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kArm64) || !IsLeafMethod();
if (do_overflow_check) {
@@ -1881,6 +1889,8 @@ void CodeGeneratorARM64::Load(DataType::Type type,
DCHECK_EQ(dst.Is64Bits(), DataType::Is64BitType(type));
__ Ldr(dst, src);
break;
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << type;
}
@@ -1959,6 +1969,8 @@ void CodeGeneratorARM64::LoadAcquire(HInstruction* instruction,
__ Fmov(FPRegister(dst), temp);
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << type;
}
@@ -1986,6 +1998,8 @@ void CodeGeneratorARM64::Store(DataType::Type type,
DCHECK_EQ(src.Is64Bits(), DataType::Is64BitType(type));
__ Str(src, dst);
break;
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << type;
}
@@ -2063,6 +2077,8 @@ void CodeGeneratorARM64::StoreRelease(HInstruction* instruction,
}
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << type;
}
@@ -2097,13 +2113,17 @@ void InstructionCodeGeneratorARM64::GenerateClassInitializationCheck(SlowPathCod
Register class_reg) {
UseScratchRegisterScope temps(GetVIXLAssembler());
Register temp = temps.AcquireW();
- size_t status_offset = mirror::Class::StatusOffset().SizeValue();
+ constexpr size_t status_lsb_position = SubtypeCheckBits::BitStructSizeOf();
+ const size_t status_byte_offset =
+ mirror::Class::StatusOffset().SizeValue() + (status_lsb_position / kBitsPerByte);
+ constexpr uint32_t shifted_initialized_value =
+ enum_cast<uint32_t>(ClassStatus::kInitialized) << (status_lsb_position % kBitsPerByte);
// Even if the initialized flag is set, we need to ensure consistent memory ordering.
// TODO(vixl): Let the MacroAssembler handle MemOperand.
- __ Add(temp, class_reg, status_offset);
+ __ Add(temp, class_reg, status_byte_offset);
__ Ldarb(temp, HeapOperand(temp));
- __ Cmp(temp, enum_cast<>(ClassStatus::kInitialized));
+ __ Cmp(temp, shifted_initialized_value);
__ B(lo, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
}
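The same pattern recurs in the ARM and MIPS backends below: the 32-bit class status word now packs SubtypeCheckBits in its low bits with the ClassStatus above them, so the check loads only the byte containing the status and compares it against kInitialized shifted into that byte's position. A sketch of the arithmetic with hypothetical constants (the real bit position comes from SubtypeCheckBits::BitStructSizeOf()):

    #include <cassert>
    #include <cstdint>

    // Hypothetical layout: the low kStatusLsb bits of the status word hold the
    // subtype-check state; the ClassStatus value occupies the bits above them.
    constexpr uint32_t kStatusLsb = 28;     // assumed value for illustration
    constexpr uint32_t kInitialized = 14;   // assumed ClassStatus::kInitialized value

    constexpr uint32_t kStatusByteOffset = kStatusLsb / 8;                     // byte to load
    constexpr uint32_t kShiftedInitialized = kInitialized << (kStatusLsb % 8);

    // On a little-endian target the byte at kStatusByteOffset holds bits
    // [8*offset, 8*offset + 8) of the word, so one unsigned byte compare
    // ("initialized if byte >= shifted value") replaces loading the whole word.
    bool IsInitialized(const uint8_t* status_word_bytes) {
      return status_word_bytes[kStatusByteOffset] >= kShiftedInitialized;
    }

    int main() {
      uint32_t status = (kInitialized << kStatusLsb) | 0x00ABCDEFu;  // status + subtype bits
      assert(IsInitialized(reinterpret_cast<const uint8_t*>(&status)));
      return 0;
    }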
@@ -3487,12 +3507,25 @@ void InstructionCodeGeneratorARM64::VisitFloatConstant(HFloatConstant* constant
}
void InstructionCodeGeneratorARM64::HandleGoto(HInstruction* got, HBasicBlock* successor) {
- DCHECK(!successor->IsExitBlock());
+ if (successor->IsExitBlock()) {
+ DCHECK(got->GetPrevious()->AlwaysThrows());
+ return; // no code needed
+ }
+
HBasicBlock* block = got->GetBlock();
HInstruction* previous = got->GetPrevious();
HLoopInformation* info = block->GetLoopInformation();
if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
+ if (codegen_->GetCompilerOptions().CountHotnessInCompiledCode()) {
+ UseScratchRegisterScope temps(GetVIXLAssembler());
+ Register temp1 = temps.AcquireX();
+ Register temp2 = temps.AcquireX();
+ __ Ldr(temp1, MemOperand(sp, 0));
+ __ Ldrh(temp2, MemOperand(temp1, ArtMethod::HotnessCountOffset().Int32Value()));
+ __ Add(temp2, temp2, 1);
+ __ Strh(temp2, MemOperand(temp1, ArtMethod::HotnessCountOffset().Int32Value()));
+ }
GenerateSuspendCheck(info->GetSuspendCheck(), successor);
return;
}
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 7c6a5fde40..6d49b32dbc 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -2485,13 +2485,36 @@ void CodeGeneratorARMVIXL::GenerateFrameEntry() {
DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
__ Bind(&frame_entry_label_);
+ if (GetCompilerOptions().CountHotnessInCompiledCode()) {
+ UseScratchRegisterScope temps(GetVIXLAssembler());
+ vixl32::Register temp = temps.Acquire();
+ __ Ldrh(temp, MemOperand(kMethodRegister, ArtMethod::HotnessCountOffset().Int32Value()));
+ __ Add(temp, temp, 1);
+ __ Strh(temp, MemOperand(kMethodRegister, ArtMethod::HotnessCountOffset().Int32Value()));
+ }
+
if (HasEmptyFrame()) {
return;
}
if (!skip_overflow_check) {
+ // Using r4 instead of IP saves 2 bytes.
UseScratchRegisterScope temps(GetVIXLAssembler());
- vixl32::Register temp = temps.Acquire();
+ vixl32::Register temp;
+ // TODO: Remove this check when R4 is made a callee-save register
+ // in ART compiled code (b/72801708). Currently we need to make
+ // sure r4 is not blocked, e.g. in special purpose
+ // TestCodeGeneratorARMVIXL; also asserting that r4 is available
+ // here.
+ if (!blocked_core_registers_[R4]) {
+ for (vixl32::Register reg : kParameterCoreRegistersVIXL) {
+ DCHECK(!reg.Is(r4));
+ }
+ DCHECK(!kCoreCalleeSaves.Includes(r4));
+ temp = r4;
+ } else {
+ temp = temps.Acquire();
+ }
__ Sub(temp, sp, Operand::From(GetStackOverflowReservedBytes(InstructionSet::kArm)));
// The load must immediately precede RecordPcInfo.
ExactAssemblyScope aas(GetVIXLAssembler(),
@@ -2642,6 +2665,8 @@ Location InvokeDexCallingConventionVisitorARMVIXL::GetNextLocation(DataType::Typ
}
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unexpected parameter type " << type;
break;
@@ -2657,6 +2682,7 @@ Location InvokeDexCallingConventionVisitorARMVIXL::GetReturnLocation(DataType::T
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
+ case DataType::Type::kUint32:
case DataType::Type::kInt32: {
return LocationFrom(r0);
}
@@ -2665,6 +2691,7 @@ Location InvokeDexCallingConventionVisitorARMVIXL::GetReturnLocation(DataType::T
return LocationFrom(s0);
}
+ case DataType::Type::kUint64:
case DataType::Type::kInt64: {
return LocationFrom(r0, r1);
}
@@ -2776,12 +2803,26 @@ void CodeGeneratorARMVIXL::InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_poi
}
void InstructionCodeGeneratorARMVIXL::HandleGoto(HInstruction* got, HBasicBlock* successor) {
- DCHECK(!successor->IsExitBlock());
+ if (successor->IsExitBlock()) {
+ DCHECK(got->GetPrevious()->AlwaysThrows());
+ return; // no code needed
+ }
+
HBasicBlock* block = got->GetBlock();
HInstruction* previous = got->GetPrevious();
HLoopInformation* info = block->GetLoopInformation();
if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
+ if (codegen_->GetCompilerOptions().CountHotnessInCompiledCode()) {
+ UseScratchRegisterScope temps(GetVIXLAssembler());
+ vixl32::Register temp = temps.Acquire();
+ __ Push(vixl32::Register(kMethodRegister));
+ GetAssembler()->LoadFromOffset(kLoadWord, kMethodRegister, sp, kArmWordSize);
+ __ Ldrh(temp, MemOperand(kMethodRegister, ArtMethod::HotnessCountOffset().Int32Value()));
+ __ Add(temp, temp, 1);
+ __ Strh(temp, MemOperand(kMethodRegister, ArtMethod::HotnessCountOffset().Int32Value()));
+ __ Pop(vixl32::Register(kMethodRegister));
+ }
GenerateSuspendCheck(info->GetSuspendCheck(), successor);
return;
}
@@ -5490,6 +5531,8 @@ void InstructionCodeGeneratorARMVIXL::HandleFieldSet(HInstruction* instruction,
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << field_type;
UNREACHABLE();
@@ -5734,6 +5777,8 @@ void InstructionCodeGeneratorARMVIXL::HandleFieldGet(HInstruction* instruction,
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << load_type;
UNREACHABLE();
@@ -6226,6 +6271,8 @@ void InstructionCodeGeneratorARMVIXL::VisitArrayGet(HArrayGet* instruction) {
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << type;
UNREACHABLE();
@@ -6515,6 +6562,8 @@ void InstructionCodeGeneratorARMVIXL::VisitArraySet(HArraySet* instruction) {
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << value_type;
UNREACHABLE();
@@ -7172,11 +7221,14 @@ void InstructionCodeGeneratorARMVIXL::GenerateClassInitializationCheck(
LoadClassSlowPathARMVIXL* slow_path, vixl32::Register class_reg) {
UseScratchRegisterScope temps(GetVIXLAssembler());
vixl32::Register temp = temps.Acquire();
- GetAssembler()->LoadFromOffset(kLoadUnsignedByte,
- temp,
- class_reg,
- mirror::Class::StatusOffset().Int32Value());
- __ Cmp(temp, enum_cast<>(ClassStatus::kInitialized));
+ constexpr size_t status_lsb_position = SubtypeCheckBits::BitStructSizeOf();
+ const size_t status_byte_offset =
+ mirror::Class::StatusOffset().SizeValue() + (status_lsb_position / kBitsPerByte);
+ constexpr uint32_t shifted_initialized_value =
+ enum_cast<uint32_t>(ClassStatus::kInitialized) << (status_lsb_position % kBitsPerByte);
+
+ GetAssembler()->LoadFromOffset(kLoadUnsignedByte, temp, class_reg, status_byte_offset);
+ __ Cmp(temp, shifted_initialized_value);
__ B(lo, slow_path->GetEntryLabel());
// Even if the initialized flag is set, we may be in a situation where caches are not synced
// properly. Therefore, we do a memory fence.
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index ebe252a9c8..855da2b18f 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -58,9 +58,11 @@ Location MipsReturnLocation(DataType::Type return_type) {
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
+ case DataType::Type::kUint32:
case DataType::Type::kInt32:
return Location::RegisterLocation(V0);
+ case DataType::Type::kUint64:
case DataType::Type::kInt64:
return Location::RegisterPairLocation(V0, V1);
@@ -140,6 +142,8 @@ Location InvokeDexCallingConventionVisitorMIPS::GetNextLocation(DataType::Type t
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unexpected parameter type " << type;
break;
@@ -391,7 +395,7 @@ class TypeCheckSlowPathMIPS : public SlowPathCodeMIPS {
CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
__ Bind(GetEntryLabel());
- if (!is_fatal_) {
+ if (!is_fatal_ || instruction_->CanThrowIntoCatchBlock()) {
SaveLiveRegisters(codegen, locations);
}
@@ -1276,6 +1280,10 @@ static dwarf::Reg DWARFReg(Register reg) {
void CodeGeneratorMIPS::GenerateFrameEntry() {
__ Bind(&frame_entry_label_);
+ if (GetCompilerOptions().CountHotnessInCompiledCode()) {
+ LOG(WARNING) << "Unimplemented hotness update in mips backend";
+ }
+
bool do_overflow_check =
FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kMips) || !IsLeafMethod();
@@ -1915,8 +1923,14 @@ void CodeGeneratorMIPS::GenerateInvokeRuntime(int32_t entry_point_offset, bool d
void InstructionCodeGeneratorMIPS::GenerateClassInitializationCheck(SlowPathCodeMIPS* slow_path,
Register class_reg) {
- __ LoadFromOffset(kLoadUnsignedByte, TMP, class_reg, mirror::Class::StatusOffset().Int32Value());
- __ LoadConst32(AT, enum_cast<>(ClassStatus::kInitialized));
+ constexpr size_t status_lsb_position = SubtypeCheckBits::BitStructSizeOf();
+ const size_t status_byte_offset =
+ mirror::Class::StatusOffset().SizeValue() + (status_lsb_position / kBitsPerByte);
+ constexpr uint32_t shifted_initialized_value =
+ enum_cast<uint32_t>(ClassStatus::kInitialized) << (status_lsb_position % kBitsPerByte);
+
+ __ LoadFromOffset(kLoadUnsignedByte, TMP, class_reg, status_byte_offset);
+ __ LoadConst32(AT, shifted_initialized_value);
__ Bltu(TMP, AT, slow_path->GetEntryLabel());
// Even if the initialized flag is set, we need to ensure consistent memory ordering.
__ Sync(0);
@@ -2811,6 +2825,8 @@ void InstructionCodeGeneratorMIPS::VisitArrayGet(HArrayGet* instruction) {
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << instruction->GetType();
UNREACHABLE();
@@ -3126,6 +3142,8 @@ void InstructionCodeGeneratorMIPS::VisitArraySet(HArraySet* instruction) {
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << instruction->GetType();
UNREACHABLE();
@@ -3265,26 +3283,8 @@ static size_t NumberOfCheckCastTemps(TypeCheckKind type_check_kind) {
}
void LocationsBuilderMIPS::VisitCheckCast(HCheckCast* instruction) {
- LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
- bool throws_into_catch = instruction->CanThrowIntoCatchBlock();
-
TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
- switch (type_check_kind) {
- case TypeCheckKind::kExactCheck:
- case TypeCheckKind::kAbstractClassCheck:
- case TypeCheckKind::kClassHierarchyCheck:
- case TypeCheckKind::kArrayObjectCheck:
- call_kind = (throws_into_catch || kEmitCompilerReadBarrier)
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall; // In fact, call on a fatal (non-returning) slow path.
- break;
- case TypeCheckKind::kArrayCheck:
- case TypeCheckKind::kUnresolvedCheck:
- case TypeCheckKind::kInterfaceCheck:
- call_kind = LocationSummary::kCallOnSlowPath;
- break;
- }
-
+ LocationSummary::CallKind call_kind = CodeGenerator::GetCheckCastCallKind(instruction);
LocationSummary* locations =
new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RequiresRegister());
@@ -3313,18 +3313,7 @@ void InstructionCodeGeneratorMIPS::VisitCheckCast(HCheckCast* instruction) {
mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
MipsLabel done;
- // Always false for read barriers since we may need to go to the entrypoint for non-fatal cases
- // from false negatives. The false negatives may come from avoiding read barriers below. Avoiding
- // read barriers is done for performance and code size reasons.
- bool is_type_check_slow_path_fatal = false;
- if (!kEmitCompilerReadBarrier) {
- is_type_check_slow_path_fatal =
- (type_check_kind == TypeCheckKind::kExactCheck ||
- type_check_kind == TypeCheckKind::kAbstractClassCheck ||
- type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
- type_check_kind == TypeCheckKind::kArrayObjectCheck) &&
- !instruction->CanThrowIntoCatchBlock();
- }
+ bool is_type_check_slow_path_fatal = CodeGenerator::IsTypeCheckSlowPathFatal(instruction);
SlowPathCodeMIPS* slow_path =
new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS(
instruction, is_type_check_slow_path_fatal);
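The per-backend switch and read-barrier special case removed here (and again in the mips64 hunks below) are replaced by the shared helpers CodeGenerator::GetCheckCastCallKind and CodeGenerator::IsTypeCheckSlowPathFatal. Their bodies are not part of this diff; the sketch below reconstructs what they presumably compute from the inline code they replace, using the surrounding ART types, and is not the actual implementation.

```cpp
// Reconstruction of the shared helpers, derived from the deleted inline logic.
// Relies on the surrounding ART declarations; shown only to restate the condition.
static bool IsTypeCheckSlowPathFatal(HCheckCast* check) {
  if (kEmitCompilerReadBarrier) {
    return false;  // Non-fatal: may need the entrypoint for false negatives.
  }
  switch (check->GetTypeCheckKind()) {
    case TypeCheckKind::kExactCheck:
    case TypeCheckKind::kAbstractClassCheck:
    case TypeCheckKind::kClassHierarchyCheck:
    case TypeCheckKind::kArrayObjectCheck:
      return !check->CanThrowIntoCatchBlock();
    default:
      return false;
  }
}

static LocationSummary::CallKind GetCheckCastCallKind(HCheckCast* check) {
  // A fatal (non-returning) slow path does not need the full call machinery.
  return IsTypeCheckSlowPathFatal(check)
      ? LocationSummary::kNoCall
      : LocationSummary::kCallOnSlowPath;
}
```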
@@ -4028,7 +4017,11 @@ void LocationsBuilderMIPS::VisitGoto(HGoto* got) {
}
void InstructionCodeGeneratorMIPS::HandleGoto(HInstruction* got, HBasicBlock* successor) {
- DCHECK(!successor->IsExitBlock());
+ if (successor->IsExitBlock()) {
+ DCHECK(got->GetPrevious()->AlwaysThrows());
+ return; // no code needed
+ }
+
HBasicBlock* block = got->GetBlock();
HInstruction* previous = got->GetPrevious();
HLoopInformation* info = block->GetLoopInformation();
@@ -6306,6 +6299,8 @@ void InstructionCodeGeneratorMIPS::HandleFieldGet(HInstruction* instruction,
case DataType::Type::kFloat64:
load_type = kLoadDoubleword;
break;
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << type;
UNREACHABLE();
@@ -6459,6 +6454,8 @@ void InstructionCodeGeneratorMIPS::HandleFieldSet(HInstruction* instruction,
case DataType::Type::kFloat64:
store_type = kStoreDoubleword;
break;
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << type;
UNREACHABLE();
@@ -7187,11 +7184,12 @@ void LocationsBuilderMIPS::VisitInstanceOf(HInstanceOf* instruction) {
case TypeCheckKind::kExactCheck:
case TypeCheckKind::kAbstractClassCheck:
case TypeCheckKind::kClassHierarchyCheck:
- case TypeCheckKind::kArrayObjectCheck:
- call_kind =
- kEmitCompilerReadBarrier ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall;
- baker_read_barrier_slow_path = kUseBakerReadBarrier;
+ case TypeCheckKind::kArrayObjectCheck: {
+ bool needs_read_barrier = CodeGenerator::InstanceOfNeedsReadBarrier(instruction);
+ call_kind = needs_read_barrier ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall;
+ baker_read_barrier_slow_path = kUseBakerReadBarrier && needs_read_barrier;
break;
+ }
case TypeCheckKind::kArrayCheck:
case TypeCheckKind::kUnresolvedCheck:
case TypeCheckKind::kInterfaceCheck:
@@ -7239,13 +7237,15 @@ void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
switch (type_check_kind) {
case TypeCheckKind::kExactCheck: {
+ ReadBarrierOption read_barrier_option =
+ CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
obj_loc,
class_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// Classes must be equal for the instanceof to succeed.
__ Xor(out, out, cls);
__ Sltiu(out, out, 1);
@@ -7253,13 +7253,15 @@ void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
}
case TypeCheckKind::kAbstractClassCheck: {
+ ReadBarrierOption read_barrier_option =
+ CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
obj_loc,
class_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// If the class is abstract, we eagerly fetch the super class of the
// object to avoid doing a comparison we know will fail.
MipsLabel loop;
@@ -7269,7 +7271,7 @@ void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
out_loc,
super_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// If `out` is null, we use it for the result, and jump to `done`.
__ Beqz(out, &done);
__ Bne(out, cls, &loop);
@@ -7278,13 +7280,15 @@ void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
}
case TypeCheckKind::kClassHierarchyCheck: {
+ ReadBarrierOption read_barrier_option =
+ CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
obj_loc,
class_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// Walk over the class hierarchy to find a match.
MipsLabel loop, success;
__ Bind(&loop);
@@ -7294,7 +7298,7 @@ void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
out_loc,
super_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
__ Bnez(out, &loop);
// If `out` is null, we use it for the result, and jump to `done`.
__ B(&done);
@@ -7304,13 +7308,15 @@ void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
}
case TypeCheckKind::kArrayObjectCheck: {
+ ReadBarrierOption read_barrier_option =
+ CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
obj_loc,
class_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// Do an exact check.
MipsLabel success;
__ Beq(out, cls, &success);
@@ -7320,7 +7326,7 @@ void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
out_loc,
component_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// If `out` is null, we use it for the result, and jump to `done`.
__ Beqz(out, &done);
__ LoadFromOffset(kLoadUnsignedHalfword, out, out, primitive_offset);
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 3ea7b827bb..8a06061c6a 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -55,8 +55,10 @@ Location Mips64ReturnLocation(DataType::Type return_type) {
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
+ case DataType::Type::kUint32:
case DataType::Type::kInt32:
case DataType::Type::kReference:
+ case DataType::Type::kUint64:
case DataType::Type::kInt64:
return Location::RegisterLocation(V0);
@@ -350,7 +352,7 @@ class TypeCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
__ Bind(GetEntryLabel());
- if (!is_fatal_) {
+ if (!is_fatal_ || instruction_->CanThrowIntoCatchBlock()) {
SaveLiveRegisters(codegen, locations);
}
@@ -1079,6 +1081,10 @@ static dwarf::Reg DWARFReg(FpuRegister reg) {
void CodeGeneratorMIPS64::GenerateFrameEntry() {
__ Bind(&frame_entry_label_);
+ if (GetCompilerOptions().CountHotnessInCompiledCode()) {
+ LOG(WARNING) << "Unimplemented hotness update in mips64 backend";
+ }
+
bool do_overflow_check =
FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kMips64) || !IsLeafMethod();
@@ -1761,8 +1767,14 @@ void CodeGeneratorMIPS64::GenerateInvokeRuntime(int32_t entry_point_offset) {
void InstructionCodeGeneratorMIPS64::GenerateClassInitializationCheck(SlowPathCodeMIPS64* slow_path,
GpuRegister class_reg) {
- __ LoadFromOffset(kLoadUnsignedByte, TMP, class_reg, mirror::Class::StatusOffset().Int32Value());
- __ LoadConst32(AT, enum_cast<>(ClassStatus::kInitialized));
+ constexpr size_t status_lsb_position = SubtypeCheckBits::BitStructSizeOf();
+ const size_t status_byte_offset =
+ mirror::Class::StatusOffset().SizeValue() + (status_lsb_position / kBitsPerByte);
+ constexpr uint32_t shifted_initialized_value =
+ enum_cast<uint32_t>(ClassStatus::kInitialized) << (status_lsb_position % kBitsPerByte);
+
+ __ LoadFromOffset(kLoadUnsignedByte, TMP, class_reg, status_byte_offset);
+ __ LoadConst32(AT, shifted_initialized_value);
__ Bltuc(TMP, AT, slow_path->GetEntryLabel());
// Even if the initialized flag is set, we need to ensure consistent memory ordering.
__ Sync(0);
@@ -2398,6 +2410,8 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << instruction->GetType();
UNREACHABLE();
@@ -2701,6 +2715,8 @@ void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) {
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << instruction->GetType();
UNREACHABLE();
@@ -2820,26 +2836,8 @@ static size_t NumberOfCheckCastTemps(TypeCheckKind type_check_kind) {
}
void LocationsBuilderMIPS64::VisitCheckCast(HCheckCast* instruction) {
- LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
- bool throws_into_catch = instruction->CanThrowIntoCatchBlock();
-
TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
- switch (type_check_kind) {
- case TypeCheckKind::kExactCheck:
- case TypeCheckKind::kAbstractClassCheck:
- case TypeCheckKind::kClassHierarchyCheck:
- case TypeCheckKind::kArrayObjectCheck:
- call_kind = (throws_into_catch || kEmitCompilerReadBarrier)
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall; // In fact, call on a fatal (non-returning) slow path.
- break;
- case TypeCheckKind::kArrayCheck:
- case TypeCheckKind::kUnresolvedCheck:
- case TypeCheckKind::kInterfaceCheck:
- call_kind = LocationSummary::kCallOnSlowPath;
- break;
- }
-
+ LocationSummary::CallKind call_kind = CodeGenerator::GetCheckCastCallKind(instruction);
LocationSummary* locations =
new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RequiresRegister());
@@ -2868,18 +2866,7 @@ void InstructionCodeGeneratorMIPS64::VisitCheckCast(HCheckCast* instruction) {
mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
Mips64Label done;
- // Always false for read barriers since we may need to go to the entrypoint for non-fatal cases
- // from false negatives. The false negatives may come from avoiding read barriers below. Avoiding
- // read barriers is done for performance and code size reasons.
- bool is_type_check_slow_path_fatal = false;
- if (!kEmitCompilerReadBarrier) {
- is_type_check_slow_path_fatal =
- (type_check_kind == TypeCheckKind::kExactCheck ||
- type_check_kind == TypeCheckKind::kAbstractClassCheck ||
- type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
- type_check_kind == TypeCheckKind::kArrayObjectCheck) &&
- !instruction->CanThrowIntoCatchBlock();
- }
+ bool is_type_check_slow_path_fatal = CodeGenerator::IsTypeCheckSlowPathFatal(instruction);
SlowPathCodeMIPS64* slow_path =
new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS64(
instruction, is_type_check_slow_path_fatal);
@@ -3556,7 +3543,11 @@ void InstructionCodeGeneratorMIPS64::VisitFloatConstant(HFloatConstant* constant
}
void InstructionCodeGeneratorMIPS64::HandleGoto(HInstruction* got, HBasicBlock* successor) {
- DCHECK(!successor->IsExitBlock());
+ if (successor->IsExitBlock()) {
+ DCHECK(got->GetPrevious()->AlwaysThrows());
+ return; // no code needed
+ }
+
HBasicBlock* block = got->GetBlock();
HInstruction* previous = got->GetPrevious();
HLoopInformation* info = block->GetLoopInformation();
@@ -4784,6 +4775,8 @@ void InstructionCodeGeneratorMIPS64::HandleFieldGet(HInstruction* instruction,
case DataType::Type::kReference:
load_type = kLoadUnsignedWord;
break;
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << type;
UNREACHABLE();
@@ -4877,6 +4870,8 @@ void InstructionCodeGeneratorMIPS64::HandleFieldSet(HInstruction* instruction,
case DataType::Type::kFloat64:
store_type = kStoreDoubleword;
break;
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << type;
UNREACHABLE();
@@ -5504,11 +5499,12 @@ void LocationsBuilderMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
case TypeCheckKind::kExactCheck:
case TypeCheckKind::kAbstractClassCheck:
case TypeCheckKind::kClassHierarchyCheck:
- case TypeCheckKind::kArrayObjectCheck:
- call_kind =
- kEmitCompilerReadBarrier ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall;
- baker_read_barrier_slow_path = kUseBakerReadBarrier;
+ case TypeCheckKind::kArrayObjectCheck: {
+ bool needs_read_barrier = CodeGenerator::InstanceOfNeedsReadBarrier(instruction);
+ call_kind = needs_read_barrier ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall;
+ baker_read_barrier_slow_path = kUseBakerReadBarrier && needs_read_barrier;
break;
+ }
case TypeCheckKind::kArrayCheck:
case TypeCheckKind::kUnresolvedCheck:
case TypeCheckKind::kInterfaceCheck:
@@ -5556,13 +5552,15 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
switch (type_check_kind) {
case TypeCheckKind::kExactCheck: {
+ ReadBarrierOption read_barrier_option =
+ CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
obj_loc,
class_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// Classes must be equal for the instanceof to succeed.
__ Xor(out, out, cls);
__ Sltiu(out, out, 1);
@@ -5570,13 +5568,15 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
}
case TypeCheckKind::kAbstractClassCheck: {
+ ReadBarrierOption read_barrier_option =
+ CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
obj_loc,
class_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// If the class is abstract, we eagerly fetch the super class of the
// object to avoid doing a comparison we know will fail.
Mips64Label loop;
@@ -5586,7 +5586,7 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
out_loc,
super_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// If `out` is null, we use it for the result, and jump to `done`.
__ Beqzc(out, &done);
__ Bnec(out, cls, &loop);
@@ -5595,13 +5595,15 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
}
case TypeCheckKind::kClassHierarchyCheck: {
+ ReadBarrierOption read_barrier_option =
+ CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
obj_loc,
class_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// Walk over the class hierarchy to find a match.
Mips64Label loop, success;
__ Bind(&loop);
@@ -5611,7 +5613,7 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
out_loc,
super_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
__ Bnezc(out, &loop);
// If `out` is null, we use it for the result, and jump to `done`.
__ Bc(&done);
@@ -5621,13 +5623,15 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
}
case TypeCheckKind::kArrayObjectCheck: {
+ ReadBarrierOption read_barrier_option =
+ CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
obj_loc,
class_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// Do an exact check.
Mips64Label success;
__ Beqc(out, cls, &success);
@@ -5637,7 +5641,7 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
out_loc,
component_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// If `out` is null, we use it for the result, and jump to `done`.
__ Beqzc(out, &done);
__ LoadFromOffset(kLoadUnsignedHalfword, out, out, primitive_offset);
diff --git a/compiler/optimizing/code_generator_vector_arm64.cc b/compiler/optimizing/code_generator_vector_arm64.cc
index 152a59c208..174efdf115 100644
--- a/compiler/optimizing/code_generator_vector_arm64.cc
+++ b/compiler/optimizing/code_generator_vector_arm64.cc
@@ -606,22 +606,20 @@ void InstructionCodeGeneratorARM64::VisitVecMin(HVecMin* instruction) {
DCHECK_EQ(8u, instruction->GetVectorLength());
__ Smin(dst.V8H(), lhs.V8H(), rhs.V8H());
break;
+ case DataType::Type::kUint32:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ Umin(dst.V4S(), lhs.V4S(), rhs.V4S());
+ break;
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Umin(dst.V4S(), lhs.V4S(), rhs.V4S());
- } else {
- __ Smin(dst.V4S(), lhs.V4S(), rhs.V4S());
- }
+ __ Smin(dst.V4S(), lhs.V4S(), rhs.V4S());
break;
case DataType::Type::kFloat32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ Fmin(dst.V4S(), lhs.V4S(), rhs.V4S());
break;
case DataType::Type::kFloat64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ Fmin(dst.V2D(), lhs.V2D(), rhs.V2D());
break;
default:
@@ -656,22 +654,20 @@ void InstructionCodeGeneratorARM64::VisitVecMax(HVecMax* instruction) {
DCHECK_EQ(8u, instruction->GetVectorLength());
__ Smax(dst.V8H(), lhs.V8H(), rhs.V8H());
break;
+ case DataType::Type::kUint32:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ Umax(dst.V4S(), lhs.V4S(), rhs.V4S());
+ break;
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Umax(dst.V4S(), lhs.V4S(), rhs.V4S());
- } else {
- __ Smax(dst.V4S(), lhs.V4S(), rhs.V4S());
- }
+ __ Smax(dst.V4S(), lhs.V4S(), rhs.V4S());
break;
case DataType::Type::kFloat32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ Fmax(dst.V4S(), lhs.V4S(), rhs.V4S());
break;
case DataType::Type::kFloat64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ Fmax(dst.V2D(), lhs.V2D(), rhs.V2D());
break;
default:
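Across the vector backends (arm64 here, arm, mips, mips64, x86 and x86-64 below), the unsigned min/max variants are no longer selected by querying HVecMin/HVecMax::IsUnsigned() inside the signed kInt32/kInt64 cases; the signedness now lives in the packed data type itself via the new kUint32/kUint64 cases. The plain C++ example below (unrelated to ART types) shows why the two instruction families cannot be interchanged.

```cpp
// Why separate unsigned cases exist: the same 32-bit pattern orders differently
// under signed and unsigned comparison, so SMIN/UMIN (or pminsd/pminud) differ.
#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  constexpr uint32_t bits = 0xFFFFFFFFu;  // reads as -1 when treated as int32_t
  int32_t  s = std::min(static_cast<int32_t>(bits), int32_t{1});  // -1
  uint32_t u = std::min(bits, uint32_t{1});                       // 1
  std::printf("signed min = %d, unsigned min = %u\n", s, u);
  return 0;
}
```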
diff --git a/compiler/optimizing/code_generator_vector_arm_vixl.cc b/compiler/optimizing/code_generator_vector_arm_vixl.cc
index cc470ddb2e..7c3155ab73 100644
--- a/compiler/optimizing/code_generator_vector_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_vector_arm_vixl.cc
@@ -431,13 +431,13 @@ void InstructionCodeGeneratorARMVIXL::VisitVecMin(HVecMin* instruction) {
DCHECK_EQ(4u, instruction->GetVectorLength());
__ Vmin(DataTypeValue::S16, dst, lhs, rhs);
break;
+ case DataType::Type::kUint32:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ Vmin(DataTypeValue::U32, dst, lhs, rhs);
+ break;
case DataType::Type::kInt32:
DCHECK_EQ(2u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Vmin(DataTypeValue::U32, dst, lhs, rhs);
- } else {
- __ Vmin(DataTypeValue::S32, dst, lhs, rhs);
- }
+ __ Vmin(DataTypeValue::S32, dst, lhs, rhs);
break;
default:
LOG(FATAL) << "Unsupported SIMD type";
@@ -471,13 +471,13 @@ void InstructionCodeGeneratorARMVIXL::VisitVecMax(HVecMax* instruction) {
DCHECK_EQ(4u, instruction->GetVectorLength());
__ Vmax(DataTypeValue::S16, dst, lhs, rhs);
break;
+ case DataType::Type::kUint32:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ Vmax(DataTypeValue::U32, dst, lhs, rhs);
+ break;
case DataType::Type::kInt32:
DCHECK_EQ(2u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Vmax(DataTypeValue::U32, dst, lhs, rhs);
- } else {
- __ Vmax(DataTypeValue::S32, dst, lhs, rhs);
- }
+ __ Vmax(DataTypeValue::S32, dst, lhs, rhs);
break;
default:
LOG(FATAL) << "Unsupported SIMD type";
diff --git a/compiler/optimizing/code_generator_vector_mips.cc b/compiler/optimizing/code_generator_vector_mips.cc
index 3cf150a6b8..ed9de96496 100644
--- a/compiler/optimizing/code_generator_vector_mips.cc
+++ b/compiler/optimizing/code_generator_vector_mips.cc
@@ -613,32 +613,30 @@ void InstructionCodeGeneratorMIPS::VisitVecMin(HVecMin* instruction) {
DCHECK_EQ(8u, instruction->GetVectorLength());
__ Min_sH(dst, lhs, rhs);
break;
+ case DataType::Type::kUint32:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ Min_uW(dst, lhs, rhs);
+ break;
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Min_uW(dst, lhs, rhs);
- } else {
- __ Min_sW(dst, lhs, rhs);
- }
+ __ Min_sW(dst, lhs, rhs);
+ break;
+ case DataType::Type::kUint64:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ Min_uD(dst, lhs, rhs);
break;
case DataType::Type::kInt64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Min_uD(dst, lhs, rhs);
- } else {
- __ Min_sD(dst, lhs, rhs);
- }
+ __ Min_sD(dst, lhs, rhs);
break;
// When one of arguments is NaN, fmin.df returns other argument, but Java expects a NaN value.
// TODO: Fix min(x, NaN) cases for float and double.
case DataType::Type::kFloat32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ FminW(dst, lhs, rhs);
break;
case DataType::Type::kFloat64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ FminD(dst, lhs, rhs);
break;
default:
@@ -673,32 +671,30 @@ void InstructionCodeGeneratorMIPS::VisitVecMax(HVecMax* instruction) {
DCHECK_EQ(8u, instruction->GetVectorLength());
__ Max_sH(dst, lhs, rhs);
break;
+ case DataType::Type::kUint32:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ Max_uW(dst, lhs, rhs);
+ break;
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Max_uW(dst, lhs, rhs);
- } else {
- __ Max_sW(dst, lhs, rhs);
- }
+ __ Max_sW(dst, lhs, rhs);
+ break;
+ case DataType::Type::kUint64:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ Max_uD(dst, lhs, rhs);
break;
case DataType::Type::kInt64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Max_uD(dst, lhs, rhs);
- } else {
- __ Max_sD(dst, lhs, rhs);
- }
+ __ Max_sD(dst, lhs, rhs);
break;
// When one of arguments is NaN, fmax.df returns other argument, but Java expects a NaN value.
// TODO: Fix max(x, NaN) cases for float and double.
case DataType::Type::kFloat32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ FmaxW(dst, lhs, rhs);
break;
case DataType::Type::kFloat64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ FmaxD(dst, lhs, rhs);
break;
default:
diff --git a/compiler/optimizing/code_generator_vector_mips64.cc b/compiler/optimizing/code_generator_vector_mips64.cc
index 2d69533f21..9ea55ec8d7 100644
--- a/compiler/optimizing/code_generator_vector_mips64.cc
+++ b/compiler/optimizing/code_generator_vector_mips64.cc
@@ -612,32 +612,30 @@ void InstructionCodeGeneratorMIPS64::VisitVecMin(HVecMin* instruction) {
DCHECK_EQ(8u, instruction->GetVectorLength());
__ Min_sH(dst, lhs, rhs);
break;
+ case DataType::Type::kUint32:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ Min_uW(dst, lhs, rhs);
+ break;
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Min_uW(dst, lhs, rhs);
- } else {
- __ Min_sW(dst, lhs, rhs);
- }
+ __ Min_sW(dst, lhs, rhs);
+ break;
+ case DataType::Type::kUint64:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ Min_uD(dst, lhs, rhs);
break;
case DataType::Type::kInt64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Min_uD(dst, lhs, rhs);
- } else {
- __ Min_sD(dst, lhs, rhs);
- }
+ __ Min_sD(dst, lhs, rhs);
break;
// When one of arguments is NaN, fmin.df returns other argument, but Java expects a NaN value.
// TODO: Fix min(x, NaN) cases for float and double.
case DataType::Type::kFloat32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ FminW(dst, lhs, rhs);
break;
case DataType::Type::kFloat64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ FminD(dst, lhs, rhs);
break;
default:
@@ -672,32 +670,30 @@ void InstructionCodeGeneratorMIPS64::VisitVecMax(HVecMax* instruction) {
DCHECK_EQ(8u, instruction->GetVectorLength());
__ Max_sH(dst, lhs, rhs);
break;
+ case DataType::Type::kUint32:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ Max_uW(dst, lhs, rhs);
+ break;
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Max_uW(dst, lhs, rhs);
- } else {
- __ Max_sW(dst, lhs, rhs);
- }
+ __ Max_sW(dst, lhs, rhs);
+ break;
+ case DataType::Type::kUint64:
+ DCHECK_EQ(2u, instruction->GetVectorLength());
+ __ Max_uD(dst, lhs, rhs);
break;
case DataType::Type::kInt64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ Max_uD(dst, lhs, rhs);
- } else {
- __ Max_sD(dst, lhs, rhs);
- }
+ __ Max_sD(dst, lhs, rhs);
break;
// When one of arguments is NaN, fmax.df returns other argument, but Java expects a NaN value.
// TODO: Fix max(x, NaN) cases for float and double.
case DataType::Type::kFloat32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ FmaxW(dst, lhs, rhs);
break;
case DataType::Type::kFloat64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ FmaxD(dst, lhs, rhs);
break;
default:
diff --git a/compiler/optimizing/code_generator_vector_x86.cc b/compiler/optimizing/code_generator_vector_x86.cc
index ad8128a5b1..f2ffccc887 100644
--- a/compiler/optimizing/code_generator_vector_x86.cc
+++ b/compiler/optimizing/code_generator_vector_x86.cc
@@ -92,8 +92,8 @@ void InstructionCodeGeneratorX86::VisitVecReplicateScalar(HVecReplicateScalar* i
__ pshufd(dst, dst, Immediate(0));
break;
case DataType::Type::kInt64: {
- XmmRegister tmp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
DCHECK_EQ(2u, instruction->GetVectorLength());
+ XmmRegister tmp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
__ movd(dst, locations->InAt(0).AsRegisterPairLow<Register>());
__ movd(tmp, locations->InAt(0).AsRegisterPairHigh<Register>());
__ punpckldq(dst, tmp);
@@ -101,13 +101,13 @@ void InstructionCodeGeneratorX86::VisitVecReplicateScalar(HVecReplicateScalar* i
break;
}
case DataType::Type::kFloat32:
- DCHECK(locations->InAt(0).Equals(locations->Out()));
DCHECK_EQ(4u, instruction->GetVectorLength());
+ DCHECK(locations->InAt(0).Equals(locations->Out()));
__ shufps(dst, dst, Immediate(0));
break;
case DataType::Type::kFloat64:
- DCHECK(locations->InAt(0).Equals(locations->Out()));
DCHECK_EQ(2u, instruction->GetVectorLength());
+ DCHECK(locations->InAt(0).Equals(locations->Out()));
__ shufpd(dst, dst, Immediate(0));
break;
default:
@@ -160,8 +160,8 @@ void InstructionCodeGeneratorX86::VisitVecExtractScalar(HVecExtractScalar* instr
__ movd(locations->Out().AsRegister<Register>(), src);
break;
case DataType::Type::kInt64: {
- XmmRegister tmp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
DCHECK_EQ(2u, instruction->GetVectorLength());
+ XmmRegister tmp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
__ movd(locations->Out().AsRegisterPairLow<Register>(), src);
__ pshufd(tmp, src, Immediate(1));
__ movd(locations->Out().AsRegisterPairHigh<Register>(), tmp);
@@ -640,23 +640,21 @@ void InstructionCodeGeneratorX86::VisitVecMin(HVecMin* instruction) {
DCHECK_EQ(8u, instruction->GetVectorLength());
__ pminsw(dst, src);
break;
+ case DataType::Type::kUint32:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ pminud(dst, src);
+ break;
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ pminud(dst, src);
- } else {
- __ pminsd(dst, src);
- }
+ __ pminsd(dst, src);
break;
// Next cases are sloppy wrt 0.0 vs -0.0.
case DataType::Type::kFloat32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ minps(dst, src);
break;
case DataType::Type::kFloat64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ minpd(dst, src);
break;
default:
@@ -691,23 +689,21 @@ void InstructionCodeGeneratorX86::VisitVecMax(HVecMax* instruction) {
DCHECK_EQ(8u, instruction->GetVectorLength());
__ pmaxsw(dst, src);
break;
+ case DataType::Type::kUint32:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ pmaxud(dst, src);
+ break;
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ pmaxud(dst, src);
- } else {
- __ pmaxsd(dst, src);
- }
+ __ pmaxsd(dst, src);
break;
// Next cases are sloppy wrt 0.0 vs -0.0.
case DataType::Type::kFloat32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ maxps(dst, src);
break;
case DataType::Type::kFloat64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ maxpd(dst, src);
break;
default:
@@ -1022,8 +1018,8 @@ void InstructionCodeGeneratorX86::VisitVecSetScalars(HVecSetScalars* instruction
__ movd(dst, locations->InAt(0).AsRegister<Register>());
break;
case DataType::Type::kInt64: {
- XmmRegister tmp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
DCHECK_EQ(2u, instruction->GetVectorLength());
+ XmmRegister tmp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
__ xorps(tmp, tmp);
__ movd(dst, locations->InAt(0).AsRegisterPairLow<Register>());
__ movd(tmp, locations->InAt(0).AsRegisterPairHigh<Register>());
diff --git a/compiler/optimizing/code_generator_vector_x86_64.cc b/compiler/optimizing/code_generator_vector_x86_64.cc
index 107030e6c2..e2b0485f89 100644
--- a/compiler/optimizing/code_generator_vector_x86_64.cc
+++ b/compiler/optimizing/code_generator_vector_x86_64.cc
@@ -623,23 +623,21 @@ void InstructionCodeGeneratorX86_64::VisitVecMin(HVecMin* instruction) {
DCHECK_EQ(8u, instruction->GetVectorLength());
__ pminsw(dst, src);
break;
+ case DataType::Type::kUint32:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ pminud(dst, src);
+ break;
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ pminud(dst, src);
- } else {
- __ pminsd(dst, src);
- }
+ __ pminsd(dst, src);
break;
// Next cases are sloppy wrt 0.0 vs -0.0.
case DataType::Type::kFloat32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ minps(dst, src);
break;
case DataType::Type::kFloat64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ minpd(dst, src);
break;
default:
@@ -674,23 +672,21 @@ void InstructionCodeGeneratorX86_64::VisitVecMax(HVecMax* instruction) {
DCHECK_EQ(8u, instruction->GetVectorLength());
__ pmaxsw(dst, src);
break;
+ case DataType::Type::kUint32:
+ DCHECK_EQ(4u, instruction->GetVectorLength());
+ __ pmaxud(dst, src);
+ break;
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- if (instruction->IsUnsigned()) {
- __ pmaxud(dst, src);
- } else {
- __ pmaxsd(dst, src);
- }
+ __ pmaxsd(dst, src);
break;
// Next cases are sloppy wrt 0.0 vs -0.0.
case DataType::Type::kFloat32:
DCHECK_EQ(4u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ maxps(dst, src);
break;
case DataType::Type::kFloat64:
DCHECK_EQ(2u, instruction->GetVectorLength());
- DCHECK(!instruction->IsUnsigned());
__ maxpd(dst, src);
break;
default:
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 68532386e1..5fede80bc7 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1061,6 +1061,11 @@ void CodeGeneratorX86::GenerateFrameEntry() {
IsLeafMethod() && !FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kX86);
DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
+ if (GetCompilerOptions().CountHotnessInCompiledCode()) {
+ __ addw(Address(kMethodRegisterArgument, ArtMethod::HotnessCountOffset().Int32Value()),
+ Immediate(1));
+ }
+
if (!skip_overflow_check) {
size_t reserved_bytes = GetStackOverflowReservedBytes(InstructionSet::kX86);
__ testl(EAX, Address(ESP, -static_cast<int32_t>(reserved_bytes)));
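With CountHotnessInCompiledCode enabled, the x86 and x86-64 prologues (and their loop back edges, further down) bump the method's hotness counter with a 16-bit addw against ArtMethod::HotnessCountOffset, while the mips and mips64 backends only log that the update is unimplemented. A rough C++ equivalent of the emitted instruction is sketched below; the stand-in struct and the exact counter width are assumptions for illustration only.

```cpp
// Rough C++ equivalent of the emitted `addw [method + HotnessCountOffset], 1`.
// FakeArtMethod is a hypothetical stand-in, not the real ArtMethod.
#include <cstdint>

struct FakeArtMethod {
  uint16_t hotness_count_ = 0;  // stand-in for the counter field targeted by addw
};

inline void BumpHotness(FakeArtMethod* method) {
  // addw is a 16-bit add, so the counter simply wraps on overflow.
  method->hotness_count_ = static_cast<uint16_t>(method->hotness_count_ + 1);
}
```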
@@ -1129,9 +1134,11 @@ Location InvokeDexCallingConventionVisitorX86::GetReturnLocation(DataType::Type
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
+ case DataType::Type::kUint32:
case DataType::Type::kInt32:
return Location::RegisterLocation(EAX);
+ case DataType::Type::kUint64:
case DataType::Type::kInt64:
return Location::RegisterPairLocation(EAX, EDX);
@@ -1201,6 +1208,8 @@ Location InvokeDexCallingConventionVisitorX86::GetNextLocation(DataType::Type ty
}
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unexpected parameter type " << type;
break;
@@ -1347,13 +1356,22 @@ void CodeGeneratorX86::AddLocationAsTemp(Location location, LocationSummary* loc
}
void InstructionCodeGeneratorX86::HandleGoto(HInstruction* got, HBasicBlock* successor) {
- DCHECK(!successor->IsExitBlock());
+ if (successor->IsExitBlock()) {
+ DCHECK(got->GetPrevious()->AlwaysThrows());
+ return; // no code needed
+ }
HBasicBlock* block = got->GetBlock();
HInstruction* previous = got->GetPrevious();
HLoopInformation* info = block->GetLoopInformation();
if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
+ if (codegen_->GetCompilerOptions().CountHotnessInCompiledCode()) {
+ __ pushl(EAX);
+ __ movl(EAX, Address(ESP, kX86WordSize));
+ __ addw(Address(EAX, ArtMethod::HotnessCountOffset().Int32Value()), Immediate(1));
+ __ popl(EAX);
+ }
GenerateSuspendCheck(info->GetSuspendCheck(), successor);
return;
}
@@ -4830,6 +4848,8 @@ void InstructionCodeGeneratorX86::HandleFieldGet(HInstruction* instruction,
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << load_type;
UNREACHABLE();
@@ -5003,6 +5023,8 @@ void InstructionCodeGeneratorX86::HandleFieldSet(HInstruction* instruction,
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << field_type;
UNREACHABLE();
@@ -5306,6 +5328,8 @@ void InstructionCodeGeneratorX86::VisitArrayGet(HArrayGet* instruction) {
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << type;
UNREACHABLE();
@@ -5557,6 +5581,8 @@ void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) {
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << instruction->GetType();
UNREACHABLE();
@@ -6219,8 +6245,13 @@ void InstructionCodeGeneratorX86::VisitClinitCheck(HClinitCheck* check) {
void InstructionCodeGeneratorX86::GenerateClassInitializationCheck(
SlowPathCode* slow_path, Register class_reg) {
- __ cmpb(Address(class_reg, mirror::Class::StatusOffset().Int32Value()),
- Immediate(enum_cast<>(ClassStatus::kInitialized)));
+ constexpr size_t status_lsb_position = SubtypeCheckBits::BitStructSizeOf();
+ const size_t status_byte_offset =
+ mirror::Class::StatusOffset().SizeValue() + (status_lsb_position / kBitsPerByte);
+ constexpr uint32_t shifted_initialized_value =
+ enum_cast<uint32_t>(ClassStatus::kInitialized) << (status_lsb_position % kBitsPerByte);
+
+ __ cmpb(Address(class_reg, status_byte_offset), Immediate(shifted_initialized_value));
__ j(kBelow, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
// No need for memory fence, thanks to the X86 memory model.
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 1f8d822507..ae35ab5983 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1268,6 +1268,12 @@ void CodeGeneratorX86_64::GenerateFrameEntry() {
&& !FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kX86_64);
DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
+ if (GetCompilerOptions().CountHotnessInCompiledCode()) {
+ __ addw(Address(CpuRegister(kMethodRegisterArgument),
+ ArtMethod::HotnessCountOffset().Int32Value()),
+ Immediate(1));
+ }
+
if (!skip_overflow_check) {
size_t reserved_bytes = GetStackOverflowReservedBytes(InstructionSet::kX86_64);
__ testq(CpuRegister(RAX), Address(CpuRegister(RSP), -static_cast<int32_t>(reserved_bytes)));
@@ -1449,13 +1455,21 @@ void CodeGeneratorX86_64::AddLocationAsTemp(Location location, LocationSummary*
}
void InstructionCodeGeneratorX86_64::HandleGoto(HInstruction* got, HBasicBlock* successor) {
- DCHECK(!successor->IsExitBlock());
+ if (successor->IsExitBlock()) {
+ DCHECK(got->GetPrevious()->AlwaysThrows());
+ return; // no code needed
+ }
HBasicBlock* block = got->GetBlock();
HInstruction* previous = got->GetPrevious();
HLoopInformation* info = block->GetLoopInformation();
if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
+ if (codegen_->GetCompilerOptions().CountHotnessInCompiledCode()) {
+ __ movq(CpuRegister(TMP), Address(CpuRegister(RSP), 0));
+ __ addw(Address(CpuRegister(TMP), ArtMethod::HotnessCountOffset().Int32Value()),
+ Immediate(1));
+ }
GenerateSuspendCheck(info->GetSuspendCheck(), successor);
return;
}
@@ -2259,7 +2273,9 @@ Location InvokeDexCallingConventionVisitorX86_64::GetReturnLocation(DataType::Ty
case DataType::Type::kInt8:
case DataType::Type::kUint16:
case DataType::Type::kInt16:
+ case DataType::Type::kUint32:
case DataType::Type::kInt32:
+ case DataType::Type::kUint64:
case DataType::Type::kInt64:
return Location::RegisterLocation(RAX);
@@ -2328,6 +2344,8 @@ Location InvokeDexCallingConventionVisitorX86_64::GetNextLocation(DataType::Type
}
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unexpected parameter type " << type;
break;
@@ -4293,6 +4311,8 @@ void InstructionCodeGeneratorX86_64::HandleFieldGet(HInstruction* instruction,
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << load_type;
UNREACHABLE();
@@ -4456,6 +4476,8 @@ void InstructionCodeGeneratorX86_64::HandleFieldSet(HInstruction* instruction,
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << field_type;
UNREACHABLE();
@@ -4749,6 +4771,8 @@ void InstructionCodeGeneratorX86_64::VisitArrayGet(HArrayGet* instruction) {
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << type;
UNREACHABLE();
@@ -4988,6 +5012,8 @@ void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) {
break;
}
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unreachable type " << instruction->GetType();
UNREACHABLE();
@@ -5425,8 +5451,13 @@ void ParallelMoveResolverX86_64::RestoreScratch(int reg) {
void InstructionCodeGeneratorX86_64::GenerateClassInitializationCheck(
SlowPathCode* slow_path, CpuRegister class_reg) {
- __ cmpb(Address(class_reg, mirror::Class::StatusOffset().Int32Value()),
- Immediate(enum_cast<>(ClassStatus::kInitialized)));
+ constexpr size_t status_lsb_position = SubtypeCheckBits::BitStructSizeOf();
+ const size_t status_byte_offset =
+ mirror::Class::StatusOffset().SizeValue() + (status_lsb_position / kBitsPerByte);
+ constexpr uint32_t shifted_initialized_value =
+ enum_cast<uint32_t>(ClassStatus::kInitialized) << (status_lsb_position % kBitsPerByte);
+
+ __ cmpb(Address(class_reg, status_byte_offset), Immediate(shifted_initialized_value));
__ j(kBelow, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
// No need for memory fence, thanks to the x86-64 memory model.
diff --git a/compiler/optimizing/code_sinking.cc b/compiler/optimizing/code_sinking.cc
index d8ebac95a8..f4760d661f 100644
--- a/compiler/optimizing/code_sinking.cc
+++ b/compiler/optimizing/code_sinking.cc
@@ -34,7 +34,9 @@ void CodeSinking::Run() {
// TODO(ngeoffray): we do not profile branches yet, so use throw instructions
// as an indicator of an uncommon branch.
for (HBasicBlock* exit_predecessor : exit->GetPredecessors()) {
- if (exit_predecessor->GetLastInstruction()->IsThrow()) {
+ HInstruction* last = exit_predecessor->GetLastInstruction();
+ // Any predecessor of the exit that does not return throws an exception.
+ if (!last->IsReturn() && !last->IsReturnVoid()) {
SinkCodeToUncommonBranch(exit_predecessor);
}
}
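Code sinking previously only fired on exit predecessors ending in an explicit HThrow. Together with the HandleGoto changes above, a block may now end with an always-throwing instruction followed by a goto to the exit, so the trigger becomes "does not end in a return". The predicate is isolated below for readability; it uses the surrounding ART types and only restates the condition introduced in this hunk.

```cpp
// The new code-sinking trigger, factored out as a helper for clarity.
static bool EndsInUncommonBranch(HBasicBlock* exit_predecessor) {
  HInstruction* last = exit_predecessor->GetLastInstruction();
  // Any exit predecessor that does not return throws, either via an explicit
  // HThrow or via an always-throwing instruction followed by a goto to exit.
  return !last->IsReturn() && !last->IsReturnVoid();
}
```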
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index 6eda289861..ba4040acad 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -74,8 +74,8 @@ static ::std::vector<CodegenTargetConfig> GetTargetConfigs() {
class CodegenTest : public OptimizingUnitTest {
protected:
- void TestCode(const uint16_t* data, bool has_result = false, int32_t expected = 0);
- void TestCodeLong(const uint16_t* data, bool has_result, int64_t expected);
+ void TestCode(const std::vector<uint16_t>& data, bool has_result = false, int32_t expected = 0);
+ void TestCodeLong(const std::vector<uint16_t>& data, bool has_result, int64_t expected);
void TestComparison(IfCondition condition,
int64_t i,
int64_t j,
@@ -83,7 +83,7 @@ class CodegenTest : public OptimizingUnitTest {
const CodegenTargetConfig target_config);
};
-void CodegenTest::TestCode(const uint16_t* data, bool has_result, int32_t expected) {
+void CodegenTest::TestCode(const std::vector<uint16_t>& data, bool has_result, int32_t expected) {
for (const CodegenTargetConfig& target_config : GetTargetConfigs()) {
ResetPoolAndAllocator();
HGraph* graph = CreateCFG(data);
@@ -93,7 +93,8 @@ void CodegenTest::TestCode(const uint16_t* data, bool has_result, int32_t expect
}
}
-void CodegenTest::TestCodeLong(const uint16_t* data, bool has_result, int64_t expected) {
+void CodegenTest::TestCodeLong(const std::vector<uint16_t>& data,
+ bool has_result, int64_t expected) {
for (const CodegenTargetConfig& target_config : GetTargetConfigs()) {
ResetPoolAndAllocator();
HGraph* graph = CreateCFG(data, DataType::Type::kInt64);
@@ -104,12 +105,12 @@ void CodegenTest::TestCodeLong(const uint16_t* data, bool has_result, int64_t ex
}
TEST_F(CodegenTest, ReturnVoid) {
- const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(Instruction::RETURN_VOID);
+ const std::vector<uint16_t> data = ZERO_REGISTER_CODE_ITEM(Instruction::RETURN_VOID);
TestCode(data);
}
TEST_F(CodegenTest, CFG1) {
- const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO | 0x100,
Instruction::RETURN_VOID);
@@ -117,7 +118,7 @@ TEST_F(CodegenTest, CFG1) {
}
TEST_F(CodegenTest, CFG2) {
- const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO | 0x100,
Instruction::GOTO | 0x100,
Instruction::RETURN_VOID);
@@ -126,21 +127,21 @@ TEST_F(CodegenTest, CFG2) {
}
TEST_F(CodegenTest, CFG3) {
- const uint16_t data1[] = ZERO_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data1 = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO | 0x200,
Instruction::RETURN_VOID,
Instruction::GOTO | 0xFF00);
TestCode(data1);
- const uint16_t data2[] = ZERO_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data2 = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO_16, 3,
Instruction::RETURN_VOID,
Instruction::GOTO_16, 0xFFFF);
TestCode(data2);
- const uint16_t data3[] = ZERO_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data3 = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO_32, 4, 0,
Instruction::RETURN_VOID,
Instruction::GOTO_32, 0xFFFF, 0xFFFF);
@@ -149,7 +150,7 @@ TEST_F(CodegenTest, CFG3) {
}
TEST_F(CodegenTest, CFG4) {
- const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ZERO_REGISTER_CODE_ITEM(
Instruction::RETURN_VOID,
Instruction::GOTO | 0x100,
Instruction::GOTO | 0xFE00);
@@ -158,7 +159,7 @@ TEST_F(CodegenTest, CFG4) {
}
TEST_F(CodegenTest, CFG5) {
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3,
Instruction::GOTO | 0x100,
@@ -168,7 +169,7 @@ TEST_F(CodegenTest, CFG5) {
}
TEST_F(CodegenTest, IntConstant) {
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::RETURN_VOID);
@@ -176,7 +177,7 @@ TEST_F(CodegenTest, IntConstant) {
}
TEST_F(CodegenTest, Return1) {
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::RETURN | 0);
@@ -184,7 +185,7 @@ TEST_F(CodegenTest, Return1) {
}
TEST_F(CodegenTest, Return2) {
- const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::CONST_4 | 0 | 1 << 8,
Instruction::RETURN | 1 << 8);
@@ -193,7 +194,7 @@ TEST_F(CodegenTest, Return2) {
}
TEST_F(CodegenTest, Return3) {
- const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::CONST_4 | 1 << 8 | 1 << 12,
Instruction::RETURN | 1 << 8);
@@ -202,7 +203,7 @@ TEST_F(CodegenTest, Return3) {
}
TEST_F(CodegenTest, ReturnIf1) {
- const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::CONST_4 | 1 << 8 | 1 << 12,
Instruction::IF_EQ, 3,
@@ -213,7 +214,7 @@ TEST_F(CodegenTest, ReturnIf1) {
}
TEST_F(CodegenTest, ReturnIf2) {
- const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::CONST_4 | 1 << 8 | 1 << 12,
Instruction::IF_EQ | 0 << 4 | 1 << 8, 3,
@@ -224,17 +225,17 @@ TEST_F(CodegenTest, ReturnIf2) {
}
// Exercise bit-wise (one's complement) not-int instruction.
-#define NOT_INT_TEST(TEST_NAME, INPUT, EXPECTED_OUTPUT) \
-TEST_F(CodegenTest, TEST_NAME) { \
- const int32_t input = INPUT; \
- const uint16_t input_lo = Low16Bits(input); \
- const uint16_t input_hi = High16Bits(input); \
- const uint16_t data[] = TWO_REGISTERS_CODE_ITEM( \
- Instruction::CONST | 0 << 8, input_lo, input_hi, \
- Instruction::NOT_INT | 1 << 8 | 0 << 12 , \
- Instruction::RETURN | 1 << 8); \
- \
- TestCode(data, true, EXPECTED_OUTPUT); \
+#define NOT_INT_TEST(TEST_NAME, INPUT, EXPECTED_OUTPUT) \
+TEST_F(CodegenTest, TEST_NAME) { \
+ const int32_t input = INPUT; \
+ const uint16_t input_lo = Low16Bits(input); \
+ const uint16_t input_hi = High16Bits(input); \
+ const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM( \
+ Instruction::CONST | 0 << 8, input_lo, input_hi, \
+ Instruction::NOT_INT | 1 << 8 | 0 << 12 , \
+ Instruction::RETURN | 1 << 8); \
+ \
+ TestCode(data, true, EXPECTED_OUTPUT); \
}
NOT_INT_TEST(ReturnNotIntMinus2, -2, 1)
@@ -256,7 +257,7 @@ TEST_F(CodegenTest, TEST_NAME) { \
const uint16_t word1 = High16Bits(Low32Bits(input)); \
const uint16_t word2 = Low16Bits(High32Bits(input)); \
const uint16_t word3 = High16Bits(High32Bits(input)); /* MSW. */ \
- const uint16_t data[] = FOUR_REGISTERS_CODE_ITEM( \
+ const std::vector<uint16_t> data = FOUR_REGISTERS_CODE_ITEM( \
Instruction::CONST_WIDE | 0 << 8, word0, word1, word2, word3, \
Instruction::NOT_LONG | 2 << 8 | 0 << 12, \
Instruction::RETURN_WIDE | 2 << 8); \
@@ -306,7 +307,7 @@ TEST_F(CodegenTest, IntToLongOfLongToInt) {
const uint16_t word1 = High16Bits(Low32Bits(input));
const uint16_t word2 = Low16Bits(High32Bits(input));
const uint16_t word3 = High16Bits(High32Bits(input)); // MSW.
- const uint16_t data[] = FIVE_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = FIVE_REGISTERS_CODE_ITEM(
Instruction::CONST_WIDE | 0 << 8, word0, word1, word2, word3,
Instruction::CONST_WIDE | 2 << 8, 1, 0, 0, 0,
Instruction::ADD_LONG | 0, 0 << 8 | 2, // v0 <- 2^32 + 1
@@ -318,7 +319,7 @@ TEST_F(CodegenTest, IntToLongOfLongToInt) {
}
TEST_F(CodegenTest, ReturnAdd1) {
- const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 3 << 12 | 0,
Instruction::CONST_4 | 4 << 12 | 1 << 8,
Instruction::ADD_INT, 1 << 8 | 0,
@@ -328,7 +329,7 @@ TEST_F(CodegenTest, ReturnAdd1) {
}
TEST_F(CodegenTest, ReturnAdd2) {
- const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 3 << 12 | 0,
Instruction::CONST_4 | 4 << 12 | 1 << 8,
Instruction::ADD_INT_2ADDR | 1 << 12,
@@ -338,7 +339,7 @@ TEST_F(CodegenTest, ReturnAdd2) {
}
TEST_F(CodegenTest, ReturnAdd3) {
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 4 << 12 | 0 << 8,
Instruction::ADD_INT_LIT8, 3 << 8 | 0,
Instruction::RETURN);
@@ -347,7 +348,7 @@ TEST_F(CodegenTest, ReturnAdd3) {
}
TEST_F(CodegenTest, ReturnAdd4) {
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 4 << 12 | 0 << 8,
Instruction::ADD_INT_LIT16, 3,
Instruction::RETURN);
@@ -356,7 +357,7 @@ TEST_F(CodegenTest, ReturnAdd4) {
}
TEST_F(CodegenTest, ReturnMulInt) {
- const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 3 << 12 | 0,
Instruction::CONST_4 | 4 << 12 | 1 << 8,
Instruction::MUL_INT, 1 << 8 | 0,
@@ -366,7 +367,7 @@ TEST_F(CodegenTest, ReturnMulInt) {
}
TEST_F(CodegenTest, ReturnMulInt2addr) {
- const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 3 << 12 | 0,
Instruction::CONST_4 | 4 << 12 | 1 << 8,
Instruction::MUL_INT_2ADDR | 1 << 12,
@@ -376,7 +377,7 @@ TEST_F(CodegenTest, ReturnMulInt2addr) {
}
TEST_F(CodegenTest, ReturnMulLong) {
- const uint16_t data[] = FOUR_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = FOUR_REGISTERS_CODE_ITEM(
Instruction::CONST_WIDE | 0 << 8, 3, 0, 0, 0,
Instruction::CONST_WIDE | 2 << 8, 4, 0, 0, 0,
Instruction::MUL_LONG, 2 << 8 | 0,
@@ -386,7 +387,7 @@ TEST_F(CodegenTest, ReturnMulLong) {
}
TEST_F(CodegenTest, ReturnMulLong2addr) {
- const uint16_t data[] = FOUR_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = FOUR_REGISTERS_CODE_ITEM(
Instruction::CONST_WIDE | 0 << 8, 3, 0, 0, 0,
Instruction::CONST_WIDE | 2 << 8, 4, 0, 0, 0,
Instruction::MUL_LONG_2ADDR | 2 << 12,
@@ -396,7 +397,7 @@ TEST_F(CodegenTest, ReturnMulLong2addr) {
}
TEST_F(CodegenTest, ReturnMulIntLit8) {
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 4 << 12 | 0 << 8,
Instruction::MUL_INT_LIT8, 3 << 8 | 0,
Instruction::RETURN);
@@ -405,7 +406,7 @@ TEST_F(CodegenTest, ReturnMulIntLit8) {
}
TEST_F(CodegenTest, ReturnMulIntLit16) {
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 4 << 12 | 0 << 8,
Instruction::MUL_INT_LIT16, 3,
Instruction::RETURN);
@@ -578,7 +579,7 @@ TEST_F(CodegenTest, MaterializedCondition2) {
}
TEST_F(CodegenTest, ReturnDivIntLit8) {
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 4 << 12 | 0 << 8,
Instruction::DIV_INT_LIT8, 3 << 8 | 0,
Instruction::RETURN);
@@ -587,7 +588,7 @@ TEST_F(CodegenTest, ReturnDivIntLit8) {
}
TEST_F(CodegenTest, ReturnDivInt2Addr) {
- const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 4 << 12 | 0,
Instruction::CONST_4 | 2 << 12 | 1 << 8,
Instruction::DIV_INT_2ADDR | 1 << 12,
diff --git a/compiler/optimizing/constant_folding_test.cc b/compiler/optimizing/constant_folding_test.cc
index e1980e080e..d27104752b 100644
--- a/compiler/optimizing/constant_folding_test.cc
+++ b/compiler/optimizing/constant_folding_test.cc
@@ -36,7 +36,7 @@ class ConstantFoldingTest : public OptimizingUnitTest {
public:
ConstantFoldingTest() : graph_(nullptr) { }
- void TestCode(const uint16_t* data,
+ void TestCode(const std::vector<uint16_t>& data,
const std::string& expected_before,
const std::string& expected_after_cf,
const std::string& expected_after_dce,
@@ -100,7 +100,7 @@ class ConstantFoldingTest : public OptimizingUnitTest {
* return v1 2. return v1
*/
TEST_F(ConstantFoldingTest, IntConstantFoldingNegation) {
- const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 << 8 | 1 << 12,
Instruction::NEG_INT | 1 << 8 | 0 << 12,
Instruction::RETURN | 1 << 8);
@@ -161,7 +161,7 @@ TEST_F(ConstantFoldingTest, LongConstantFoldingNegation) {
const uint16_t word1 = High16Bits(Low32Bits(input));
const uint16_t word2 = Low16Bits(High32Bits(input));
const uint16_t word3 = High16Bits(High32Bits(input)); // MSW.
- const uint16_t data[] = FOUR_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = FOUR_REGISTERS_CODE_ITEM(
Instruction::CONST_WIDE | 0 << 8, word0, word1, word2, word3,
Instruction::NEG_LONG | 2 << 8 | 0 << 12,
Instruction::RETURN_WIDE | 2 << 8);
@@ -219,7 +219,7 @@ TEST_F(ConstantFoldingTest, LongConstantFoldingNegation) {
* return v2 4. return v2
*/
TEST_F(ConstantFoldingTest, IntConstantFoldingOnAddition1) {
- const uint16_t data[] = THREE_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = THREE_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 << 8 | 1 << 12,
Instruction::CONST_4 | 1 << 8 | 2 << 12,
Instruction::ADD_INT | 2 << 8, 0 | 1 << 8,
@@ -284,7 +284,7 @@ TEST_F(ConstantFoldingTest, IntConstantFoldingOnAddition1) {
* return v2 8. return v2
*/
TEST_F(ConstantFoldingTest, IntConstantFoldingOnAddition2) {
- const uint16_t data[] = THREE_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = THREE_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 << 8 | 1 << 12,
Instruction::CONST_4 | 1 << 8 | 2 << 12,
Instruction::ADD_INT_2ADDR | 0 << 8 | 1 << 12,
@@ -369,7 +369,7 @@ TEST_F(ConstantFoldingTest, IntConstantFoldingOnAddition2) {
* return v2 4. return v2
*/
TEST_F(ConstantFoldingTest, IntConstantFoldingOnSubtraction) {
- const uint16_t data[] = THREE_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = THREE_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 << 8 | 3 << 12,
Instruction::CONST_4 | 1 << 8 | 2 << 12,
Instruction::SUB_INT | 2 << 8, 0 | 1 << 8,
@@ -432,7 +432,7 @@ TEST_F(ConstantFoldingTest, IntConstantFoldingOnSubtraction) {
* return (v4, v5) 6. return-wide v4
*/
TEST_F(ConstantFoldingTest, LongConstantFoldingOnAddition) {
- const uint16_t data[] = SIX_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = SIX_REGISTERS_CODE_ITEM(
Instruction::CONST_WIDE_16 | 0 << 8, 1,
Instruction::CONST_WIDE_16 | 2 << 8, 2,
Instruction::ADD_LONG | 4 << 8, 0 | 2 << 8,
@@ -496,7 +496,7 @@ TEST_F(ConstantFoldingTest, LongConstantFoldingOnAddition) {
* return (v4, v5) 6. return-wide v4
*/
TEST_F(ConstantFoldingTest, LongConstantFoldingOnSubtraction) {
- const uint16_t data[] = SIX_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = SIX_REGISTERS_CODE_ITEM(
Instruction::CONST_WIDE_16 | 0 << 8, 3,
Instruction::CONST_WIDE_16 | 2 << 8, 2,
Instruction::SUB_LONG | 4 << 8, 0 | 2 << 8,
@@ -569,7 +569,7 @@ TEST_F(ConstantFoldingTest, LongConstantFoldingOnSubtraction) {
* return v2 13. return v2
*/
TEST_F(ConstantFoldingTest, IntConstantFoldingAndJumps) {
- const uint16_t data[] = THREE_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = THREE_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 << 8 | 1 << 12,
Instruction::CONST_4 | 1 << 8 | 2 << 12,
Instruction::ADD_INT | 2 << 8, 0 | 1 << 8,
@@ -672,7 +672,7 @@ TEST_F(ConstantFoldingTest, IntConstantFoldingAndJumps) {
* return-void 7. return
*/
TEST_F(ConstantFoldingTest, ConstantCondition) {
- const uint16_t data[] = THREE_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = THREE_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 1 << 8 | 1 << 12,
Instruction::CONST_4 | 0 << 8 | 0 << 12,
Instruction::IF_GEZ | 1 << 8, 3,
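The only change in this and the similar test files further down is the data signature: raw const uint16_t* arrays become const std::vector<uint16_t>&, so the number of code units travels with the data instead of being implied by the macro-expanded array. A minimal sketch of that difference, assuming nothing beyond standard C++ (CountCodeUnits is a hypothetical stand-in for CreateCFG, not an ART helper):

#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical stand-in for CreateCFG(): with a vector argument the callee can
// query data.size() directly; a decayed const uint16_t* carries no length.
std::size_t CountCodeUnits(const std::vector<uint16_t>& data) {
  return data.size();
}

int main() {
  const std::vector<uint16_t> data = {0x0012, 0x0F00};  // two arbitrary code units
  return CountCodeUnits(data) == 2u ? 0 : 1;
}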
diff --git a/compiler/optimizing/data_type-inl.h b/compiler/optimizing/data_type-inl.h
index e389bad3ad..e2cf7a80fe 100644
--- a/compiler/optimizing/data_type-inl.h
+++ b/compiler/optimizing/data_type-inl.h
@@ -53,7 +53,9 @@ constexpr char DataType::TypeId(DataType::Type type) {
case DataType::Type::kInt8: return 'b'; // Java byte (B).
case DataType::Type::kUint16: return 'c'; // Java char (C).
case DataType::Type::kInt16: return 's'; // Java short (S).
+ case DataType::Type::kUint32: return 'u'; // Picked 'u' for unsigned.
case DataType::Type::kInt32: return 'i'; // Java int (I).
+ case DataType::Type::kUint64: return 'w'; // Picked 'w' for long unsigned.
case DataType::Type::kInt64: return 'j'; // Java long (J).
case DataType::Type::kFloat32: return 'f'; // Java float (F).
case DataType::Type::kFloat64: return 'd'; // Java double (D).
diff --git a/compiler/optimizing/data_type.cc b/compiler/optimizing/data_type.cc
index 3c99a76c17..cb354f46cc 100644
--- a/compiler/optimizing/data_type.cc
+++ b/compiler/optimizing/data_type.cc
@@ -25,7 +25,9 @@ static const char* kTypeNames[] = {
"Int8",
"Uint16",
"Int16",
+ "Uint32",
"Int32",
+ "Uint64",
"Int64",
"Float32",
"Float64",
diff --git a/compiler/optimizing/data_type.h b/compiler/optimizing/data_type.h
index 548fe28cee..4a6c91459f 100644
--- a/compiler/optimizing/data_type.h
+++ b/compiler/optimizing/data_type.h
@@ -34,7 +34,9 @@ class DataType {
kInt8,
kUint16,
kInt16,
+ kUint32,
kInt32,
+ kUint64,
kInt64,
kFloat32,
kFloat64,
@@ -55,9 +57,11 @@ class DataType {
case Type::kUint16:
case Type::kInt16:
return 1;
+ case Type::kUint32:
case Type::kInt32:
case Type::kFloat32:
return 2;
+ case Type::kUint64:
case Type::kInt64:
case Type::kFloat64:
return 3;
@@ -80,9 +84,11 @@ class DataType {
case Type::kUint16:
case Type::kInt16:
return 2;
+ case Type::kUint32:
case Type::kInt32:
case Type::kFloat32:
return 4;
+ case Type::kUint64:
case Type::kInt64:
case Type::kFloat64:
return 8;
@@ -107,7 +113,9 @@ class DataType {
case Type::kInt8:
case Type::kUint16:
case Type::kInt16:
+ case Type::kUint32:
case Type::kInt32:
+ case Type::kUint64:
case Type::kInt64:
return true;
default:
@@ -120,11 +128,12 @@ class DataType {
}
static bool Is64BitType(Type type) {
- return type == Type::kInt64 || type == Type::kFloat64;
+ return type == Type::kUint64 || type == Type::kInt64 || type == Type::kFloat64;
}
static bool IsUnsignedType(Type type) {
- return type == Type::kBool || type == Type::kUint8 || type == Type::kUint16;
+ return type == Type::kBool || type == Type::kUint8 || type == Type::kUint16 ||
+ type == Type::kUint32 || type == Type::kUint64;
}
// Return the general kind of `type`, fusing integer-like types as Type::kInt.
@@ -133,10 +142,14 @@ class DataType {
case Type::kBool:
case Type::kUint8:
case Type::kInt8:
- case Type::kInt16:
case Type::kUint16:
+ case Type::kInt16:
+ case Type::kUint32:
case Type::kInt32:
return Type::kInt32;
+ case Type::kUint64:
+ case Type::kInt64:
+ return Type::kInt64;
default:
return type;
}
@@ -154,8 +167,12 @@ class DataType {
return std::numeric_limits<uint16_t>::min();
case Type::kInt16:
return std::numeric_limits<int16_t>::min();
+ case Type::kUint32:
+ return std::numeric_limits<uint32_t>::min();
case Type::kInt32:
return std::numeric_limits<int32_t>::min();
+ case Type::kUint64:
+ return std::numeric_limits<uint64_t>::min();
case Type::kInt64:
return std::numeric_limits<int64_t>::min();
default:
@@ -176,8 +193,12 @@ class DataType {
return std::numeric_limits<uint16_t>::max();
case Type::kInt16:
return std::numeric_limits<int16_t>::max();
+ case Type::kUint32:
+ return std::numeric_limits<uint32_t>::max();
case Type::kInt32:
return std::numeric_limits<int32_t>::max();
+ case Type::kUint64:
+ return std::numeric_limits<uint64_t>::max();
case Type::kInt64:
return std::numeric_limits<int64_t>::max();
default:
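To make the widened integral handling concrete, here is a stand-alone sketch of how Kind() folds types after this change; the enum mirrors the hunks above but is local to the example, not the real data_type.h:

enum class Type { kBool, kUint8, kInt8, kUint16, kInt16,
                  kUint32, kInt32, kUint64, kInt64, kFloat32, kFloat64 };

// Mirrors DataType::Kind() from the hunk: every integral type of 32 bits or
// narrower folds to kInt32, and both 64-bit integral types fold to kInt64.
constexpr Type Kind(Type type) {
  switch (type) {
    case Type::kBool: case Type::kUint8: case Type::kInt8:
    case Type::kUint16: case Type::kInt16:
    case Type::kUint32: case Type::kInt32:
      return Type::kInt32;
    case Type::kUint64: case Type::kInt64:
      return Type::kInt64;
    default:
      return type;
  }
}

static_assert(Kind(Type::kUint32) == Type::kInt32, "Uint32 folds with Int32");
static_assert(Kind(Type::kUint64) == Type::kInt64, "Uint64 folds with Int64");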
diff --git a/compiler/optimizing/dead_code_elimination.cc b/compiler/optimizing/dead_code_elimination.cc
index 3cc7b0e78d..9fa0f72e80 100644
--- a/compiler/optimizing/dead_code_elimination.cc
+++ b/compiler/optimizing/dead_code_elimination.cc
@@ -146,6 +146,141 @@ static HConstant* Evaluate(HCondition* condition, HInstruction* left, HInstructi
}
}
+static bool RemoveNonNullControlDependences(HBasicBlock* block, HBasicBlock* throws) {
+ // Test for an if as last statement.
+ if (!block->EndsWithIf()) {
+ return false;
+ }
+ HIf* ifs = block->GetLastInstruction()->AsIf();
+ // Find either:
+ // if obj == null
+ // throws
+ // else
+ // not_throws
+ // or:
+ // if obj != null
+ // not_throws
+ // else
+ // throws
+ HInstruction* cond = ifs->InputAt(0);
+ HBasicBlock* not_throws = nullptr;
+ if (throws == ifs->IfTrueSuccessor() && cond->IsEqual()) {
+ not_throws = ifs->IfFalseSuccessor();
+ } else if (throws == ifs->IfFalseSuccessor() && cond->IsNotEqual()) {
+ not_throws = ifs->IfTrueSuccessor();
+ } else {
+ return false;
+ }
+ DCHECK(cond->IsEqual() || cond->IsNotEqual());
+ HInstruction* obj = cond->InputAt(1);
+ if (obj->IsNullConstant()) {
+ obj = cond->InputAt(0);
+ } else if (!cond->InputAt(0)->IsNullConstant()) {
+ return false;
+ }
+ // Scan all uses of obj and find null check under control dependence.
+ HBoundType* bound = nullptr;
+ const HUseList<HInstruction*>& uses = obj->GetUses();
+ for (auto it = uses.begin(), end = uses.end(); it != end;) {
+ HInstruction* user = it->GetUser();
+ ++it; // increment before possibly replacing
+ if (user->IsNullCheck()) {
+ HBasicBlock* user_block = user->GetBlock();
+ if (user_block != block &&
+ user_block != throws &&
+ block->Dominates(user_block)) {
+ if (bound == nullptr) {
+ ReferenceTypeInfo ti = obj->GetReferenceTypeInfo();
+ bound = new (obj->GetBlock()->GetGraph()->GetAllocator()) HBoundType(obj);
+ bound->SetUpperBound(ti, /*can_be_null*/ false);
+ bound->SetReferenceTypeInfo(ti);
+ bound->SetCanBeNull(false);
+ not_throws->InsertInstructionBefore(bound, not_throws->GetFirstInstruction());
+ }
+ user->ReplaceWith(bound);
+ user_block->RemoveInstruction(user);
+ }
+ }
+ }
+ return bound != nullptr;
+}
+
+// Simplify the pattern:
+//
+// B1
+// / \
+// | foo() // always throws
+// \ goto B2
+// \ /
+// B2
+//
+// Into:
+//
+// B1
+// / \
+// | foo()
+// | goto Exit
+// | |
+// B2 Exit
+//
+// Rationale:
+// Removal of the never taken edge to B2 may expose
+// other optimization opportunities, such as code sinking.
+bool HDeadCodeElimination::SimplifyAlwaysThrows() {
+ // Make sure exceptions go to exit.
+ if (graph_->HasTryCatch()) {
+ return false;
+ }
+ HBasicBlock* exit = graph_->GetExitBlock();
+ if (exit == nullptr) {
+ return false;
+ }
+
+ bool rerun_dominance_and_loop_analysis = false;
+
+ // Order does not matter, just pick one.
+ for (HBasicBlock* block : graph_->GetReversePostOrder()) {
+ HInstruction* first = block->GetFirstInstruction();
+ HInstruction* last = block->GetLastInstruction();
+ // Ensure only one throwing instruction appears before goto.
+ if (first->AlwaysThrows() &&
+ first->GetNext() == last &&
+ last->IsGoto() &&
+ block->GetPhis().IsEmpty() &&
+ block->GetPredecessors().size() == 1u) {
+ DCHECK_EQ(block->GetSuccessors().size(), 1u);
+ HBasicBlock* pred = block->GetSinglePredecessor();
+ HBasicBlock* succ = block->GetSingleSuccessor();
+ // Ensure no computations are merged through throwing block.
+ // This does not prevent the optimization per se, but would
+ // require an elaborate clean up of the SSA graph.
+ if (succ != exit &&
+ !block->Dominates(pred) &&
+ pred->Dominates(succ) &&
+ succ->GetPredecessors().size() > 1u &&
+ succ->GetPhis().IsEmpty()) {
+ block->ReplaceSuccessor(succ, exit);
+ rerun_dominance_and_loop_analysis = true;
+ MaybeRecordStat(stats_, MethodCompilationStat::kSimplifyThrowingInvoke);
+ // Perform a quick follow-up optimization on object != null control dependences
+ // that is much cheaper to perform now than in a later phase.

+ if (RemoveNonNullControlDependences(pred, block)) {
+ MaybeRecordStat(stats_, MethodCompilationStat::kRemovedNullCheck);
+ }
+ }
+ }
+ }
+
+ // We need to re-analyze the graph in order to run DCE afterwards.
+ if (rerun_dominance_and_loop_analysis) {
+ graph_->ClearLoopInformation();
+ graph_->ClearDominanceInformation();
+ graph_->BuildDominatorTree();
+ return true;
+ }
+ return false;
+}
+
// Simplify the pattern:
//
// B1 B2 ...
@@ -381,6 +516,7 @@ void HDeadCodeElimination::Run() {
// Simplify graph to generate more dead block patterns.
ConnectSuccessiveBlocks();
bool did_any_simplification = false;
+ did_any_simplification |= SimplifyAlwaysThrows();
did_any_simplification |= SimplifyIfs();
did_any_simplification |= RemoveDeadBlocks();
if (did_any_simplification) {
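For orientation, the block diagram in the SimplifyAlwaysThrows() comment above corresponds to source shaped roughly like this hedged C++ analogue (ART operates on HIR blocks, not source; Fail plays the role of foo):

#include <stdexcept>

[[noreturn]] void Fail(const char* what) {   // an invoke that can only throw
  throw std::runtime_error(what);
}

int Example(bool bad, int v) {
  if (bad) {
    Fail("bad input");   // this block ends in a goto to the join block B2, but
                         // that edge is never taken; the pass redirects it to Exit
  }
  return v + 1;          // B2: the join block after the branch
}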
diff --git a/compiler/optimizing/dead_code_elimination.h b/compiler/optimizing/dead_code_elimination.h
index 84fd890eee..92a7f562e1 100644
--- a/compiler/optimizing/dead_code_elimination.h
+++ b/compiler/optimizing/dead_code_elimination.h
@@ -40,6 +40,7 @@ class HDeadCodeElimination : public HOptimization {
void MaybeRecordSimplifyIf();
bool RemoveDeadBlocks();
void RemoveDeadInstructions();
+ bool SimplifyAlwaysThrows();
bool SimplifyIfs();
void ConnectSuccessiveBlocks();
diff --git a/compiler/optimizing/dead_code_elimination_test.cc b/compiler/optimizing/dead_code_elimination_test.cc
index 929572ee3b..adb6ce1187 100644
--- a/compiler/optimizing/dead_code_elimination_test.cc
+++ b/compiler/optimizing/dead_code_elimination_test.cc
@@ -29,12 +29,12 @@ namespace art {
class DeadCodeEliminationTest : public OptimizingUnitTest {
protected:
- void TestCode(const uint16_t* data,
+ void TestCode(const std::vector<uint16_t>& data,
const std::string& expected_before,
const std::string& expected_after);
};
-void DeadCodeEliminationTest::TestCode(const uint16_t* data,
+void DeadCodeEliminationTest::TestCode(const std::vector<uint16_t>& data,
const std::string& expected_before,
const std::string& expected_after) {
HGraph* graph = CreateCFG(data);
@@ -73,7 +73,7 @@ void DeadCodeEliminationTest::TestCode(const uint16_t* data,
* return-void 7. return
*/
TEST_F(DeadCodeEliminationTest, AdditionAndConditionalJump) {
- const uint16_t data[] = THREE_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = THREE_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 1 << 8 | 1 << 12,
Instruction::CONST_4 | 0 << 8 | 0 << 12,
Instruction::IF_GEZ | 1 << 8, 3,
@@ -135,7 +135,7 @@ TEST_F(DeadCodeEliminationTest, AdditionAndConditionalJump) {
* return 13. return-void
*/
TEST_F(DeadCodeEliminationTest, AdditionsAndInconditionalJumps) {
- const uint16_t data[] = THREE_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = THREE_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 << 8 | 0 << 12,
Instruction::CONST_4 | 1 << 8 | 1 << 12,
Instruction::ADD_INT | 2 << 8, 0 | 1 << 8,
diff --git a/compiler/optimizing/dominator_test.cc b/compiler/optimizing/dominator_test.cc
index 572466eec8..1d72ba116e 100644
--- a/compiler/optimizing/dominator_test.cc
+++ b/compiler/optimizing/dominator_test.cc
@@ -26,10 +26,12 @@ namespace art {
class OptimizerTest : public OptimizingUnitTest {
protected:
- void TestCode(const uint16_t* data, const uint32_t* blocks, size_t blocks_length);
+ void TestCode(const std::vector<uint16_t>& data, const uint32_t* blocks, size_t blocks_length);
};
-void OptimizerTest::TestCode(const uint16_t* data, const uint32_t* blocks, size_t blocks_length) {
+void OptimizerTest::TestCode(const std::vector<uint16_t>& data,
+ const uint32_t* blocks,
+ size_t blocks_length) {
HGraph* graph = CreateCFG(data);
ASSERT_EQ(graph->GetBlocks().size(), blocks_length);
for (size_t i = 0, e = blocks_length; i < e; ++i) {
@@ -49,7 +51,7 @@ void OptimizerTest::TestCode(const uint16_t* data, const uint32_t* blocks, size_
}
TEST_F(OptimizerTest, ReturnVoid) {
- const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ZERO_REGISTER_CODE_ITEM(
Instruction::RETURN_VOID); // Block number 1
const uint32_t dominators[] = {
@@ -62,7 +64,7 @@ TEST_F(OptimizerTest, ReturnVoid) {
}
TEST_F(OptimizerTest, CFG1) {
- const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO | 0x100, // Block number 1
Instruction::RETURN_VOID); // Block number 2
@@ -77,7 +79,7 @@ TEST_F(OptimizerTest, CFG1) {
}
TEST_F(OptimizerTest, CFG2) {
- const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO | 0x100, // Block number 1
Instruction::GOTO | 0x100, // Block number 2
Instruction::RETURN_VOID); // Block number 3
@@ -94,7 +96,7 @@ TEST_F(OptimizerTest, CFG2) {
}
TEST_F(OptimizerTest, CFG3) {
- const uint16_t data1[] = ZERO_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data1 = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO | 0x200, // Block number 1
Instruction::RETURN_VOID, // Block number 2
Instruction::GOTO | 0xFF00); // Block number 3
@@ -109,14 +111,14 @@ TEST_F(OptimizerTest, CFG3) {
TestCode(data1, dominators, sizeof(dominators) / sizeof(int));
- const uint16_t data2[] = ZERO_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data2 = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO_16, 3,
Instruction::RETURN_VOID,
Instruction::GOTO_16, 0xFFFF);
TestCode(data2, dominators, sizeof(dominators) / sizeof(int));
- const uint16_t data3[] = ZERO_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data3 = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO_32, 4, 0,
Instruction::RETURN_VOID,
Instruction::GOTO_32, 0xFFFF, 0xFFFF);
@@ -125,7 +127,7 @@ TEST_F(OptimizerTest, CFG3) {
}
TEST_F(OptimizerTest, CFG4) {
- const uint16_t data1[] = ZERO_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data1 = ZERO_REGISTER_CODE_ITEM(
Instruction::NOP,
Instruction::GOTO | 0xFF00);
@@ -138,14 +140,14 @@ TEST_F(OptimizerTest, CFG4) {
TestCode(data1, dominators, sizeof(dominators) / sizeof(int));
- const uint16_t data2[] = ZERO_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data2 = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO_32, 0, 0);
TestCode(data2, dominators, sizeof(dominators) / sizeof(int));
}
TEST_F(OptimizerTest, CFG5) {
- const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ZERO_REGISTER_CODE_ITEM(
Instruction::RETURN_VOID, // Block number 1
Instruction::GOTO | 0x100, // Dead block
Instruction::GOTO | 0xFE00); // Block number 2
@@ -162,7 +164,7 @@ TEST_F(OptimizerTest, CFG5) {
}
TEST_F(OptimizerTest, CFG6) {
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3,
Instruction::GOTO | 0x100,
@@ -181,7 +183,7 @@ TEST_F(OptimizerTest, CFG6) {
}
TEST_F(OptimizerTest, CFG7) {
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3, // Block number 1
Instruction::GOTO | 0x100, // Block number 2
@@ -201,7 +203,7 @@ TEST_F(OptimizerTest, CFG7) {
}
TEST_F(OptimizerTest, CFG8) {
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3, // Block number 1
Instruction::GOTO | 0x200, // Block number 2
@@ -222,7 +224,7 @@ TEST_F(OptimizerTest, CFG8) {
}
TEST_F(OptimizerTest, CFG9) {
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3, // Block number 1
Instruction::GOTO | 0x200, // Block number 2
@@ -243,7 +245,7 @@ TEST_F(OptimizerTest, CFG9) {
}
TEST_F(OptimizerTest, CFG10) {
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 6, // Block number 1
Instruction::IF_EQ, 3, // Block number 2
diff --git a/compiler/optimizing/find_loops_test.cc b/compiler/optimizing/find_loops_test.cc
index b799fb4688..75b8e9609e 100644
--- a/compiler/optimizing/find_loops_test.cc
+++ b/compiler/optimizing/find_loops_test.cc
@@ -31,7 +31,7 @@ class FindLoopsTest : public OptimizingUnitTest {};
TEST_F(FindLoopsTest, CFG1) {
// Constant is not used.
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::RETURN_VOID);
@@ -42,7 +42,7 @@ TEST_F(FindLoopsTest, CFG1) {
}
TEST_F(FindLoopsTest, CFG2) {
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::RETURN);
@@ -53,7 +53,7 @@ TEST_F(FindLoopsTest, CFG2) {
}
TEST_F(FindLoopsTest, CFG3) {
- const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 3 << 12 | 0,
Instruction::CONST_4 | 4 << 12 | 1 << 8,
Instruction::ADD_INT_2ADDR | 1 << 12,
@@ -67,7 +67,7 @@ TEST_F(FindLoopsTest, CFG3) {
}
TEST_F(FindLoopsTest, CFG4) {
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 4,
Instruction::CONST_4 | 4 << 12 | 0,
@@ -82,7 +82,7 @@ TEST_F(FindLoopsTest, CFG4) {
}
TEST_F(FindLoopsTest, CFG5) {
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3,
Instruction::CONST_4 | 4 << 12 | 0,
@@ -126,7 +126,7 @@ TEST_F(FindLoopsTest, Loop1) {
// while (a == a) {
// }
// return;
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3,
Instruction::GOTO | 0xFE00,
@@ -150,7 +150,7 @@ TEST_F(FindLoopsTest, Loop2) {
// while (a == a) {
// }
// return a;
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::GOTO | 0x400,
Instruction::IF_EQ, 4,
@@ -173,7 +173,7 @@ TEST_F(FindLoopsTest, Loop2) {
TEST_F(FindLoopsTest, Loop3) {
// Make sure we create a preheader of a loop when a header originally has two
// incoming blocks and one back edge.
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3,
Instruction::GOTO | 0x100,
@@ -197,7 +197,7 @@ TEST_F(FindLoopsTest, Loop3) {
TEST_F(FindLoopsTest, Loop4) {
// Test loop with originally two back edges.
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 6,
Instruction::IF_EQ, 3,
@@ -221,7 +221,7 @@ TEST_F(FindLoopsTest, Loop4) {
TEST_F(FindLoopsTest, Loop5) {
// Test loop with two exit edges.
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 6,
Instruction::IF_EQ, 3,
@@ -244,7 +244,7 @@ TEST_F(FindLoopsTest, Loop5) {
}
TEST_F(FindLoopsTest, InnerLoop) {
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 6,
Instruction::IF_EQ, 3,
@@ -273,7 +273,7 @@ TEST_F(FindLoopsTest, InnerLoop) {
}
TEST_F(FindLoopsTest, TwoLoops) {
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3,
Instruction::GOTO | 0xFE00, // first loop
@@ -301,7 +301,7 @@ TEST_F(FindLoopsTest, TwoLoops) {
}
TEST_F(FindLoopsTest, NonNaturalLoop) {
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3,
Instruction::GOTO | 0x0100,
@@ -317,7 +317,7 @@ TEST_F(FindLoopsTest, NonNaturalLoop) {
}
TEST_F(FindLoopsTest, DoWhileLoop) {
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::GOTO | 0x0100,
Instruction::IF_EQ, 0xFFFF,
diff --git a/compiler/optimizing/graph_checker.cc b/compiler/optimizing/graph_checker.cc
index b1ac027a68..c88baa8610 100644
--- a/compiler/optimizing/graph_checker.cc
+++ b/compiler/optimizing/graph_checker.cc
@@ -31,7 +31,15 @@ namespace art {
using android::base::StringPrintf;
static bool IsAllowedToJumpToExitBlock(HInstruction* instruction) {
- return instruction->IsThrow() || instruction->IsReturn() || instruction->IsReturnVoid();
+ // Anything that returns is allowed to jump into the exit block.
+ if (instruction->IsReturn() || instruction->IsReturnVoid()) {
+ return true;
+ }
+ // Anything that always throws is allowed to jump into the exit block.
+ if (instruction->IsGoto() && instruction->GetPrevious() != nullptr) {
+ instruction = instruction->GetPrevious();
+ }
+ return instruction->AlwaysThrows();
}
static bool IsExitTryBoundaryIntoExitBlock(HBasicBlock* block) {
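The graph_checker.cc change above widens the exit-block rule: besides the return forms, a Goto whose immediately preceding instruction always throws may now target the exit block. A stand-alone sketch of the predicate's new shape (Instr is a hypothetical stub, not ART's HInstruction):

struct Instr {
  bool is_return = false;        // Return or ReturnVoid
  bool is_goto = false;
  bool always_throws = false;    // e.g. an invoke marked SetAlwaysThrows(true)
  const Instr* previous = nullptr;
};

// Mirrors the updated IsAllowedToJumpToExitBlock() from the hunk above.
bool IsAllowedToJumpToExit(const Instr* insn) {
  if (insn->is_return) {
    return true;
  }
  if (insn->is_goto && insn->previous != nullptr) {
    insn = insn->previous;       // look through the trailing goto
  }
  return insn->always_throws;
}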
diff --git a/compiler/optimizing/graph_checker_test.cc b/compiler/optimizing/graph_checker_test.cc
index 9ca3e4953a..08bfa5d80f 100644
--- a/compiler/optimizing/graph_checker_test.cc
+++ b/compiler/optimizing/graph_checker_test.cc
@@ -22,7 +22,7 @@ namespace art {
class GraphCheckerTest : public OptimizingUnitTest {
protected:
HGraph* CreateSimpleCFG();
- void TestCode(const uint16_t* data);
+ void TestCode(const std::vector<uint16_t>& data);
};
/**
@@ -48,7 +48,7 @@ HGraph* GraphCheckerTest::CreateSimpleCFG() {
return graph;
}
-void GraphCheckerTest::TestCode(const uint16_t* data) {
+void GraphCheckerTest::TestCode(const std::vector<uint16_t>& data) {
HGraph* graph = CreateCFG(data);
ASSERT_NE(graph, nullptr);
@@ -58,14 +58,14 @@ void GraphCheckerTest::TestCode(const uint16_t* data) {
}
TEST_F(GraphCheckerTest, ReturnVoid) {
- const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ZERO_REGISTER_CODE_ITEM(
Instruction::RETURN_VOID);
TestCode(data);
}
TEST_F(GraphCheckerTest, CFG1) {
- const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO | 0x100,
Instruction::RETURN_VOID);
@@ -73,7 +73,7 @@ TEST_F(GraphCheckerTest, CFG1) {
}
TEST_F(GraphCheckerTest, CFG2) {
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3,
Instruction::GOTO | 0x100,
@@ -83,7 +83,7 @@ TEST_F(GraphCheckerTest, CFG2) {
}
TEST_F(GraphCheckerTest, CFG3) {
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3,
Instruction::GOTO | 0x100,
@@ -128,7 +128,7 @@ TEST_F(GraphCheckerTest, BlockEndingWithNonBranchInstruction) {
TEST_F(GraphCheckerTest, SSAPhi) {
// This code creates one Phi function during the conversion to SSA form.
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3,
Instruction::CONST_4 | 4 << 12 | 0,
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index 12c69889ab..6144162f68 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -533,20 +533,9 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
void VisitVecHalvingAdd(HVecHalvingAdd* hadd) OVERRIDE {
VisitVecBinaryOperation(hadd);
- StartAttributeStream("unsigned") << std::boolalpha << hadd->IsUnsigned() << std::noboolalpha;
StartAttributeStream("rounded") << std::boolalpha << hadd->IsRounded() << std::noboolalpha;
}
- void VisitVecMin(HVecMin* min) OVERRIDE {
- VisitVecBinaryOperation(min);
- StartAttributeStream("unsigned") << std::boolalpha << min->IsUnsigned() << std::noboolalpha;
- }
-
- void VisitVecMax(HVecMax* max) OVERRIDE {
- VisitVecBinaryOperation(max);
- StartAttributeStream("unsigned") << std::boolalpha << max->IsUnsigned() << std::noboolalpha;
- }
-
void VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) OVERRIDE {
VisitVecOperation(instruction);
StartAttributeStream("kind") << instruction->GetOpKind();
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index b2ad8ec400..035e5ce3e1 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -392,6 +392,35 @@ ArtMethod* HInliner::TryCHADevirtualization(ArtMethod* resolved_method) {
return single_impl;
}
+static bool AlwaysThrows(ArtMethod* method)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ CodeItemDataAccessor accessor(method->DexInstructionData());
+ // Skip native methods, methods with try blocks, and methods that are too large.
+ if (!accessor.HasCodeItem() ||
+ accessor.TriesSize() != 0 ||
+ accessor.InsnsSizeInCodeUnits() > kMaximumNumberOfTotalInstructions) {
+ return false;
+ }
+ // Scan for exits.
+ bool throw_seen = false;
+ for (const DexInstructionPcPair& pair : accessor) {
+ switch (pair.Inst().Opcode()) {
+ case Instruction::RETURN:
+ case Instruction::RETURN_VOID:
+ case Instruction::RETURN_WIDE:
+ case Instruction::RETURN_OBJECT:
+ case Instruction::RETURN_VOID_NO_BARRIER:
+ return false; // found regular control flow back
+ case Instruction::THROW:
+ throw_seen = true;
+ break;
+ default:
+ break;
+ }
+ }
+ return throw_seen;
+}
+
bool HInliner::TryInline(HInvoke* invoke_instruction) {
if (invoke_instruction->IsInvokeUnresolved() ||
invoke_instruction->IsInvokePolymorphic()) {
@@ -431,20 +460,29 @@ bool HInliner::TryInline(HInvoke* invoke_instruction) {
}
if (actual_method != nullptr) {
+ // Single target.
bool result = TryInlineAndReplace(invoke_instruction,
actual_method,
ReferenceTypeInfo::CreateInvalid(),
/* do_rtp */ true,
cha_devirtualize);
- if (result && !invoke_instruction->IsInvokeStaticOrDirect()) {
- if (cha_devirtualize) {
- // Add dependency due to devirtulization. We've assumed resolved_method
- // has single implementation.
- outermost_graph_->AddCHASingleImplementationDependency(resolved_method);
- MaybeRecordStat(stats_, MethodCompilationStat::kCHAInline);
- } else {
- MaybeRecordStat(stats_, MethodCompilationStat::kInlinedInvokeVirtualOrInterface);
+ if (result) {
+ // Successfully inlined.
+ if (!invoke_instruction->IsInvokeStaticOrDirect()) {
+ if (cha_devirtualize) {
+ // Add dependency due to devirtualization. We've assumed resolved_method
+ // has single implementation.
+ outermost_graph_->AddCHASingleImplementationDependency(resolved_method);
+ MaybeRecordStat(stats_, MethodCompilationStat::kCHAInline);
+ } else {
+ MaybeRecordStat(stats_, MethodCompilationStat::kInlinedInvokeVirtualOrInterface);
+ }
}
+ } else if (!cha_devirtualize && AlwaysThrows(actual_method)) {
+ // Set always throws property for non-inlined method call with single target
+ // (unless it was obtained through CHA, because that would imply we have
+ // to add the CHA dependency, which seems not worth it).
+ invoke_instruction->SetAlwaysThrows(true);
}
return result;
}
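As a hedged illustration of what the new AlwaysThrows() helper accepts, written as C++ analogues of the dex-level shapes rather than ART code:

#include <stdexcept>

// Accepted shape: no return opcode is reachable, there are no try blocks, and a
// THROW is present, so call sites would get SetAlwaysThrows(true).
[[noreturn]] void ThrowIllegalState() {
  throw std::logic_error("illegal state");
}

// Rejected shape: a return is reachable when bad == false, so the scan sees a
// RETURN_VOID opcode and classifies the method as not always throwing.
void MaybeThrow(bool bad) {
  if (bad) {
    throw std::logic_error("bad");
  }
}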
@@ -1381,7 +1419,7 @@ bool HInliner::TryBuildAndInline(HInvoke* invoke_instruction,
bool same_dex_file = IsSameDexFile(*outer_compilation_unit_.GetDexFile(), *method->GetDexFile());
- CodeItemDataAccessor accessor(method);
+ CodeItemDataAccessor accessor(method->DexInstructionData());
if (!accessor.HasCodeItem()) {
LOG_FAIL_NO_STAT()
@@ -1660,7 +1698,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
const DexFile::CodeItem* code_item = resolved_method->GetCodeItem();
const DexFile& callee_dex_file = *resolved_method->GetDexFile();
uint32_t method_index = resolved_method->GetDexMethodIndex();
- CodeItemDebugInfoAccessor code_item_accessor(callee_dex_file, code_item);
+ CodeItemDebugInfoAccessor code_item_accessor(resolved_method->DexInstructionDebugInfo());
ClassLinker* class_linker = caller_compilation_unit_.GetClassLinker();
Handle<mirror::DexCache> dex_cache = NewHandleIfDifferent(resolved_method->GetDexCache(),
caller_compilation_unit_.GetDexCache(),
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index e81d97b0a8..02465d37ba 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -18,7 +18,7 @@
#define ART_COMPILER_OPTIMIZING_INLINER_H_
#include "dex/dex_file_types.h"
-#include "invoke_type.h"
+#include "dex/invoke_type.h"
#include "jit/profile_compilation_info.h"
#include "optimization.h"
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index 72a93c1f77..64a1eccf60 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -49,7 +49,7 @@ HInstructionBuilder::HInstructionBuilder(HGraph* graph,
const DexCompilationUnit* outer_compilation_unit,
CompilerDriver* compiler_driver,
CodeGenerator* code_generator,
- const uint8_t* interpreter_metadata,
+ ArrayRef<const uint8_t> interpreter_metadata,
OptimizingCompilerStats* compiler_stats,
VariableSizedHandleScope* handles,
ScopedArenaAllocator* local_allocator)
diff --git a/compiler/optimizing/instruction_builder.h b/compiler/optimizing/instruction_builder.h
index 708a09711a..4428c53277 100644
--- a/compiler/optimizing/instruction_builder.h
+++ b/compiler/optimizing/instruction_builder.h
@@ -17,6 +17,7 @@
#ifndef ART_COMPILER_OPTIMIZING_INSTRUCTION_BUILDER_H_
#define ART_COMPILER_OPTIMIZING_INSTRUCTION_BUILDER_H_
+#include "base/array_ref.h"
#include "base/scoped_arena_allocator.h"
#include "base/scoped_arena_containers.h"
#include "data_type.h"
@@ -57,7 +58,7 @@ class HInstructionBuilder : public ValueObject {
const DexCompilationUnit* outer_compilation_unit,
CompilerDriver* compiler_driver,
CodeGenerator* code_generator,
- const uint8_t* interpreter_metadata,
+ ArrayRef<const uint8_t> interpreter_metadata,
OptimizingCompilerStats* compiler_stats,
VariableSizedHandleScope* handles,
ScopedArenaAllocator* local_allocator);
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index 6928b70df7..acb830e524 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -19,9 +19,9 @@
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "class_linker.h"
+#include "dex/invoke_type.h"
#include "driver/compiler_driver.h"
#include "driver/compiler_options.h"
-#include "invoke_type.h"
#include "mirror/dex_cache-inl.h"
#include "nodes.h"
#include "scoped_thread_state_change-inl.h"
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index ca1b451e6b..2f8e33f941 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -2011,6 +2011,14 @@ void IntrinsicCodeGeneratorARM64::VisitMathAtan2(HInvoke* invoke) {
GenFPToFPCall(invoke, codegen_, kQuickAtan2);
}
+void IntrinsicLocationsBuilderARM64::VisitMathPow(HInvoke* invoke) {
+ CreateFPFPToFPCallLocations(allocator_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM64::VisitMathPow(HInvoke* invoke) {
+ GenFPToFPCall(invoke, codegen_, kQuickPow);
+}
+
void IntrinsicLocationsBuilderARM64::VisitMathHypot(HInvoke* invoke) {
CreateFPFPToFPCallLocations(allocator_, invoke);
}
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index 99b8b5df74..830d0403e4 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -2811,6 +2811,14 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathAtan2(HInvoke* invoke) {
GenFPFPToFPCall(invoke, GetAssembler(), codegen_, kQuickAtan2);
}
+void IntrinsicLocationsBuilderARMVIXL::VisitMathPow(HInvoke* invoke) {
+ CreateFPFPToFPCallLocations(allocator_, invoke);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitMathPow(HInvoke* invoke) {
+ GenFPFPToFPCall(invoke, GetAssembler(), codegen_, kQuickPow);
+}
+
void IntrinsicLocationsBuilderARMVIXL::VisitMathHypot(HInvoke* invoke) {
CreateFPFPToFPCallLocations(allocator_, invoke);
}
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index 113c9de5a2..cafa5228d9 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -2835,6 +2835,15 @@ void IntrinsicCodeGeneratorMIPS::VisitMathAtan2(HInvoke* invoke) {
GenFPFPToFPCall(invoke, codegen_, kQuickAtan2);
}
+// static double java.lang.Math.pow(double y, double x)
+void IntrinsicLocationsBuilderMIPS::VisitMathPow(HInvoke* invoke) {
+ CreateFPFPToFPCallLocations(allocator_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitMathPow(HInvoke* invoke) {
+ GenFPFPToFPCall(invoke, codegen_, kQuickPow);
+}
+
// static double java.lang.Math.cbrt(double a)
void IntrinsicLocationsBuilderMIPS::VisitMathCbrt(HInvoke* invoke) {
CreateFPToFPCallLocations(allocator_, invoke);
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 521bad27e2..89f1818be2 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -2416,6 +2416,15 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathAtan2(HInvoke* invoke) {
GenFPFPToFPCall(invoke, codegen_, kQuickAtan2);
}
+// static double java.lang.Math.pow(double y, double x)
+void IntrinsicLocationsBuilderMIPS64::VisitMathPow(HInvoke* invoke) {
+ CreateFPFPToFPCallLocations(allocator_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitMathPow(HInvoke* invoke) {
+ GenFPFPToFPCall(invoke, codegen_, kQuickPow);
+}
+
// static double java.lang.Math.cbrt(double a)
void IntrinsicLocationsBuilderMIPS64::VisitMathCbrt(HInvoke* invoke) {
CreateFPToFPCallLocations(allocator_, invoke);
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index baa410b884..46b7f3f1ce 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -1105,6 +1105,14 @@ void IntrinsicCodeGeneratorX86::VisitMathAtan2(HInvoke* invoke) {
GenFPToFPCall(invoke, codegen_, kQuickAtan2);
}
+void IntrinsicLocationsBuilderX86::VisitMathPow(HInvoke* invoke) {
+ CreateFPFPToFPCallLocations(allocator_, invoke);
+}
+
+void IntrinsicCodeGeneratorX86::VisitMathPow(HInvoke* invoke) {
+ GenFPToFPCall(invoke, codegen_, kQuickPow);
+}
+
void IntrinsicLocationsBuilderX86::VisitMathHypot(HInvoke* invoke) {
CreateFPFPToFPCallLocations(allocator_, invoke);
}
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 6dd8b8e1f5..6483b7cb2a 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -897,6 +897,14 @@ void IntrinsicCodeGeneratorX86_64::VisitMathAtan2(HInvoke* invoke) {
GenFPToFPCall(invoke, codegen_, kQuickAtan2);
}
+void IntrinsicLocationsBuilderX86_64::VisitMathPow(HInvoke* invoke) {
+ CreateFPFPToFPCallLocations(allocator_, invoke);
+}
+
+void IntrinsicCodeGeneratorX86_64::VisitMathPow(HInvoke* invoke) {
+ GenFPToFPCall(invoke, codegen_, kQuickPow);
+}
+
void IntrinsicLocationsBuilderX86_64::VisitMathHypot(HInvoke* invoke) {
CreateFPFPToFPCallLocations(allocator_, invoke);
}
diff --git a/compiler/optimizing/linearize_test.cc b/compiler/optimizing/linearize_test.cc
index 43b63a73ef..9fa5b74c62 100644
--- a/compiler/optimizing/linearize_test.cc
+++ b/compiler/optimizing/linearize_test.cc
@@ -35,11 +35,12 @@ namespace art {
class LinearizeTest : public OptimizingUnitTest {
protected:
template <size_t number_of_blocks>
- void TestCode(const uint16_t* data, const uint32_t (&expected_order)[number_of_blocks]);
+ void TestCode(const std::vector<uint16_t>& data,
+ const uint32_t (&expected_order)[number_of_blocks]);
};
template <size_t number_of_blocks>
-void LinearizeTest::TestCode(const uint16_t* data,
+void LinearizeTest::TestCode(const std::vector<uint16_t>& data,
const uint32_t (&expected_order)[number_of_blocks]) {
HGraph* graph = CreateCFG(data);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
@@ -68,7 +69,7 @@ TEST_F(LinearizeTest, CFG1) {
// + / \ +
// Block4 Block8
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 5,
Instruction::IF_EQ, 0xFFFE,
@@ -93,7 +94,7 @@ TEST_F(LinearizeTest, CFG2) {
// + / \ +
// Block5 Block8
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3,
Instruction::RETURN_VOID,
@@ -119,7 +120,7 @@ TEST_F(LinearizeTest, CFG3) {
// Block6 + Block9
// | +
// Block4 ++
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 4,
Instruction::RETURN_VOID,
@@ -149,7 +150,7 @@ TEST_F(LinearizeTest, CFG4) {
// + / \ +
// Block5 Block11
*/
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 7,
Instruction::IF_EQ, 0xFFFE,
@@ -179,7 +180,7 @@ TEST_F(LinearizeTest, CFG5) {
// +/ \ +
// Block6 Block11
*/
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3,
Instruction::RETURN_VOID,
@@ -205,7 +206,7 @@ TEST_F(LinearizeTest, CFG6) {
// Block5 <- Block9 Block6 +
// |
// Block7
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::GOTO | 0x0100,
Instruction::IF_EQ, 0x0004,
@@ -233,7 +234,7 @@ TEST_F(LinearizeTest, CFG7) {
// |
// Block7
//
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::GOTO | 0x0100,
Instruction::IF_EQ, 0x0005,
diff --git a/compiler/optimizing/live_ranges_test.cc b/compiler/optimizing/live_ranges_test.cc
index e45d7c820c..66660662e4 100644
--- a/compiler/optimizing/live_ranges_test.cc
+++ b/compiler/optimizing/live_ranges_test.cc
@@ -31,10 +31,10 @@ namespace art {
class LiveRangesTest : public OptimizingUnitTest {
public:
- HGraph* BuildGraph(const uint16_t* data);
+ HGraph* BuildGraph(const std::vector<uint16_t>& data);
};
-HGraph* LiveRangesTest::BuildGraph(const uint16_t* data) {
+HGraph* LiveRangesTest::BuildGraph(const std::vector<uint16_t>& data) {
HGraph* graph = CreateCFG(data);
// Suspend checks implementation may change in the future, and this test relies
// on how instructions are ordered.
@@ -57,7 +57,7 @@ TEST_F(LiveRangesTest, CFG1) {
* |
* 12: exit
*/
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::RETURN);
@@ -102,7 +102,7 @@ TEST_F(LiveRangesTest, CFG2) {
* |
* 26: exit
*/
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3,
Instruction::GOTO | 0x100,
@@ -151,7 +151,7 @@ TEST_F(LiveRangesTest, CFG3) {
* |
* 28: exit
*/
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3,
Instruction::CONST_4 | 4 << 12 | 0,
@@ -225,7 +225,7 @@ TEST_F(LiveRangesTest, Loop1) {
* 30: exit
*/
- const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 4,
Instruction::CONST_4 | 4 << 12 | 0,
@@ -304,7 +304,7 @@ TEST_F(LiveRangesTest, Loop2) {
* We want to make sure the phi at 10 has a lifetime hole after the add at 20.
*/
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 6,
Instruction::ADD_INT, 0, 0,
@@ -378,7 +378,7 @@ TEST_F(LiveRangesTest, CFG4) {
*
* We want to make sure the constant0 has a lifetime hole after the 16: add.
*/
- const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::CONST_4 | 4 << 12 | 1 << 8,
Instruction::IF_EQ, 5,
diff --git a/compiler/optimizing/liveness_test.cc b/compiler/optimizing/liveness_test.cc
index 35bc4ff8b3..6621a03568 100644
--- a/compiler/optimizing/liveness_test.cc
+++ b/compiler/optimizing/liveness_test.cc
@@ -31,7 +31,7 @@ namespace art {
class LivenessTest : public OptimizingUnitTest {
protected:
- void TestCode(const uint16_t* data, const char* expected);
+ void TestCode(const std::vector<uint16_t>& data, const char* expected);
};
static void DumpBitVector(BitVector* vector,
@@ -46,7 +46,7 @@ static void DumpBitVector(BitVector* vector,
buffer << ")\n";
}
-void LivenessTest::TestCode(const uint16_t* data, const char* expected) {
+void LivenessTest::TestCode(const std::vector<uint16_t>& data, const char* expected) {
HGraph* graph = CreateCFG(data);
// `Inline` conditions into ifs.
PrepareForRegisterAllocation(graph).Run();
@@ -86,7 +86,7 @@ TEST_F(LivenessTest, CFG1) {
" kill: (0)\n";
// Constant is not used.
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::RETURN_VOID);
@@ -108,7 +108,7 @@ TEST_F(LivenessTest, CFG2) {
" live out: (0)\n"
" kill: (0)\n";
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::RETURN);
@@ -134,7 +134,7 @@ TEST_F(LivenessTest, CFG3) {
" live out: (000)\n"
" kill: (000)\n";
- const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 3 << 12 | 0,
Instruction::CONST_4 | 4 << 12 | 1 << 8,
Instruction::ADD_INT_2ADDR | 1 << 12,
@@ -181,7 +181,7 @@ TEST_F(LivenessTest, CFG4) {
" live out: (0000)\n"
" kill: (0000)\n";
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 4,
Instruction::CONST_4 | 4 << 12 | 0,
@@ -228,7 +228,7 @@ TEST_F(LivenessTest, CFG5) {
" live out: (000)\n"
" kill: (000)\n";
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3,
Instruction::CONST_4 | 4 << 12 | 0,
@@ -273,7 +273,7 @@ TEST_F(LivenessTest, Loop1) {
" kill: (000)\n";
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 4,
Instruction::CONST_4 | 4 << 12 | 0,
@@ -318,7 +318,7 @@ TEST_F(LivenessTest, Loop3) {
" live out: (0000)\n"
" kill: (0000)\n";
- const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 4,
Instruction::CONST_4 | 4 << 12 | 0,
@@ -370,7 +370,7 @@ TEST_F(LivenessTest, Loop4) {
" live out: (000)\n"
" kill: (000)\n";
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::GOTO | 0x500,
Instruction::IF_EQ, 5,
@@ -425,7 +425,7 @@ TEST_F(LivenessTest, Loop5) {
" live out: (0001)\n"
" kill: (0001)\n";
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 4,
Instruction::CONST_4 | 4 << 12 | 0,
@@ -475,7 +475,7 @@ TEST_F(LivenessTest, Loop6) {
" live out: (0000)\n"
" kill: (0000)\n";
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 8,
Instruction::CONST_4 | 4 << 12 | 0,
@@ -530,7 +530,7 @@ TEST_F(LivenessTest, Loop7) {
" live out: (00000)\n"
" kill: (00000)\n";
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 8,
Instruction::CONST_4 | 4 << 12 | 0,
@@ -580,7 +580,7 @@ TEST_F(LivenessTest, Loop8) {
" live out: (000)\n"
" kill: (000)\n";
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 6,
Instruction::ADD_INT, 0, 0,
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index 88326d321b..8b4eae1780 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -25,6 +25,51 @@
#include <iostream>
+/**
+ * The general algorithm of load-store elimination (LSE).
+ * Load-store analysis in the previous pass collects a list of heap locations
+ * and does alias analysis of those heap locations.
+ * LSE keeps track of a list of heap values corresponding to the heap
+ * locations. It visits basic blocks in reverse post order and for
+ * each basic block, visits instructions sequentially, and processes
+ * instructions as follows:
+ * - If the instruction is a load, and the heap location for that load has a
+ * valid heap value, the load can be eliminated. In order to maintain the
+ * validity of all heap locations during the optimization phase, the real
+ * elimination is delayed till the end of LSE.
+ * - If the instruction is a store, it updates the heap value for the heap
+ * location of the store with the store instruction. The real heap value
+ * can be fetched from the store instruction. Heap values are invalidated
+ * for heap locations that may alias with the store instruction's heap
+ * location. The store instruction can be eliminated unless the value stored
+ * is later needed e.g. by a load from the same/aliased heap location or
+ * the heap location persists at method return/deoptimization.
+ * The store instruction is also needed if it's not used to track the heap
+ * value anymore, e.g. when it fails to merge with the heap values from other
+ * predecessors.
+ * - A store that stores the same value as the heap value is eliminated.
+ * - The list of heap values are merged at basic block entry from the basic
+ * block's predecessors. The algorithm is single-pass, so loop side-effects are
+ * used as a best effort to decide whether a heap location is written inside the loop.
+ * - A special type of objects called singletons are instantiated in the method
+ * and have a single name, i.e. no aliases. Singletons have exclusive heap
+ * locations since they have no aliases. Singletons are helpful in narrowing
+ * down the life span of a heap location such that they do not always
+ * need to participate in merging heap values. Allocation of a singleton
+ * can be eliminated if that singleton is not used and does not persist
+ * at method return/deoptimization.
+ * - For newly instantiated instances, their heap values are initialized to
+ * language defined default values.
+ * - Some instructions such as invokes are treated as loading and invalidating
+ * all the heap values, depending on the instruction's side effects.
+ * - Finalizable objects are considered as persisting at method
+ * return/deoptimization.
+ * - Currently this LSE algorithm doesn't handle SIMD graphs, e.g. those with
+ * VecLoad and VecStore instructions.
+ * - Currently this LSE algorithm doesn't handle graphs with try-catch, due to
+ * their special block merging structure.
+ */
+
namespace art {
// An unknown heap value. Loads with such a value in the heap location cannot be eliminated.
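A hedged, stand-alone illustration of the load-after-store case the overview above describes (ordinary C++, not ART HIR); p is a singleton because it never escapes the method:

struct Point { int x; };

int Example() {
  Point p;
  p.x = 41;      // store: the tracked heap value for p.x becomes 41
  int a = p.x;   // load: eliminable, replaced by the tracked value 41
  p.x = a + 1;   // store: the first store becomes dead once no later load needs it
  return p.x;    // load: eliminable, replaced by the tracked value 42
}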
@@ -59,8 +104,7 @@ class LSEVisitor : public HGraphDelegateVisitor {
removed_loads_(allocator_.Adapter(kArenaAllocLSE)),
substitute_instructions_for_loads_(allocator_.Adapter(kArenaAllocLSE)),
possibly_removed_stores_(allocator_.Adapter(kArenaAllocLSE)),
- singleton_new_instances_(allocator_.Adapter(kArenaAllocLSE)),
- singleton_new_arrays_(allocator_.Adapter(kArenaAllocLSE)) {
+ singleton_new_instances_(allocator_.Adapter(kArenaAllocLSE)) {
}
void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
@@ -88,19 +132,26 @@ class LSEVisitor : public HGraphDelegateVisitor {
return type_conversion;
}
- // Find an instruction's substitute if it should be removed.
+ // Find an instruction's substitute if it's a removed load.
// Return the same instruction if it should not be removed.
HInstruction* FindSubstitute(HInstruction* instruction) {
+ if (!IsLoad(instruction)) {
+ return instruction;
+ }
size_t size = removed_loads_.size();
for (size_t i = 0; i < size; i++) {
if (removed_loads_[i] == instruction) {
- return substitute_instructions_for_loads_[i];
+ HInstruction* substitute = substitute_instructions_for_loads_[i];
+ // The substitute list is a flat hierarchy.
+ DCHECK_EQ(FindSubstitute(substitute), substitute);
+ return substitute;
}
}
return instruction;
}
void AddRemovedLoad(HInstruction* load, HInstruction* heap_value) {
+ DCHECK(IsLoad(load));
DCHECK_EQ(FindSubstitute(heap_value), heap_value) <<
"Unexpected heap_value that has a substitute " << heap_value->DebugName();
removed_loads_.push_back(load);
@@ -207,28 +258,59 @@ class LSEVisitor : public HGraphDelegateVisitor {
new_instance->GetBlock()->RemoveInstruction(new_instance);
}
}
- for (HInstruction* new_array : singleton_new_arrays_) {
- size_t removed = HConstructorFence::RemoveConstructorFences(new_array);
- MaybeRecordStat(stats_,
- MethodCompilationStat::kConstructorFenceRemovedLSE,
- removed);
+ }
- if (!new_array->HasNonEnvironmentUses()) {
- new_array->RemoveEnvironmentUsers();
- new_array->GetBlock()->RemoveInstruction(new_array);
- }
+ private:
+ static bool IsLoad(HInstruction* instruction) {
+ if (instruction == kUnknownHeapValue || instruction == kDefaultHeapValue) {
+ return false;
}
+ // Unresolved load is not treated as a load.
+ return instruction->IsInstanceFieldGet() ||
+ instruction->IsStaticFieldGet() ||
+ instruction->IsArrayGet();
}
- private:
- // If heap_values[index] is an instance field store, need to keep the store.
- // This is necessary if a heap value is killed due to merging, or loop side
- // effects (which is essentially merging also), since a load later from the
- // location won't be eliminated.
+ static bool IsStore(HInstruction* instruction) {
+ if (instruction == kUnknownHeapValue || instruction == kDefaultHeapValue) {
+ return false;
+ }
+ // Unresolved store is not treated as a store.
+ return instruction->IsInstanceFieldSet() ||
+ instruction->IsArraySet() ||
+ instruction->IsStaticFieldSet();
+ }
+
+ // Returns the real heap value by finding its substitute or by "peeling"
+ // a store instruction.
+ HInstruction* GetRealHeapValue(HInstruction* heap_value) {
+ if (IsLoad(heap_value)) {
+ return FindSubstitute(heap_value);
+ }
+ if (!IsStore(heap_value)) {
+ return heap_value;
+ }
+
+ // We keep track of store instructions as the heap values which might be
+ // eliminated if the stores are later found not necessary. The real stored
+ // value needs to be fetched from the store instruction.
+ if (heap_value->IsInstanceFieldSet()) {
+ heap_value = heap_value->AsInstanceFieldSet()->GetValue();
+ } else if (heap_value->IsStaticFieldSet()) {
+ heap_value = heap_value->AsStaticFieldSet()->GetValue();
+ } else {
+ DCHECK(heap_value->IsArraySet());
+ heap_value = heap_value->AsArraySet()->GetValue();
+ }
+ // heap_value may already be a removed load.
+ return FindSubstitute(heap_value);
+ }
+
+ // If heap_value is a store, need to keep the store.
+ // This is necessary if a heap value is killed or replaced by another value,
+ // so that the store is no longer used to track heap value.
void KeepIfIsStore(HInstruction* heap_value) {
- if (heap_value == kDefaultHeapValue ||
- heap_value == kUnknownHeapValue ||
- !(heap_value->IsInstanceFieldSet() || heap_value->IsArraySet())) {
+ if (!IsStore(heap_value)) {
return;
}
auto idx = std::find(possibly_removed_stores_.begin(),
@@ -239,26 +321,41 @@ class LSEVisitor : public HGraphDelegateVisitor {
}
}
+ // If a heap location X may alias with heap location at `loc_index`
+ // and heap_values of that heap location X holds a store, keep that store.
+ // It's needed for a dependent load that's not eliminated since any store
+ // that may put value into the load's heap location needs to be kept.
+ void KeepStoresIfAliasedToLocation(ScopedArenaVector<HInstruction*>& heap_values,
+ size_t loc_index) {
+ for (size_t i = 0; i < heap_values.size(); i++) {
+ if ((i == loc_index) || heap_location_collector_.MayAlias(i, loc_index)) {
+ KeepIfIsStore(heap_values[i]);
+ }
+ }
+ }
+
void HandleLoopSideEffects(HBasicBlock* block) {
DCHECK(block->IsLoopHeader());
int block_id = block->GetBlockId();
ScopedArenaVector<HInstruction*>& heap_values = heap_values_for_[block_id];
+ HBasicBlock* pre_header = block->GetLoopInformation()->GetPreHeader();
+ ScopedArenaVector<HInstruction*>& pre_header_heap_values =
+ heap_values_for_[pre_header->GetBlockId()];
- // Don't eliminate loads in irreducible loops. This is safe for singletons, because
- // they are always used by the non-eliminated loop-phi.
+ // Don't eliminate loads in irreducible loops.
+ // Also keep the stores before the loop.
if (block->GetLoopInformation()->IsIrreducible()) {
if (kIsDebugBuild) {
for (size_t i = 0; i < heap_values.size(); i++) {
DCHECK_EQ(heap_values[i], kUnknownHeapValue);
}
}
+ for (size_t i = 0; i < heap_values.size(); i++) {
+ KeepIfIsStore(pre_header_heap_values[i]);
+ }
return;
}
- HBasicBlock* pre_header = block->GetLoopInformation()->GetPreHeader();
- ScopedArenaVector<HInstruction*>& pre_header_heap_values =
- heap_values_for_[pre_header->GetBlockId()];
-
// Inherit the values from pre-header.
for (size_t i = 0; i < heap_values.size(); i++) {
heap_values[i] = pre_header_heap_values[i];
@@ -270,18 +367,17 @@ class LSEVisitor : public HGraphDelegateVisitor {
for (size_t i = 0; i < heap_values.size(); i++) {
HeapLocation* location = heap_location_collector_.GetHeapLocation(i);
ReferenceInfo* ref_info = location->GetReferenceInfo();
- if (ref_info->IsSingletonAndRemovable() &&
- !location->IsValueKilledByLoopSideEffects()) {
- // A removable singleton's field that's not stored into inside a loop is
+ if (ref_info->IsSingleton() && !location->IsValueKilledByLoopSideEffects()) {
+ // A singleton's field that's not stored into inside a loop is
// invariant throughout the loop. Nothing to do.
} else {
- // heap value is killed by loop side effects (stored into directly, or
- // due to aliasing). Or the heap value may be needed after method return
- // or deoptimization.
+ // heap value is killed by loop side effects.
KeepIfIsStore(pre_header_heap_values[i]);
heap_values[i] = kUnknownHeapValue;
}
}
+ } else {
+ // The loop doesn't kill any value.
}
}
@@ -300,45 +396,73 @@ class LSEVisitor : public HGraphDelegateVisitor {
ScopedArenaVector<HInstruction*>& heap_values = heap_values_for_[block->GetBlockId()];
for (size_t i = 0; i < heap_values.size(); i++) {
HInstruction* merged_value = nullptr;
+ // If we can merge the store itself from the predecessors, we keep
+ // the store as the heap value as long as possible. In case we cannot
+ // merge the store, we try to merge the values of the stores.
+ HInstruction* merged_store_value = nullptr;
// Whether merged_value is a result that's merged from all predecessors.
bool from_all_predecessors = true;
ReferenceInfo* ref_info = heap_location_collector_.GetHeapLocation(i)->GetReferenceInfo();
+ HInstruction* ref = ref_info->GetReference();
HInstruction* singleton_ref = nullptr;
if (ref_info->IsSingleton()) {
- // We do more analysis of liveness when merging heap values for such
- // cases since stores into such references may potentially be eliminated.
- singleton_ref = ref_info->GetReference();
+ // We do more analysis based on singleton's liveness when merging
+ // heap values for such cases.
+ singleton_ref = ref;
}
for (HBasicBlock* predecessor : predecessors) {
HInstruction* pred_value = heap_values_for_[predecessor->GetBlockId()][i];
+ if (!IsStore(pred_value)) {
+ pred_value = FindSubstitute(pred_value);
+ }
+ DCHECK(pred_value != nullptr);
+ HInstruction* pred_store_value = GetRealHeapValue(pred_value);
if ((singleton_ref != nullptr) &&
!singleton_ref->GetBlock()->Dominates(predecessor)) {
- // singleton_ref is not live in this predecessor. Skip this predecessor since
- // it does not really have the location.
+ // singleton_ref is not live in this predecessor. No need to merge
+ // since singleton_ref is not live at the beginning of this block.
DCHECK_EQ(pred_value, kUnknownHeapValue);
from_all_predecessors = false;
- continue;
+ break;
}
if (merged_value == nullptr) {
// First seen heap value.
+ DCHECK(pred_value != nullptr);
merged_value = pred_value;
} else if (pred_value != merged_value) {
// There are conflicting values.
merged_value = kUnknownHeapValue;
+ // We may still be able to merge store values.
+ }
+
+ // Conflicting stores may be storing the same value. We do another merge
+ // of real stored values.
+ if (merged_store_value == nullptr) {
+ // First seen store value.
+ DCHECK(pred_store_value != nullptr);
+ merged_store_value = pred_store_value;
+ } else if (pred_store_value != merged_store_value) {
+ // There are conflicting store values.
+ merged_store_value = kUnknownHeapValue;
+ // There must also be conflicting stores.
+ DCHECK_EQ(merged_value, kUnknownHeapValue);
+ // No need to merge anymore.
break;
}
}
- if (ref_info->IsSingleton()) {
- if (ref_info->IsSingletonAndNonRemovable() ||
- (merged_value == kUnknownHeapValue &&
- !block->IsSingleReturnOrReturnVoidAllowingPhis())) {
- // The heap value may be needed after method return or deoptimization,
- // or there are conflicting heap values from different predecessors and
- // this block is not a single return,
- // keep the last store in each predecessor since future loads may not
- // be eliminated.
+ if (merged_value == nullptr) {
+ DCHECK(!from_all_predecessors);
+ DCHECK(singleton_ref != nullptr);
+ }
+ if (from_all_predecessors) {
+ if (ref_info->IsSingletonAndRemovable() &&
+ block->IsSingleReturnOrReturnVoidAllowingPhis()) {
+ // Values in the singleton are not needed anymore.
+ } else if (!IsStore(merged_value)) {
+ // We no longer track the merged value as a store. We have to
+ // keep the stores in the predecessors live here.
for (HBasicBlock* predecessor : predecessors) {
ScopedArenaVector<HInstruction*>& pred_values =
heap_values_for_[predecessor->GetBlockId()];
@@ -346,18 +470,33 @@ class LSEVisitor : public HGraphDelegateVisitor {
}
}
} else {
- // Currenctly we don't eliminate stores to non-singletons.
+ DCHECK(singleton_ref != nullptr);
+ // singleton_ref does not exist at the beginning of the block. There is
+ // no need to keep the stores.
}
- if ((merged_value == nullptr) || !from_all_predecessors) {
+ if (!from_all_predecessors) {
DCHECK(singleton_ref != nullptr);
DCHECK((singleton_ref->GetBlock() == block) ||
- !singleton_ref->GetBlock()->Dominates(block));
+ !singleton_ref->GetBlock()->Dominates(block))
+ << "method: " << GetGraph()->GetMethodName();
// singleton_ref is not defined before block or defined only in some of its
// predecessors, so block doesn't really have the location at its entry.
heap_values[i] = kUnknownHeapValue;
- } else {
+ } else if (predecessors.size() == 1) {
+ // Inherit heap value from the single predecessor.
+ DCHECK_EQ(heap_values_for_[predecessors[0]->GetBlockId()][i], merged_value);
heap_values[i] = merged_value;
+ } else {
+ DCHECK(merged_value == kUnknownHeapValue ||
+ merged_value == kDefaultHeapValue ||
+ merged_value->GetBlock()->Dominates(block));
+ if (merged_value != kUnknownHeapValue) {
+ heap_values[i] = merged_value;
+ } else {
+ // Stores in different predecessors may be storing the same value.
+ heap_values[i] = merged_store_value;
+ }
}
}
}
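The two-level merge above pays off when the predecessors end with different store instructions that happen to write the same value, e.g. two HInstanceFieldSet instructions storing the same constant into the same field: the stores conflict, but the stored values agree, so a later load of that location can still be eliminated. A minimal sketch of that decision for two predecessors, assuming the pass's kUnknownHeapValue sentinel (not the actual LSEVisitor code, which also runs FindSubstitute/GetRealHeapValue on the predecessor values first):

// Sketch only: merge the stores themselves first, and only when they conflict
// fall back to merging the values those stores wrote.
HInstruction* MergeTwoPredecessorsSketch(HInstruction* pred0_value,
                                         HInstruction* pred0_store_value,
                                         HInstruction* pred1_value,
                                         HInstruction* pred1_store_value) {
  HInstruction* merged_value =
      (pred0_value == pred1_value) ? pred0_value : kUnknownHeapValue;
  HInstruction* merged_store_value =
      (pred0_store_value == pred1_store_value) ? pred0_store_value : kUnknownHeapValue;
  return (merged_value != kUnknownHeapValue) ? merged_value : merged_store_value;
}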
@@ -423,23 +562,12 @@ class LSEVisitor : public HGraphDelegateVisitor {
heap_values[idx] = constant;
return;
}
- if (heap_value != kUnknownHeapValue) {
- if (heap_value->IsInstanceFieldSet() || heap_value->IsArraySet()) {
- HInstruction* store = heap_value;
- // This load must be from a singleton since it's from the same
- // field/element that a "removed" store puts the value. That store
- // must be to a singleton's field/element.
- DCHECK(ref_info->IsSingleton());
- // Get the real heap value of the store.
- heap_value = heap_value->IsInstanceFieldSet() ? store->InputAt(1) : store->InputAt(2);
- // heap_value may already have a substitute.
- heap_value = FindSubstitute(heap_value);
- }
- }
+ heap_value = GetRealHeapValue(heap_value);
if (heap_value == kUnknownHeapValue) {
// Load isn't eliminated. Put the load as the value into the HeapLocation.
// This acts like GVN but with better aliasing analysis.
heap_values[idx] = instruction;
+ KeepStoresIfAliasedToLocation(heap_values, idx);
} else {
if (DataType::Kind(heap_value->GetType()) != DataType::Kind(instruction->GetType())) {
// The only situation where the same heap location has different type is when
@@ -452,6 +580,10 @@ class LSEVisitor : public HGraphDelegateVisitor {
DCHECK(heap_value->IsArrayGet()) << heap_value->DebugName();
DCHECK(instruction->IsArrayGet()) << instruction->DebugName();
}
+ // Load isn't eliminated. Put the load as the value into the HeapLocation.
+ // This acts like GVN but with better aliasing analysis.
+ heap_values[idx] = instruction;
+ KeepStoresIfAliasedToLocation(heap_values, idx);
return;
}
AddRemovedLoad(instruction, heap_value);
@@ -460,12 +592,21 @@ class LSEVisitor : public HGraphDelegateVisitor {
}
bool Equal(HInstruction* heap_value, HInstruction* value) {
+ DCHECK(!IsStore(value)) << value->DebugName();
+ if (heap_value == kUnknownHeapValue) {
+ // Don't compare kUnknownHeapValue with other values.
+ return false;
+ }
if (heap_value == value) {
return true;
}
if (heap_value == kDefaultHeapValue && GetDefaultValue(value->GetType()) == value) {
return true;
}
+ HInstruction* real_heap_value = GetRealHeapValue(heap_value);
+ if (real_heap_value != heap_value) {
+ return Equal(real_heap_value, value);
+ }
return false;
}
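GetRealHeapValue() is the helper that replaces the unwrapping VisitGetLocation used to do inline: when the tracked heap value is a store kept alive only to feed later loads, comparisons have to be made against the value that store writes. A hedged sketch of its shape, assuming field stores carry the value in operand 1 and array stores in operand 2 as the code removed from VisitGetLocation above did (the real helper also handles kUnknownHeapValue/kDefaultHeapValue):

// Sketch only, not the actual implementation.
HInstruction* GetRealHeapValueSketch(HInstruction* heap_value) {
  if (heap_value->IsInstanceFieldSet() || heap_value->IsStaticFieldSet()) {
    heap_value = heap_value->InputAt(1);  // value operand of a field store
  } else if (heap_value->IsArraySet()) {
    heap_value = heap_value->InputAt(2);  // value operand of an array store
  } else {
    return heap_value;  // not a store; already a real value
  }
  // The stored value may itself have been replaced earlier.
  return FindSubstitute(heap_value);
}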
@@ -476,6 +617,7 @@ class LSEVisitor : public HGraphDelegateVisitor {
size_t vector_length,
int16_t declaring_class_def_index,
HInstruction* value) {
+ DCHECK(!IsStore(value)) << value->DebugName();
// value may already have a substitute.
value = FindSubstitute(value);
HInstruction* original_ref = heap_location_collector_.HuntForOriginalReference(ref);
@@ -486,59 +628,47 @@ class LSEVisitor : public HGraphDelegateVisitor {
ScopedArenaVector<HInstruction*>& heap_values =
heap_values_for_[instruction->GetBlock()->GetBlockId()];
HInstruction* heap_value = heap_values[idx];
- bool same_value = false;
bool possibly_redundant = false;
+
if (Equal(heap_value, value)) {
// Store into the heap location with the same value.
- same_value = true;
- } else if (index != nullptr &&
- heap_location_collector_.GetHeapLocation(idx)->HasAliasedLocations()) {
- // For array element, don't eliminate stores if the location can be aliased
- // (due to either ref or index aliasing).
- } else if (ref_info->IsSingleton()) {
- // Store into a field/element of a singleton. The value cannot be killed due to
- // aliasing/invocation. It can be redundant since future loads can
- // directly get the value set by this instruction. The value can still be killed due to
- // merging or loop side effects. Stores whose values are killed due to merging/loop side
- // effects later will be removed from possibly_removed_stores_ when that is detected.
- // Stores whose values may be needed after method return or deoptimization
- // are also removed from possibly_removed_stores_ when that is detected.
- possibly_redundant = true;
+ // This store can be eliminated right away.
+ instruction->GetBlock()->RemoveInstruction(instruction);
+ return;
+ } else {
HLoopInformation* loop_info = instruction->GetBlock()->GetLoopInformation();
- if (loop_info != nullptr) {
- // instruction is a store in the loop so the loop must does write.
+ if (loop_info == nullptr) {
+ // Store is not in a loop. We try to precisely track the heap value
+ // using the store.
+ possibly_redundant = true;
+ } else if (!loop_info->IsIrreducible()) {
+ // instruction is a store in the loop, so the loop must do writes.
DCHECK(side_effects_.GetLoopEffects(loop_info->GetHeader()).DoesAnyWrite());
-
- if (loop_info->IsDefinedOutOfTheLoop(original_ref)) {
- DCHECK(original_ref->GetBlock()->Dominates(loop_info->GetPreHeader()));
- // Keep the store since its value may be needed at the loop header.
- possibly_redundant = false;
- } else {
- // The singleton is created inside the loop. Value stored to it isn't needed at
+ if (ref_info->IsSingleton() && !loop_info->IsDefinedOutOfTheLoop(original_ref)) {
+ // original_ref is created inside the loop. Value stored to it isn't needed at
// the loop header. This is true for outer loops also.
+ possibly_redundant = true;
+ } else {
+ // Keep the store since its value may be needed at the loop header.
}
+ } else {
+ // Keep the store inside irreducible loops.
}
}
- if (same_value || possibly_redundant) {
+ if (possibly_redundant) {
possibly_removed_stores_.push_back(instruction);
}
- if (!same_value) {
- if (possibly_redundant) {
- DCHECK(instruction->IsInstanceFieldSet() || instruction->IsArraySet());
- // Put the store as the heap value. If the value is loaded from heap
- // by a load later, this store isn't really redundant.
- heap_values[idx] = instruction;
- } else {
- heap_values[idx] = value;
- }
- }
+ // Put the store as the heap value. If the value is loaded or needed after
+ // return/deoptimization later, this store isn't really redundant.
+ heap_values[idx] = instruction;
+
// This store may kill values in other heap locations due to aliasing.
for (size_t i = 0; i < heap_values.size(); i++) {
if (i == idx) {
continue;
}
- if (heap_values[i] == value) {
+ if (Equal(heap_values[i], value)) {
// Same value should be kept even if aliasing happens.
continue;
}
@@ -547,7 +677,9 @@ class LSEVisitor : public HGraphDelegateVisitor {
continue;
}
if (heap_location_collector_.MayAlias(i, idx)) {
- // Kill heap locations that may alias.
+ // Kill heap locations that may alias. If the killed heap value
+ // is a store, that store needs to be kept.
+ KeepIfIsStore(heap_values[i]);
heap_values[i] = kUnknownHeapValue;
}
}
@@ -633,24 +765,35 @@ class LSEVisitor : public HGraphDelegateVisitor {
const ScopedArenaVector<HInstruction*>& heap_values =
heap_values_for_[instruction->GetBlock()->GetBlockId()];
for (HInstruction* heap_value : heap_values) {
- // Filter out fake instructions before checking instruction kind below.
- if (heap_value == kUnknownHeapValue || heap_value == kDefaultHeapValue) {
- continue;
- }
// A store is kept as the heap value for possibly removed stores.
- if (heap_value->IsInstanceFieldSet() || heap_value->IsArraySet()) {
- // Check whether the reference for a store is used by an environment local of
- // HDeoptimize.
+ // The stored value is generally observable after deoptimization, except
+ // for singletons that don't escape after deoptimization.
+ if (IsStore(heap_value)) {
+ if (heap_value->IsStaticFieldSet()) {
+ KeepIfIsStore(heap_value);
+ continue;
+ }
HInstruction* reference = heap_value->InputAt(0);
- DCHECK(heap_location_collector_.FindReferenceInfoOf(reference)->IsSingleton());
- for (const HUseListNode<HEnvironment*>& use : reference->GetEnvUses()) {
- HEnvironment* user = use.GetUser();
- if (user->GetHolder() == instruction) {
- // The singleton for the store is visible at this deoptimization
- // point. Need to keep the store so that the heap value is
- // seen by the interpreter.
+ if (heap_location_collector_.FindReferenceInfoOf(reference)->IsSingleton()) {
+ if (reference->IsNewInstance() && reference->AsNewInstance()->IsFinalizable()) {
+ // Finalizable objects always escape.
KeepIfIsStore(heap_value);
+ continue;
+ }
+ // Check whether the reference for a store is used by an environment local of
+ // HDeoptimize. If not, the singleton is not observed after
+ // deoptimization.
+ for (const HUseListNode<HEnvironment*>& use : reference->GetEnvUses()) {
+ HEnvironment* user = use.GetUser();
+ if (user->GetHolder() == instruction) {
+ // The singleton for the store is visible at this deoptimization
+ // point. Need to keep the store so that the heap value is
+ // seen by the interpreter.
+ KeepIfIsStore(heap_value);
+ }
}
+ } else {
+ KeepIfIsStore(heap_value);
}
}
}
@@ -691,9 +834,12 @@ class LSEVisitor : public HGraphDelegateVisitor {
// Singleton references cannot be seen by the callee.
} else {
if (side_effects.DoesAnyRead()) {
+ // Invocation may read the heap value.
KeepIfIsStore(heap_values[i]);
}
if (side_effects.DoesAnyWrite()) {
+ // Keep the store since it's not used to track the heap value anymore.
+ KeepIfIsStore(heap_values[i]);
heap_values[i] = kUnknownHeapValue;
}
}
@@ -758,7 +904,7 @@ class LSEVisitor : public HGraphDelegateVisitor {
return;
}
if (ref_info->IsSingletonAndRemovable()) {
- singleton_new_arrays_.push_back(new_array);
+ singleton_new_instances_.push_back(new_array);
}
ScopedArenaVector<HInstruction*>& heap_values =
heap_values_for_[new_array->GetBlock()->GetBlockId()];
@@ -791,7 +937,6 @@ class LSEVisitor : public HGraphDelegateVisitor {
ScopedArenaVector<HInstruction*> possibly_removed_stores_;
ScopedArenaVector<HInstruction*> singleton_new_instances_;
- ScopedArenaVector<HInstruction*> singleton_new_arrays_;
DISALLOW_COPY_AND_ASSIGN(LSEVisitor);
};
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index 3dc1ef7534..899496328e 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -30,46 +30,6 @@
namespace art {
-// TODO: Clean up the packed type detection so that we have the right type straight away
-// and do not need to go through this normalization.
-static inline void NormalizePackedType(/* inout */ DataType::Type* type,
- /* inout */ bool* is_unsigned) {
- switch (*type) {
- case DataType::Type::kBool:
- DCHECK(!*is_unsigned);
- break;
- case DataType::Type::kUint8:
- case DataType::Type::kInt8:
- if (*is_unsigned) {
- *is_unsigned = false;
- *type = DataType::Type::kUint8;
- } else {
- *type = DataType::Type::kInt8;
- }
- break;
- case DataType::Type::kUint16:
- case DataType::Type::kInt16:
- if (*is_unsigned) {
- *is_unsigned = false;
- *type = DataType::Type::kUint16;
- } else {
- *type = DataType::Type::kInt16;
- }
- break;
- case DataType::Type::kInt32:
- case DataType::Type::kInt64:
- // We do not have kUint32 and kUint64 at the moment.
- break;
- case DataType::Type::kFloat32:
- case DataType::Type::kFloat64:
- DCHECK(!*is_unsigned);
- break;
- default:
- LOG(FATAL) << "Unexpected type " << *type;
- UNREACHABLE();
- }
-}
-
// Enables vectorization (SIMDization) in the loop optimizer.
static constexpr bool kEnableVectorization = true;
@@ -1362,8 +1322,10 @@ bool HLoopOptimization::VectorizeUse(LoopNode* node,
}
if (VectorizeUse(node, r, generate_code, type, restrictions)) {
if (generate_code) {
- NormalizePackedType(&type, &is_unsigned);
- GenerateVecOp(instruction, vector_map_->Get(r), nullptr, type);
+ GenerateVecOp(instruction,
+ vector_map_->Get(r),
+ nullptr,
+ HVecOperation::ToProperType(type, is_unsigned));
}
return true;
}
@@ -1865,18 +1827,26 @@ void HLoopOptimization::GenerateVecOp(HInstruction* org,
case Intrinsics::kMathMinLongLong:
case Intrinsics::kMathMinFloatFloat:
case Intrinsics::kMathMinDoubleDouble: {
- NormalizePackedType(&type, &is_unsigned);
vector = new (global_allocator_)
- HVecMin(global_allocator_, opa, opb, type, vector_length_, is_unsigned, dex_pc);
+ HVecMin(global_allocator_,
+ opa,
+ opb,
+ HVecOperation::ToProperType(type, is_unsigned),
+ vector_length_,
+ dex_pc);
break;
}
case Intrinsics::kMathMaxIntInt:
case Intrinsics::kMathMaxLongLong:
case Intrinsics::kMathMaxFloatFloat:
case Intrinsics::kMathMaxDoubleDouble: {
- NormalizePackedType(&type, &is_unsigned);
vector = new (global_allocator_)
- HVecMax(global_allocator_, opa, opb, type, vector_length_, is_unsigned, dex_pc);
+ HVecMax(global_allocator_,
+ opa,
+ opb,
+ HVecOperation::ToProperType(type, is_unsigned),
+ vector_length_,
+ dex_pc);
break;
}
default:
@@ -1987,15 +1957,13 @@ bool HLoopOptimization::VectorizeHalvingAddIdiom(LoopNode* node,
VectorizeUse(node, s, generate_code, type, restrictions)) {
if (generate_code) {
if (vector_mode_ == kVector) {
- NormalizePackedType(&type, &is_unsigned);
vector_map_->Put(instruction, new (global_allocator_) HVecHalvingAdd(
global_allocator_,
vector_map_->Get(r),
vector_map_->Get(s),
- type,
+ HVecOperation::ToProperType(type, is_unsigned),
vector_length_,
is_rounded,
- is_unsigned,
kNoDexPc));
MaybeRecordStat(stats_, MethodCompilationStat::kLoopVectorizedIdiom);
} else {
@@ -2086,7 +2054,7 @@ bool HLoopOptimization::VectorizeSADIdiom(LoopNode* node,
VectorizeUse(node, r, generate_code, sub_type, restrictions) &&
VectorizeUse(node, s, generate_code, sub_type, restrictions)) {
if (generate_code) {
- NormalizePackedType(&reduction_type, &is_unsigned);
+ reduction_type = HVecOperation::ToProperType(reduction_type, is_unsigned);
if (vector_mode_ == kVector) {
vector_map_->Put(instruction, new (global_allocator_) HVecSADAccumulate(
global_allocator_,
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 727431a493..91e475d737 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -865,6 +865,15 @@ void HLoopInformation::Populate() {
graph->SetHasLoops(true);
}
+void HLoopInformation::PopulateInnerLoopUpwards(HLoopInformation* inner_loop) {
+ DCHECK(inner_loop->GetPreHeader()->GetLoopInformation() == this);
+ blocks_.Union(&inner_loop->blocks_);
+ HLoopInformation* outer_loop = GetPreHeader()->GetLoopInformation();
+ if (outer_loop != nullptr) {
+ outer_loop->PopulateInnerLoopUpwards(this);
+ }
+}
+
HBasicBlock* HLoopInformation::GetPreHeader() const {
HBasicBlock* block = header_->GetPredecessors()[0];
DCHECK(irreducible_ || (block == header_->GetDominator()));
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index d4382c6b4c..43ca2cf874 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -32,11 +32,11 @@
#include "deoptimization_kind.h"
#include "dex/dex_file.h"
#include "dex/dex_file_types.h"
+#include "dex/invoke_type.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "handle.h"
#include "handle_scope.h"
#include "intrinsics_enum.h"
-#include "invoke_type.h"
#include "locations.h"
#include "method_reference.h"
#include "mirror/class.h"
@@ -826,6 +826,10 @@ class HLoopInformation : public ArenaObject<kArenaAllocLoopInfo> {
// Finds blocks that are part of this loop.
void Populate();
+ // Updates the block population of this loop and, recursively, of all its outer loops after the
+ // population of the inner loop has been updated.
+ void PopulateInnerLoopUpwards(HLoopInformation* inner_loop);
+
// Returns whether this loop information contains `block`.
// Note that this loop information *must* be populated before entering this function.
bool Contains(const HBasicBlock& block) const;
@@ -856,6 +860,12 @@ class HLoopInformation : public ArenaObject<kArenaAllocLoopInfo> {
bool HasExitEdge() const;
+ // Resets back edge and blocks-in-loop data.
+ void ResetBasicBlockData() {
+ back_edges_.clear();
+ ClearAllBlocks();
+ }
+
private:
// Internal recursive implementation of `Populate`.
void PopulateRecursive(HBasicBlock* block);
@@ -998,6 +1008,18 @@ class HBasicBlock : public ArenaObject<kArenaAllocBasicBlock> {
loop_information_->AddBackEdge(back_edge);
}
+ // Registers a back edge; if the block was not a loop header before the call, associates a newly
+ // created loop info with it.
+ //
+ // Used in SuperblockCloner to preserve the LoopInformation object instead of resetting loop
+ // info for all blocks during back-edge recalculation.
+ void AddBackEdgeWhileUpdating(HBasicBlock* back_edge) {
+ if (loop_information_ == nullptr || loop_information_->GetHeader() != this) {
+ loop_information_ = new (graph_->GetAllocator()) HLoopInformation(this, graph_);
+ }
+ loop_information_->AddBackEdge(back_edge);
+ }
+
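Together with ResetBasicBlockData() and PopulateInnerLoopUpwards() above, this lets a transformation recompute back edges without throwing away the existing HLoopInformation objects. A hedged sketch of the assumed usage pattern (the helper name and iteration order are illustrative, not the SuperblockCloner implementation):

// Sketch only: rebuild loop data for a header whose body was just transformed,
// keeping the original HLoopInformation object alive.
void RebuildLoopInfoSketch(HBasicBlock* header,
                           const std::vector<HBasicBlock*>& new_back_edges) {
  HLoopInformation* loop_info = header->GetLoopInformation();
  if (loop_info != nullptr && loop_info->GetHeader() == header) {
    loop_info->ResetBasicBlockData();  // drop stale back edges and blocks
  }
  for (HBasicBlock* back_edge : new_back_edges) {
    header->AddBackEdgeWhileUpdating(back_edge);  // reuses or creates loop info
  }
  loop_info = header->GetLoopInformation();
  loop_info->Populate();  // refill the blocks of this loop
  HLoopInformation* outer = loop_info->GetPreHeader()->GetLoopInformation();
  if (outer != nullptr) {
    outer->PopulateInnerLoopUpwards(loop_info);  // propagate blocks outwards
  }
}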
HGraph* GetGraph() const { return graph_; }
void SetGraph(HGraph* graph) { graph_ = graph; }
@@ -2018,6 +2040,10 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
// TODO: We should rename to CanVisiblyThrow, as some instructions (like HNewInstance),
// could throw OOME, but it is still OK to remove them if they are unused.
virtual bool CanThrow() const { return false; }
+
+ // Does the instruction always throw an exception?
+ virtual bool AlwaysThrows() const { return false; }
+
bool CanThrowIntoCatchBlock() const { return CanThrow() && block_->IsTryBlock(); }
bool HasSideEffects() const { return side_effects_.HasSideEffects(); }
@@ -4169,6 +4195,10 @@ class HInvoke : public HVariableInputSizeInstruction {
bool CanThrow() const OVERRIDE { return GetPackedFlag<kFlagCanThrow>(); }
+ void SetAlwaysThrows(bool always_throws) { SetPackedFlag<kFlagAlwaysThrows>(always_throws); }
+
+ bool AlwaysThrows() const OVERRIDE { return GetPackedFlag<kFlagAlwaysThrows>(); }
+
bool CanBeMoved() const OVERRIDE { return IsIntrinsic() && !DoesAnyWrite(); }
bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
@@ -4199,7 +4229,8 @@ class HInvoke : public HVariableInputSizeInstruction {
static constexpr size_t kFieldReturnTypeSize =
MinimumBitsToStore(static_cast<size_t>(DataType::Type::kLast));
static constexpr size_t kFlagCanThrow = kFieldReturnType + kFieldReturnTypeSize;
- static constexpr size_t kNumberOfInvokePackedBits = kFlagCanThrow + 1;
+ static constexpr size_t kFlagAlwaysThrows = kFlagCanThrow + 1;
+ static constexpr size_t kNumberOfInvokePackedBits = kFlagAlwaysThrows + 1;
static_assert(kNumberOfInvokePackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
using InvokeTypeField = BitField<InvokeType, kFieldInvokeType, kFieldInvokeTypeSize>;
using ReturnTypeField = BitField<DataType::Type, kFieldReturnType, kFieldReturnTypeSize>;
@@ -6575,6 +6606,8 @@ class HThrow FINAL : public HTemplateInstruction<1> {
bool CanThrow() const OVERRIDE { return true; }
+ bool AlwaysThrows() const OVERRIDE { return true; }
+
DECLARE_INSTRUCTION(Throw);
protected:
@@ -7298,19 +7331,19 @@ HInstruction* ReplaceInstrOrPhiByClone(HInstruction* instr);
class CloneAndReplaceInstructionVisitor : public HGraphDelegateVisitor {
public:
explicit CloneAndReplaceInstructionVisitor(HGraph* graph)
- : HGraphDelegateVisitor(graph), instr_replaced_by_clones_count(0) {}
+ : HGraphDelegateVisitor(graph), instr_replaced_by_clones_count_(0) {}
void VisitInstruction(HInstruction* instruction) OVERRIDE {
if (instruction->IsClonable()) {
ReplaceInstrOrPhiByClone(instruction);
- instr_replaced_by_clones_count++;
+ instr_replaced_by_clones_count_++;
}
}
- size_t GetInstrReplacedByClonesCount() const { return instr_replaced_by_clones_count; }
+ size_t GetInstrReplacedByClonesCount() const { return instr_replaced_by_clones_count_; }
private:
- size_t instr_replaced_by_clones_count;
+ size_t instr_replaced_by_clones_count_;
DISALLOW_COPY_AND_ASSIGN(CloneAndReplaceInstructionVisitor);
};
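The new AlwaysThrows() bit, set via HInvoke::SetAlwaysThrows() and overridden to true by HThrow, gives optimizations a cheap way to spot calls that never return normally; the kSimplifyThrowingInvoke stat added below hints at that use. A hedged sketch of how a pass might consume it (illustrative only, not the actual simplification):

// Sketch only: an invoke known to always throw ends the useful part of its block.
void SimplifyAlwaysThrowingInvokeSketch(HInstruction* instruction) {
  if (!instruction->IsInvoke() || !instruction->AlwaysThrows()) {
    return;
  }
  // A real pass would also make sure the exception cannot be caught locally
  // before rewriting control flow.
  if (instruction->CanThrowIntoCatchBlock()) {
    return;
  }
  // ... rewrite the graph so the block ends after `instruction` ...
}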
diff --git a/compiler/optimizing/nodes_vector.h b/compiler/optimizing/nodes_vector.h
index 87dff8403b..ecabdf3b76 100644
--- a/compiler/optimizing/nodes_vector.h
+++ b/compiler/optimizing/nodes_vector.h
@@ -131,8 +131,6 @@ class HVecOperation : public HVariableInputSizeInstruction {
}
// Maps an integral type to the same-size signed type and leaves other types alone.
- // Can be used to test relaxed type consistency in which packed same-size integral
- // types can co-exist, but other type mixes are an error.
static DataType::Type ToSignedType(DataType::Type type) {
switch (type) {
case DataType::Type::kBool: // 1-byte storage unit
@@ -160,6 +158,11 @@ class HVecOperation : public HVariableInputSizeInstruction {
}
}
+ // Maps an integral type to the same-size (un)signed type. Leaves other types alone.
+ static DataType::Type ToProperType(DataType::Type type, bool is_unsigned) {
+ return is_unsigned ? ToUnsignedType(type) : ToSignedType(type);
+ }
+
// Helper method to determine if an instruction returns a SIMD value.
// TODO: This method is needed until we introduce SIMD as proper type.
static bool ReturnsSIMDValue(HInstruction* instruction) {
@@ -286,6 +289,8 @@ class HVecMemoryOperation : public HVecOperation {
};
// Packed type consistency checker ("same vector length" integral types may mix freely).
+// Tests relaxed type consistency in which packed same-size integral types can co-exist,
+// but other type mixes are an error.
inline static bool HasConsistentPackedTypes(HInstruction* input, DataType::Type type) {
if (input->IsPhi()) {
return input->GetType() == HVecOperation::kSIMDType; // carries SIMD
@@ -518,7 +523,7 @@ class HVecAdd FINAL : public HVecBinaryOperation {
// Performs halving add on every component in the two vectors, viz.
// rounded [ x1, .. , xn ] hradd [ y1, .. , yn ] = [ (x1 + y1 + 1) >> 1, .. , (xn + yn + 1) >> 1 ]
// truncated [ x1, .. , xn ] hadd [ y1, .. , yn ] = [ (x1 + y1) >> 1, .. , (xn + yn ) >> 1 ]
-// for either both signed or both unsigned operands x, y.
+// for either both signed or both unsigned operands x, y (reflected in packed_type).
class HVecHalvingAdd FINAL : public HVecBinaryOperation {
public:
HVecHalvingAdd(ArenaAllocator* allocator,
@@ -527,21 +532,13 @@ class HVecHalvingAdd FINAL : public HVecBinaryOperation {
DataType::Type packed_type,
size_t vector_length,
bool is_rounded,
- bool is_unsigned,
uint32_t dex_pc)
: HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
- // The `is_unsigned` flag should be used exclusively with the Int32 or Int64.
- // This flag is a temporary measure while we do not have the Uint32 and Uint64 data types.
- DCHECK(!is_unsigned ||
- packed_type == DataType::Type::kInt32 ||
- packed_type == DataType::Type::kInt64) << packed_type;
DCHECK(HasConsistentPackedTypes(left, packed_type));
DCHECK(HasConsistentPackedTypes(right, packed_type));
- SetPackedFlag<kFieldHAddIsUnsigned>(is_unsigned);
SetPackedFlag<kFieldHAddIsRounded>(is_rounded);
}
- bool IsUnsigned() const { return GetPackedFlag<kFieldHAddIsUnsigned>(); }
bool IsRounded() const { return GetPackedFlag<kFieldHAddIsRounded>(); }
bool CanBeMoved() const OVERRIDE { return true; }
@@ -549,9 +546,7 @@ class HVecHalvingAdd FINAL : public HVecBinaryOperation {
bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
DCHECK(other->IsVecHalvingAdd());
const HVecHalvingAdd* o = other->AsVecHalvingAdd();
- return HVecOperation::InstructionDataEquals(o) &&
- IsUnsigned() == o->IsUnsigned() &&
- IsRounded() == o->IsRounded();
+ return HVecOperation::InstructionDataEquals(o) && IsRounded() == o->IsRounded();
}
DECLARE_INSTRUCTION(VecHalvingAdd);
@@ -561,8 +556,7 @@ class HVecHalvingAdd FINAL : public HVecBinaryOperation {
private:
// Additional packed bits.
- static constexpr size_t kFieldHAddIsUnsigned = HVecOperation::kNumberOfVectorOpPackedBits;
- static constexpr size_t kFieldHAddIsRounded = kFieldHAddIsUnsigned + 1;
+ static constexpr size_t kFieldHAddIsRounded = HVecOperation::kNumberOfVectorOpPackedBits;
static constexpr size_t kNumberOfHAddPackedBits = kFieldHAddIsRounded + 1;
static_assert(kNumberOfHAddPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
};
@@ -638,7 +632,7 @@ class HVecDiv FINAL : public HVecBinaryOperation {
// Takes minimum of every component in the two vectors,
// viz. MIN( [ x1, .. , xn ] , [ y1, .. , yn ]) = [ min(x1, y1), .. , min(xn, yn) ]
-// for either both signed or both unsigned operands x, y.
+// for either both signed or both unsigned operands x, y (reflected in packed_type).
class HVecMin FINAL : public HVecBinaryOperation {
public:
HVecMin(ArenaAllocator* allocator,
@@ -646,44 +640,23 @@ class HVecMin FINAL : public HVecBinaryOperation {
HInstruction* right,
DataType::Type packed_type,
size_t vector_length,
- bool is_unsigned,
uint32_t dex_pc)
: HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
- // The `is_unsigned` flag should be used exclusively with the Int32 or Int64.
- // This flag is a temporary measure while we do not have the Uint32 and Uint64 data types.
- DCHECK(!is_unsigned ||
- packed_type == DataType::Type::kInt32 ||
- packed_type == DataType::Type::kInt64) << packed_type;
DCHECK(HasConsistentPackedTypes(left, packed_type));
DCHECK(HasConsistentPackedTypes(right, packed_type));
- SetPackedFlag<kFieldMinOpIsUnsigned>(is_unsigned);
}
- bool IsUnsigned() const { return GetPackedFlag<kFieldMinOpIsUnsigned>(); }
-
bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
- DCHECK(other->IsVecMin());
- const HVecMin* o = other->AsVecMin();
- return HVecOperation::InstructionDataEquals(o) && IsUnsigned() == o->IsUnsigned();
- }
-
DECLARE_INSTRUCTION(VecMin);
protected:
DEFAULT_COPY_CONSTRUCTOR(VecMin);
-
- private:
- // Additional packed bits.
- static constexpr size_t kFieldMinOpIsUnsigned = HVecOperation::kNumberOfVectorOpPackedBits;
- static constexpr size_t kNumberOfMinOpPackedBits = kFieldMinOpIsUnsigned + 1;
- static_assert(kNumberOfMinOpPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
};
// Takes maximum of every component in the two vectors,
// viz. MAX( [ x1, .. , xn ] , [ y1, .. , yn ]) = [ max(x1, y1), .. , max(xn, yn) ]
-// for either both signed or both unsigned operands x, y.
+// for either both signed or both unsigned operands x, y (reflected in packed_type).
class HVecMax FINAL : public HVecBinaryOperation {
public:
HVecMax(ArenaAllocator* allocator,
@@ -691,39 +664,18 @@ class HVecMax FINAL : public HVecBinaryOperation {
HInstruction* right,
DataType::Type packed_type,
size_t vector_length,
- bool is_unsigned,
uint32_t dex_pc)
: HVecBinaryOperation(allocator, left, right, packed_type, vector_length, dex_pc) {
- // The `is_unsigned` flag should be used exclusively with the Int32 or Int64.
- // This flag is a temporary measure while we do not have the Uint32 and Uint64 data types.
- DCHECK(!is_unsigned ||
- packed_type == DataType::Type::kInt32 ||
- packed_type == DataType::Type::kInt64) << packed_type;
DCHECK(HasConsistentPackedTypes(left, packed_type));
DCHECK(HasConsistentPackedTypes(right, packed_type));
- SetPackedFlag<kFieldMaxOpIsUnsigned>(is_unsigned);
}
- bool IsUnsigned() const { return GetPackedFlag<kFieldMaxOpIsUnsigned>(); }
-
bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
- DCHECK(other->IsVecMax());
- const HVecMax* o = other->AsVecMax();
- return HVecOperation::InstructionDataEquals(o) && IsUnsigned() == o->IsUnsigned();
- }
-
DECLARE_INSTRUCTION(VecMax);
protected:
DEFAULT_COPY_CONSTRUCTOR(VecMax);
-
- private:
- // Additional packed bits.
- static constexpr size_t kFieldMaxOpIsUnsigned = HVecOperation::kNumberOfVectorOpPackedBits;
- static constexpr size_t kNumberOfMaxOpPackedBits = kFieldMaxOpIsUnsigned + 1;
- static_assert(kNumberOfMaxOpPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
};
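With the is_unsigned flag removed from HVecMin/HVecMax (and from HVecHalvingAdd above), signedness is carried entirely by the packed type, and callers are expected to go through HVecOperation::ToProperType() as the loop optimizer now does. A hedged construction sketch (operands, allocator and dex_pc are placeholders):

// Sketch only: kUint8/kUint16 vs. kInt8/kInt16 now encode what IsUnsigned() used to.
HVecBinaryOperation* BuildVecMinSketch(ArenaAllocator* allocator,
                                       HInstruction* left,
                                       HInstruction* right,
                                       DataType::Type type,
                                       bool is_unsigned,
                                       size_t vector_length,
                                       uint32_t dex_pc) {
  DataType::Type packed_type = HVecOperation::ToProperType(type, is_unsigned);
  return new (allocator) HVecMin(allocator, left, right, packed_type, vector_length, dex_pc);
}

The same applies to HVecMax and HVecHalvingAdd, which is why their extra packed bits and InstructionDataEquals overrides could be dropped.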
// Bitwise-ands every component in the two vectors,
diff --git a/compiler/optimizing/nodes_vector_test.cc b/compiler/optimizing/nodes_vector_test.cc
index ab9d7594d9..af13449646 100644
--- a/compiler/optimizing/nodes_vector_test.cc
+++ b/compiler/optimizing/nodes_vector_test.cc
@@ -282,143 +282,53 @@ TEST_F(NodesVectorTest, VectorAlignmentMattersOnStore) {
EXPECT_FALSE(v0->Equals(v1)); // no longer equal
}
-TEST_F(NodesVectorTest, VectorSignMattersOnMin) {
- HVecOperation* p0 = new (GetAllocator())
- HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc);
- HVecOperation* p1 = new (GetAllocator())
- HVecReplicateScalar(GetAllocator(), int8_parameter_, DataType::Type::kInt8, 4, kNoDexPc);
- HVecOperation* p2 = new (GetAllocator())
- HVecReplicateScalar(GetAllocator(), int16_parameter_, DataType::Type::kInt16, 4, kNoDexPc);
-
- HVecMin* v0 = new (GetAllocator()) HVecMin(
- GetAllocator(), p0, p0, DataType::Type::kInt32, 4, /*is_unsigned*/ true, kNoDexPc);
- HVecMin* v1 = new (GetAllocator()) HVecMin(
- GetAllocator(), p0, p0, DataType::Type::kInt32, 4, /*is_unsigned*/ false, kNoDexPc);
- HVecMin* v2 = new (GetAllocator()) HVecMin(
- GetAllocator(), p0, p0, DataType::Type::kInt32, 2, /*is_unsigned*/ true, kNoDexPc);
- HVecMin* v3 = new (GetAllocator()) HVecMin(
- GetAllocator(), p1, p1, DataType::Type::kUint8, 16, /*is_unsigned*/ false, kNoDexPc);
- HVecMin* v4 = new (GetAllocator()) HVecMin(
- GetAllocator(), p1, p1, DataType::Type::kInt8, 16, /*is_unsigned*/ false, kNoDexPc);
- HVecMin* v5 = new (GetAllocator()) HVecMin(
- GetAllocator(), p2, p2, DataType::Type::kUint16, 8, /*is_unsigned*/ false, kNoDexPc);
- HVecMin* v6 = new (GetAllocator()) HVecMin(
- GetAllocator(), p2, p2, DataType::Type::kInt16, 8, /*is_unsigned*/ false, kNoDexPc);
- HVecMin* min_insns[] = { v0, v1, v2, v3, v4, v5, v6 };
-
- EXPECT_FALSE(p0->CanBeMoved());
- EXPECT_FALSE(p1->CanBeMoved());
- EXPECT_FALSE(p2->CanBeMoved());
-
- for (HVecMin* min_insn : min_insns) {
- EXPECT_TRUE(min_insn->CanBeMoved());
- }
-
- // Deprecated; IsUnsigned() should be removed with the introduction of Uint32 and Uint64.
- EXPECT_TRUE(v0->IsUnsigned());
- EXPECT_FALSE(v1->IsUnsigned());
- EXPECT_TRUE(v2->IsUnsigned());
-
- for (HVecMin* min_insn1 : min_insns) {
- for (HVecMin* min_insn2 : min_insns) {
- EXPECT_EQ(min_insn1 == min_insn2, min_insn1->Equals(min_insn2));
- }
- }
-}
-
-TEST_F(NodesVectorTest, VectorSignMattersOnMax) {
- HVecOperation* p0 = new (GetAllocator())
- HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc);
- HVecOperation* p1 = new (GetAllocator())
- HVecReplicateScalar(GetAllocator(), int8_parameter_, DataType::Type::kInt8, 4, kNoDexPc);
- HVecOperation* p2 = new (GetAllocator())
- HVecReplicateScalar(GetAllocator(), int16_parameter_, DataType::Type::kInt16, 4, kNoDexPc);
-
- HVecMax* v0 = new (GetAllocator()) HVecMax(
- GetAllocator(), p0, p0, DataType::Type::kInt32, 4, /*is_unsigned*/ true, kNoDexPc);
- HVecMax* v1 = new (GetAllocator()) HVecMax(
- GetAllocator(), p0, p0, DataType::Type::kInt32, 4, /*is_unsigned*/ false, kNoDexPc);
- HVecMax* v2 = new (GetAllocator()) HVecMax(
- GetAllocator(), p0, p0, DataType::Type::kInt32, 2, /*is_unsigned*/ true, kNoDexPc);
- HVecMax* v3 = new (GetAllocator()) HVecMax(
- GetAllocator(), p1, p1, DataType::Type::kUint8, 16, /*is_unsigned*/ false, kNoDexPc);
- HVecMax* v4 = new (GetAllocator()) HVecMax(
- GetAllocator(), p1, p1, DataType::Type::kInt8, 16, /*is_unsigned*/ false, kNoDexPc);
- HVecMax* v5 = new (GetAllocator()) HVecMax(
- GetAllocator(), p2, p2, DataType::Type::kUint16, 8, /*is_unsigned*/ false, kNoDexPc);
- HVecMax* v6 = new (GetAllocator()) HVecMax(
- GetAllocator(), p2, p2, DataType::Type::kInt16, 8, /*is_unsigned*/ false, kNoDexPc);
- HVecMax* max_insns[] = { v0, v1, v2, v3, v4, v5, v6 };
-
- EXPECT_FALSE(p0->CanBeMoved());
- EXPECT_FALSE(p1->CanBeMoved());
- EXPECT_FALSE(p2->CanBeMoved());
-
- for (HVecMax* max_insn : max_insns) {
- EXPECT_TRUE(max_insn->CanBeMoved());
- }
-
- // Deprecated; IsUnsigned() should be removed with the introduction of Uint32 and Uint64.
- EXPECT_TRUE(v0->IsUnsigned());
- EXPECT_FALSE(v1->IsUnsigned());
- EXPECT_TRUE(v2->IsUnsigned());
-
- for (HVecMax* max_insn1 : max_insns) {
- for (HVecMax* max_insn2 : max_insns) {
- EXPECT_EQ(max_insn1 == max_insn2, max_insn1->Equals(max_insn2));
- }
- }
-}
-
TEST_F(NodesVectorTest, VectorAttributesMatterOnHalvingAdd) {
+ HVecOperation* u0 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kUint32, 4, kNoDexPc);
+ HVecOperation* u1 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int16_parameter_, DataType::Type::kUint16, 8, kNoDexPc);
+ HVecOperation* u2 = new (GetAllocator())
+ HVecReplicateScalar(GetAllocator(), int8_parameter_, DataType::Type::kUint8, 16, kNoDexPc);
+
HVecOperation* p0 = new (GetAllocator())
HVecReplicateScalar(GetAllocator(), int32_parameter_, DataType::Type::kInt32, 4, kNoDexPc);
HVecOperation* p1 = new (GetAllocator())
- HVecReplicateScalar(GetAllocator(), int8_parameter_, DataType::Type::kInt8, 4, kNoDexPc);
+ HVecReplicateScalar(GetAllocator(), int16_parameter_, DataType::Type::kInt16, 8, kNoDexPc);
HVecOperation* p2 = new (GetAllocator())
- HVecReplicateScalar(GetAllocator(), int16_parameter_, DataType::Type::kInt16, 4, kNoDexPc);
+ HVecReplicateScalar(GetAllocator(), int8_parameter_, DataType::Type::kInt8, 16, kNoDexPc);
HVecHalvingAdd* v0 = new (GetAllocator()) HVecHalvingAdd(
- GetAllocator(), p0, p0, DataType::Type::kInt32, 4,
- /*is_rounded*/ true, /*is_unsigned*/ true, kNoDexPc);
+ GetAllocator(), u0, u0, DataType::Type::kUint32, 4, /*is_rounded*/ true, kNoDexPc);
HVecHalvingAdd* v1 = new (GetAllocator()) HVecHalvingAdd(
- GetAllocator(), p0, p0, DataType::Type::kInt32, 4,
- /*is_rounded*/ false, /*is_unsigned*/ true, kNoDexPc);
+ GetAllocator(), u0, u0, DataType::Type::kUint32, 4, /*is_rounded*/ false, kNoDexPc);
HVecHalvingAdd* v2 = new (GetAllocator()) HVecHalvingAdd(
- GetAllocator(), p0, p0, DataType::Type::kInt32, 4,
- /*is_rounded*/ true, /*is_unsigned*/ false, kNoDexPc);
+ GetAllocator(), p0, p0, DataType::Type::kInt32, 4, /*is_rounded*/ true, kNoDexPc);
HVecHalvingAdd* v3 = new (GetAllocator()) HVecHalvingAdd(
- GetAllocator(), p0, p0, DataType::Type::kInt32, 4,
- /*is_rounded*/ false, /*is_unsigned*/ false, kNoDexPc);
+ GetAllocator(), p0, p0, DataType::Type::kInt32, 4, /*is_rounded*/ false, kNoDexPc);
+
HVecHalvingAdd* v4 = new (GetAllocator()) HVecHalvingAdd(
- GetAllocator(), p0, p0, DataType::Type::kInt32, 2,
- /*is_rounded*/ true, /*is_unsigned*/ true, kNoDexPc);
+ GetAllocator(), u1, u1, DataType::Type::kUint16, 8, /*is_rounded*/ true, kNoDexPc);
HVecHalvingAdd* v5 = new (GetAllocator()) HVecHalvingAdd(
- GetAllocator(), p1, p1, DataType::Type::kUint8, 16,
- /*is_rounded*/ true, /*is_unsigned*/ false, kNoDexPc);
+ GetAllocator(), u1, u1, DataType::Type::kUint16, 8, /*is_rounded*/ false, kNoDexPc);
HVecHalvingAdd* v6 = new (GetAllocator()) HVecHalvingAdd(
- GetAllocator(), p1, p1, DataType::Type::kUint8, 16,
- /*is_rounded*/ false, /*is_unsigned*/ false, kNoDexPc);
+ GetAllocator(), p1, p1, DataType::Type::kInt16, 8, /*is_rounded*/ true, kNoDexPc);
HVecHalvingAdd* v7 = new (GetAllocator()) HVecHalvingAdd(
- GetAllocator(), p1, p1, DataType::Type::kInt8, 16,
- /*is_rounded*/ true, /*is_unsigned*/ false, kNoDexPc);
+ GetAllocator(), p1, p1, DataType::Type::kInt16, 8, /*is_rounded*/ false, kNoDexPc);
+
HVecHalvingAdd* v8 = new (GetAllocator()) HVecHalvingAdd(
- GetAllocator(), p1, p1, DataType::Type::kInt8, 16,
- /*is_rounded*/ false, /*is_unsigned*/ false, kNoDexPc);
+ GetAllocator(), u2, u2, DataType::Type::kUint8, 16, /*is_rounded*/ true, kNoDexPc);
HVecHalvingAdd* v9 = new (GetAllocator()) HVecHalvingAdd(
- GetAllocator(), p2, p2, DataType::Type::kUint16, 8,
- /*is_rounded*/ true, /*is_unsigned*/ false, kNoDexPc);
+ GetAllocator(), u2, u2, DataType::Type::kUint8, 16, /*is_rounded*/ false, kNoDexPc);
HVecHalvingAdd* v10 = new (GetAllocator()) HVecHalvingAdd(
- GetAllocator(), p2, p2, DataType::Type::kUint16, 8,
- /*is_rounded*/ false, /*is_unsigned*/ false, kNoDexPc);
+ GetAllocator(), p2, p2, DataType::Type::kInt8, 16, /*is_rounded*/ true, kNoDexPc);
HVecHalvingAdd* v11 = new (GetAllocator()) HVecHalvingAdd(
- GetAllocator(), p2, p2, DataType::Type::kInt16, 2,
- /*is_rounded*/ true, /*is_unsigned*/ false, kNoDexPc);
- HVecHalvingAdd* v12 = new (GetAllocator()) HVecHalvingAdd(
- GetAllocator(), p2, p2, DataType::Type::kInt16, 2,
- /*is_rounded*/ false, /*is_unsigned*/ false, kNoDexPc);
- HVecHalvingAdd* hadd_insns[] = { v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12 };
+ GetAllocator(), p2, p2, DataType::Type::kInt8, 16, /*is_rounded*/ false, kNoDexPc);
+ HVecHalvingAdd* hadd_insns[] = { v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11 };
+
+ EXPECT_FALSE(u0->CanBeMoved());
+ EXPECT_FALSE(u1->CanBeMoved());
+ EXPECT_FALSE(u2->CanBeMoved());
EXPECT_FALSE(p0->CanBeMoved());
EXPECT_FALSE(p1->CanBeMoved());
EXPECT_FALSE(p2->CanBeMoved());
@@ -427,26 +337,18 @@ TEST_F(NodesVectorTest, VectorAttributesMatterOnHalvingAdd) {
EXPECT_TRUE(hadd_insn->CanBeMoved());
}
- // Deprecated; IsUnsigned() should be removed with the introduction of Uint32 and Uint64.
- EXPECT_TRUE(v0->IsUnsigned());
- EXPECT_TRUE(v1->IsUnsigned());
- EXPECT_TRUE(!v2->IsUnsigned());
- EXPECT_TRUE(!v3->IsUnsigned());
- EXPECT_TRUE(v4->IsUnsigned());
-
EXPECT_TRUE(v0->IsRounded());
EXPECT_TRUE(!v1->IsRounded());
EXPECT_TRUE(v2->IsRounded());
EXPECT_TRUE(!v3->IsRounded());
EXPECT_TRUE(v4->IsRounded());
- EXPECT_TRUE(v5->IsRounded());
- EXPECT_TRUE(!v6->IsRounded());
- EXPECT_TRUE(v7->IsRounded());
- EXPECT_TRUE(!v8->IsRounded());
- EXPECT_TRUE(v9->IsRounded());
- EXPECT_TRUE(!v10->IsRounded());
- EXPECT_TRUE(v11->IsRounded());
- EXPECT_TRUE(!v12->IsRounded());
+ EXPECT_TRUE(!v5->IsRounded());
+ EXPECT_TRUE(v6->IsRounded());
+ EXPECT_TRUE(!v7->IsRounded());
+ EXPECT_TRUE(v8->IsRounded());
+ EXPECT_TRUE(!v9->IsRounded());
+ EXPECT_TRUE(v10->IsRounded());
+ EXPECT_TRUE(!v11->IsRounded());
for (HVecHalvingAdd* hadd_insn1 : hadd_insns) {
for (HVecHalvingAdd* hadd_insn2 : hadd_insns) {
diff --git a/compiler/optimizing/optimizing_cfi_test.cc b/compiler/optimizing/optimizing_cfi_test.cc
index e2b2106f65..d20b681b49 100644
--- a/compiler/optimizing/optimizing_cfi_test.cc
+++ b/compiler/optimizing/optimizing_cfi_test.cc
@@ -41,7 +41,7 @@ namespace art {
// Run the tests only on host.
#ifndef ART_TARGET_ANDROID
-class OptimizingCFITest : public CFITest {
+class OptimizingCFITest : public CFITest, public OptimizingUnitTestHelper {
public:
// Enable this flag to generate the expected outputs.
static constexpr bool kGenerateExpected = false;
@@ -63,7 +63,7 @@ class OptimizingCFITest : public CFITest {
// Setup simple context.
std::string error;
isa_features_ = InstructionSetFeatures::FromVariant(isa, "default", &error);
- graph_ = CreateGraph(&pool_and_allocator_);
+ graph_ = CreateGraph();
// Generate simple frame with some spills.
code_gen_ = CodeGenerator::Create(graph_, isa, *isa_features_, opts_);
code_gen_->GetAssembler()->cfi().SetEnabled(true);
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index f4115f7e7b..b3f23a0dcd 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -382,6 +382,9 @@ class OptimizingCompiler FINAL : public Compiler {
PassObserver* pass_observer,
VariableSizedHandleScope* handles) const;
+ void GenerateJitDebugInfo(ArtMethod* method, debug::MethodDebugInfo method_debug_info)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
std::unique_ptr<OptimizingCompilerStats> compilation_stats_;
std::unique_ptr<std::ostream> visualizer_output_;
@@ -772,7 +775,7 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
return nullptr;
}
- CodeItemDebugInfoAccessor code_item_accessor(dex_file, code_item);
+ CodeItemDebugInfoAccessor code_item_accessor(dex_file, code_item, method_idx);
HGraph* graph = new (allocator) HGraph(
allocator,
arena_stack,
@@ -783,7 +786,7 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
compiler_driver->GetCompilerOptions().GetDebuggable(),
osr);
- const uint8_t* interpreter_metadata = nullptr;
+ ArrayRef<const uint8_t> interpreter_metadata;
// For AOT compilation, we may not get a method, for example if its class is erroneous.
// JIT should always have a method.
DCHECK(Runtime::Current()->IsAotCompiler() || method != nullptr);
@@ -940,7 +943,7 @@ CodeGenerator* OptimizingCompiler::TryCompileIntrinsic(
compiler_driver,
codegen.get(),
compilation_stats_.get(),
- /* interpreter_metadata */ nullptr,
+ /* interpreter_metadata */ ArrayRef<const uint8_t>(),
handles);
builder.BuildIntrinsicGraph(method);
}
@@ -1230,7 +1233,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
const auto* method_header = reinterpret_cast<const OatQuickMethodHeader*>(code);
const uintptr_t code_address = reinterpret_cast<uintptr_t>(method_header->GetCode());
debug::MethodDebugInfo info = {};
- DCHECK(info.trampoline_name.empty());
+ DCHECK(info.custom_name.empty());
info.dex_file = dex_file;
info.class_def_index = class_def_idx;
info.dex_method_index = method_idx;
@@ -1246,14 +1249,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
info.frame_size_in_bytes = method_header->GetFrameSizeInBytes();
info.code_info = nullptr;
info.cfi = jni_compiled_method.GetCfi();
- // If both flags are passed, generate full debug info.
- const bool mini_debug_info = !compiler_options.GetGenerateDebugInfo();
- std::vector<uint8_t> elf_file = debug::MakeElfFileForJIT(
- GetCompilerDriver()->GetInstructionSet(),
- GetCompilerDriver()->GetInstructionSetFeatures(),
- mini_debug_info,
- info);
- CreateJITCodeEntryForAddress(code_address, std::move(elf_file));
+ GenerateJitDebugInfo(method, info);
}
Runtime::Current()->GetJit()->AddMemoryUsage(method, allocator.BytesUsed());
@@ -1361,7 +1357,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
const auto* method_header = reinterpret_cast<const OatQuickMethodHeader*>(code);
const uintptr_t code_address = reinterpret_cast<uintptr_t>(method_header->GetCode());
debug::MethodDebugInfo info = {};
- DCHECK(info.trampoline_name.empty());
+ DCHECK(info.custom_name.empty());
info.dex_file = dex_file;
info.class_def_index = class_def_idx;
info.dex_method_index = method_idx;
@@ -1377,14 +1373,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
info.frame_size_in_bytes = method_header->GetFrameSizeInBytes();
info.code_info = stack_map_size == 0 ? nullptr : stack_map_data;
info.cfi = ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data());
- // If both flags are passed, generate full debug info.
- const bool mini_debug_info = !compiler_options.GetGenerateDebugInfo();
- std::vector<uint8_t> elf_file = debug::MakeElfFileForJIT(
- GetCompilerDriver()->GetInstructionSet(),
- GetCompilerDriver()->GetInstructionSetFeatures(),
- mini_debug_info,
- info);
- CreateJITCodeEntryForAddress(code_address, std::move(elf_file));
+ GenerateJitDebugInfo(method, info);
}
Runtime::Current()->GetJit()->AddMemoryUsage(method, allocator.BytesUsed());
@@ -1408,4 +1397,27 @@ bool OptimizingCompiler::JitCompile(Thread* self,
return true;
}
+void OptimizingCompiler::GenerateJitDebugInfo(ArtMethod* method, debug::MethodDebugInfo info) {
+ const CompilerOptions& compiler_options = GetCompilerDriver()->GetCompilerOptions();
+ DCHECK(compiler_options.GenerateAnyDebugInfo());
+
+ // If both flags are passed, generate full debug info.
+ const bool mini_debug_info = !compiler_options.GetGenerateDebugInfo();
+
+ // Create entry for the single method that we just compiled.
+ std::vector<uint8_t> elf_file = debug::MakeElfFileForJIT(
+ GetCompilerDriver()->GetInstructionSet(),
+ GetCompilerDriver()->GetInstructionSetFeatures(),
+ mini_debug_info,
+ ArrayRef<const debug::MethodDebugInfo>(&info, 1));
+ MutexLock mu(Thread::Current(), *Locks::native_debug_interface_lock_);
+ JITCodeEntry* entry = CreateJITCodeEntry(elf_file);
+ IncrementJITCodeEntryRefcount(entry, info.code_address);
+
+ VLOG(jit)
+ << "JIT mini-debug-info added for " << ArtMethod::PrettyMethod(method)
+ << " size=" << PrettySize(elf_file.size())
+ << " total_size=" << PrettySize(GetJITCodeEntryMemUsage());
+}
+
} // namespace art
diff --git a/compiler/optimizing/optimizing_compiler_stats.h b/compiler/optimizing/optimizing_compiler_stats.h
index 32a94ab5e4..0023265e50 100644
--- a/compiler/optimizing/optimizing_compiler_stats.h
+++ b/compiler/optimizing/optimizing_compiler_stats.h
@@ -75,6 +75,7 @@ enum class MethodCompilationStat {
kImplicitNullCheckGenerated,
kExplicitNullCheckGenerated,
kSimplifyIf,
+ kSimplifyThrowingInvoke,
kInstructionSunk,
kNotInlinedUnresolvedEntrypoint,
kNotInlinedDexCache,
diff --git a/compiler/optimizing/optimizing_unit_test.h b/compiler/optimizing/optimizing_unit_test.h
index 8c97d57f4a..6dcbadba6e 100644
--- a/compiler/optimizing/optimizing_unit_test.h
+++ b/compiler/optimizing/optimizing_unit_test.h
@@ -17,12 +17,16 @@
#ifndef ART_COMPILER_OPTIMIZING_OPTIMIZING_UNIT_TEST_H_
#define ART_COMPILER_OPTIMIZING_OPTIMIZING_UNIT_TEST_H_
+#include <memory>
+#include <vector>
+
#include "base/scoped_arena_allocator.h"
#include "builder.h"
#include "common_compiler_test.h"
#include "dex/code_item_accessors-inl.h"
#include "dex/dex_file.h"
#include "dex/dex_instruction.h"
+#include "dex/standard_dex_file.h"
#include "driver/dex_compilation_unit.h"
#include "handle_scope-inl.h"
#include "mirror/class_loader.h"
@@ -99,18 +103,11 @@ class ArenaPoolAndAllocator {
ScopedArenaAllocator scoped_allocator_;
};
-inline HGraph* CreateGraph(ArenaPoolAndAllocator* pool_and_allocator) {
- return new (pool_and_allocator->GetAllocator()) HGraph(
- pool_and_allocator->GetAllocator(),
- pool_and_allocator->GetArenaStack(),
- *reinterpret_cast<DexFile*>(pool_and_allocator->GetAllocator()->Alloc(sizeof(DexFile))),
- /*method_idx*/-1,
- kRuntimeISA);
-}
-
-class OptimizingUnitTest : public CommonCompilerTest {
- protected:
- OptimizingUnitTest() : pool_and_allocator_(new ArenaPoolAndAllocator()) { }
+// Have a separate helper so that OptimizingCFITest can inherit it without causing
+// multiple inheritance errors from having gtest as a parent twice.
+class OptimizingUnitTestHelper {
+ public:
+ OptimizingUnitTestHelper() : pool_and_allocator_(new ArenaPoolAndAllocator()) { }
ArenaAllocator* GetAllocator() { return pool_and_allocator_->GetAllocator(); }
ArenaStack* GetArenaStack() { return pool_and_allocator_->GetArenaStack(); }
@@ -122,14 +119,42 @@ class OptimizingUnitTest : public CommonCompilerTest {
}
HGraph* CreateGraph() {
- return art::CreateGraph(pool_and_allocator_.get());
+ ArenaAllocator* const allocator = pool_and_allocator_->GetAllocator();
+
+ // Reserve a big array of 0s so the dex file constructor can read offsets from the header.
+ static constexpr size_t kDexDataSize = 4 * KB;
+ const uint8_t* dex_data = reinterpret_cast<uint8_t*>(allocator->Alloc(kDexDataSize));
+
+ // Create the dex file based on the fake data. Call the constructor so that we can use virtual
+ // functions. Don't use the arena for the StandardDexFile, otherwise the dex location leaks.
+ dex_files_.emplace_back(new StandardDexFile(
+ dex_data,
+ sizeof(StandardDexFile::Header),
+ "no_location",
+ /*location_checksum*/ 0,
+ /*oat_dex_file*/ nullptr,
+ /*container*/ nullptr));
+
+ return new (allocator) HGraph(
+ allocator,
+ pool_and_allocator_->GetArenaStack(),
+ *dex_files_.back(),
+ /*method_idx*/-1,
+ kRuntimeISA);
}
// Create a control-flow graph from Dex instructions.
- HGraph* CreateCFG(const uint16_t* data, DataType::Type return_type = DataType::Type::kInt32) {
- const DexFile::CodeItem* code_item = reinterpret_cast<const DexFile::CodeItem*>(data);
+ HGraph* CreateCFG(const std::vector<uint16_t>& data,
+ DataType::Type return_type = DataType::Type::kInt32) {
HGraph* graph = CreateGraph();
+ // The code item data might not be aligned to 4 bytes; copy it to ensure that.
+ const size_t code_item_size = data.size() * sizeof(data.front());
+ void* aligned_data = GetAllocator()->Alloc(code_item_size);
+ memcpy(aligned_data, &data[0], code_item_size);
+ CHECK_ALIGNED(aligned_data, StandardDexFile::CodeItem::kAlignment);
+ const DexFile::CodeItem* code_item = reinterpret_cast<const DexFile::CodeItem*>(aligned_data);
+
{
ScopedObjectAccess soa(Thread::Current());
if (handles_ == nullptr) {
@@ -146,7 +171,7 @@ class OptimizingUnitTest : public CommonCompilerTest {
/* access_flags */ 0u,
/* verified_method */ nullptr,
handles_->NewHandle<mirror::DexCache>(nullptr));
- CodeItemDebugInfoAccessor accessor(graph->GetDexFile(), code_item);
+ CodeItemDebugInfoAccessor accessor(graph->GetDexFile(), code_item, /*dex_method_idx*/ 0u);
HGraphBuilder builder(graph, dex_compilation_unit, accessor, handles_.get(), return_type);
bool graph_built = (builder.BuildGraph() == kAnalysisSuccess);
return graph_built ? graph : nullptr;
@@ -154,10 +179,13 @@ class OptimizingUnitTest : public CommonCompilerTest {
}
private:
+ std::vector<std::unique_ptr<const StandardDexFile>> dex_files_;
std::unique_ptr<ArenaPoolAndAllocator> pool_and_allocator_;
std::unique_ptr<VariableSizedHandleScope> handles_;
};
+class OptimizingUnitTest : public CommonCompilerTest, public OptimizingUnitTestHelper {};
+
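For reference, a minimal sketch of how a fixture uses the reworked helpers after this change (the fixture name is hypothetical; ZERO_REGISTER_CODE_ITEM is the macro the tests below use to assemble code-item words):

class ExampleCfgTest : public OptimizingUnitTest {};  // hypothetical fixture

TEST_F(ExampleCfgTest, BuildsGraphFromVectorData) {
  // Code items are now passed as std::vector<uint16_t> and copied to aligned
  // storage inside CreateCFG().
  const std::vector<uint16_t> data = ZERO_REGISTER_CODE_ITEM(Instruction::RETURN_VOID);
  HGraph* graph = CreateCFG(data);
  ASSERT_NE(graph, nullptr);
}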
// Naive string diff data type.
typedef std::list<std::pair<std::string, std::string>> diff_t;
diff --git a/compiler/optimizing/pretty_printer_test.cc b/compiler/optimizing/pretty_printer_test.cc
index 4fc7fe9427..6ef386b4a5 100644
--- a/compiler/optimizing/pretty_printer_test.cc
+++ b/compiler/optimizing/pretty_printer_test.cc
@@ -29,10 +29,10 @@ namespace art {
class PrettyPrinterTest : public OptimizingUnitTest {
protected:
- void TestCode(const uint16_t* data, const char* expected);
+ void TestCode(const std::vector<uint16_t>& data, const char* expected);
};
-void PrettyPrinterTest::TestCode(const uint16_t* data, const char* expected) {
+void PrettyPrinterTest::TestCode(const std::vector<uint16_t>& data, const char* expected) {
HGraph* graph = CreateCFG(data);
StringPrettyPrinter printer(graph);
printer.VisitInsertionOrder();
@@ -40,7 +40,7 @@ void PrettyPrinterTest::TestCode(const uint16_t* data, const char* expected) {
}
TEST_F(PrettyPrinterTest, ReturnVoid) {
- const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ZERO_REGISTER_CODE_ITEM(
Instruction::RETURN_VOID);
const char* expected =
@@ -67,7 +67,7 @@ TEST_F(PrettyPrinterTest, CFG1) {
"BasicBlock 3, pred: 2\n"
" 4: Exit\n";
- const uint16_t data[] =
+ const std::vector<uint16_t> data =
ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO | 0x100,
Instruction::RETURN_VOID);
@@ -89,7 +89,7 @@ TEST_F(PrettyPrinterTest, CFG2) {
"BasicBlock 4, pred: 3\n"
" 5: Exit\n";
- const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO | 0x100,
Instruction::GOTO | 0x100,
Instruction::RETURN_VOID);
@@ -111,21 +111,21 @@ TEST_F(PrettyPrinterTest, CFG3) {
"BasicBlock 4, pred: 2\n"
" 5: Exit\n";
- const uint16_t data1[] = ZERO_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data1 = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO | 0x200,
Instruction::RETURN_VOID,
Instruction::GOTO | 0xFF00);
TestCode(data1, expected);
- const uint16_t data2[] = ZERO_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data2 = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO_16, 3,
Instruction::RETURN_VOID,
Instruction::GOTO_16, 0xFFFF);
TestCode(data2, expected);
- const uint16_t data3[] = ZERO_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data3 = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO_32, 4, 0,
Instruction::RETURN_VOID,
Instruction::GOTO_32, 0xFFFF, 0xFFFF);
@@ -144,13 +144,13 @@ TEST_F(PrettyPrinterTest, CFG4) {
"BasicBlock 3, pred: 0, succ: 1\n"
" 0: Goto 1\n";
- const uint16_t data1[] = ZERO_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data1 = ZERO_REGISTER_CODE_ITEM(
Instruction::NOP,
Instruction::GOTO | 0xFF00);
TestCode(data1, expected);
- const uint16_t data2[] = ZERO_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data2 = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO_32, 0, 0);
TestCode(data2, expected);
@@ -166,7 +166,7 @@ TEST_F(PrettyPrinterTest, CFG5) {
"BasicBlock 3, pred: 1\n"
" 3: Exit\n";
- const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ZERO_REGISTER_CODE_ITEM(
Instruction::RETURN_VOID,
Instruction::GOTO | 0x100,
Instruction::GOTO | 0xFE00);
@@ -192,7 +192,7 @@ TEST_F(PrettyPrinterTest, CFG6) {
"BasicBlock 5, pred: 1, succ: 3\n"
" 0: Goto 3\n";
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3,
Instruction::GOTO | 0x100,
@@ -220,7 +220,7 @@ TEST_F(PrettyPrinterTest, CFG7) {
"BasicBlock 6, pred: 1, succ: 2\n"
" 1: Goto 2\n";
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3,
Instruction::GOTO | 0x100,
@@ -240,7 +240,7 @@ TEST_F(PrettyPrinterTest, IntConstant) {
"BasicBlock 2, pred: 1\n"
" 4: Exit\n";
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::RETURN_VOID);
diff --git a/compiler/optimizing/register_allocation_resolver.cc b/compiler/optimizing/register_allocation_resolver.cc
index 1d3fe0334d..27f9ac3990 100644
--- a/compiler/optimizing/register_allocation_resolver.cc
+++ b/compiler/optimizing/register_allocation_resolver.cc
@@ -103,6 +103,7 @@ void RegisterAllocationResolver::Resolve(ArrayRef<HInstruction* const> safepoint
case DataType::Type::kFloat64:
slot += long_spill_slots;
FALLTHROUGH_INTENDED;
+ case DataType::Type::kUint64:
case DataType::Type::kInt64:
slot += float_spill_slots;
FALLTHROUGH_INTENDED;
@@ -110,6 +111,7 @@ void RegisterAllocationResolver::Resolve(ArrayRef<HInstruction* const> safepoint
slot += int_spill_slots;
FALLTHROUGH_INTENDED;
case DataType::Type::kReference:
+ case DataType::Type::kUint32:
case DataType::Type::kInt32:
case DataType::Type::kUint16:
case DataType::Type::kUint8:
diff --git a/compiler/optimizing/register_allocator_graph_color.cc b/compiler/optimizing/register_allocator_graph_color.cc
index ad5248e982..fa7ad82316 100644
--- a/compiler/optimizing/register_allocator_graph_color.cc
+++ b/compiler/optimizing/register_allocator_graph_color.cc
@@ -1972,6 +1972,8 @@ void RegisterAllocatorGraphColor::AllocateSpillSlots(ArrayRef<InterferenceNode*
case DataType::Type::kInt16:
int_intervals.push_back(parent);
break;
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unexpected type for interval " << node->GetInterval()->GetType();
UNREACHABLE();
diff --git a/compiler/optimizing/register_allocator_linear_scan.cc b/compiler/optimizing/register_allocator_linear_scan.cc
index cfe63bd758..216fb57a96 100644
--- a/compiler/optimizing/register_allocator_linear_scan.cc
+++ b/compiler/optimizing/register_allocator_linear_scan.cc
@@ -1131,6 +1131,8 @@ void RegisterAllocatorLinearScan::AllocateSpillSlotFor(LiveInterval* interval) {
case DataType::Type::kInt16:
spill_slots = &int_spill_slots_;
break;
+ case DataType::Type::kUint32:
+ case DataType::Type::kUint64:
case DataType::Type::kVoid:
LOG(FATAL) << "Unexpected type for interval " << interval->GetType();
}
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index 3748d599a3..a70b0664dc 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -46,7 +46,7 @@ class RegisterAllocatorTest : public OptimizingUnitTest {
void ExpectedInRegisterHint(Strategy strategy);
// Helper functions that make use of the OptimizingUnitTest's members.
- bool Check(const uint16_t* data, Strategy strategy);
+ bool Check(const std::vector<uint16_t>& data, Strategy strategy);
void CFG1(Strategy strategy);
void Loop1(Strategy strategy);
void Loop2(Strategy strategy);
@@ -79,7 +79,7 @@ TEST_F(RegisterAllocatorTest, test_name##_GraphColor) {\
test_name(Strategy::kRegisterAllocatorGraphColor);\
}
-bool RegisterAllocatorTest::Check(const uint16_t* data, Strategy strategy) {
+bool RegisterAllocatorTest::Check(const std::vector<uint16_t>& data, Strategy strategy) {
HGraph* graph = CreateCFG(data);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
@@ -185,7 +185,7 @@ void RegisterAllocatorTest::CFG1(Strategy strategy) {
* |
* exit
*/
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::RETURN);
@@ -222,7 +222,7 @@ void RegisterAllocatorTest::Loop1(Strategy strategy) {
* exit
*/
- const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 4,
Instruction::CONST_4 | 4 << 12 | 0,
@@ -268,7 +268,7 @@ void RegisterAllocatorTest::Loop2(Strategy strategy) {
* exit
*/
- const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::CONST_4 | 8 << 12 | 1 << 8,
Instruction::IF_EQ | 1 << 8, 7,
@@ -314,7 +314,7 @@ void RegisterAllocatorTest::Loop3(Strategy strategy) {
* exit
*/
- const uint16_t data[] = THREE_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = THREE_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::ADD_INT_LIT8 | 1 << 8, 1 << 8,
Instruction::CONST_4 | 5 << 12 | 2 << 8,
@@ -351,7 +351,7 @@ void RegisterAllocatorTest::Loop3(Strategy strategy) {
TEST_ALL_STRATEGIES(Loop3);
TEST_F(RegisterAllocatorTest, FirstRegisterUse) {
- const uint16_t data[] = THREE_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = THREE_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::XOR_INT_LIT8 | 1 << 8, 1 << 8,
Instruction::XOR_INT_LIT8 | 0 << 8, 1 << 8,
@@ -402,7 +402,7 @@ void RegisterAllocatorTest::DeadPhi(Strategy strategy) {
* } while (true);
*/
- const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::CONST_4 | 1 << 8 | 0,
Instruction::IF_NE | 1 << 8 | 1 << 12, 3,
@@ -432,7 +432,7 @@ TEST_ALL_STRATEGIES(DeadPhi);
* This test only applies to the linear scan allocator.
*/
TEST_F(RegisterAllocatorTest, FreeUntil) {
- const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::RETURN);
diff --git a/compiler/optimizing/scheduler_test.cc b/compiler/optimizing/scheduler_test.cc
index 104ebc79c2..fb15fc8975 100644
--- a/compiler/optimizing/scheduler_test.cc
+++ b/compiler/optimizing/scheduler_test.cc
@@ -182,7 +182,9 @@ class SchedulerTest : public OptimizingUnitTest {
scheduler->Schedule(graph_);
}
- void CompileWithRandomSchedulerAndRun(const uint16_t* data, bool has_result, int expected) {
+ void CompileWithRandomSchedulerAndRun(const std::vector<uint16_t>& data,
+ bool has_result,
+ int expected) {
for (CodegenTargetConfig target_config : GetTargetConfigs()) {
HGraph* graph = CreateCFG(data);
@@ -393,7 +395,7 @@ TEST_F(SchedulerTest, RandomScheduling) {
// }
// return result;
//
- const uint16_t data[] = SIX_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = SIX_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 << 12 | 2 << 8, // const/4 v2, #int 0
Instruction::CONST_HIGH16 | 0 << 8, 0x4120, // const/high16 v0, #float 10.0 // #41200000
Instruction::CONST_4 | 1 << 12 | 1 << 8, // const/4 v1, #int 1
diff --git a/compiler/optimizing/ssa_test.cc b/compiler/optimizing/ssa_test.cc
index 77e70d733e..85ed06eb9b 100644
--- a/compiler/optimizing/ssa_test.cc
+++ b/compiler/optimizing/ssa_test.cc
@@ -31,7 +31,7 @@ namespace art {
class SsaTest : public OptimizingUnitTest {
protected:
- void TestCode(const uint16_t* data, const char* expected);
+ void TestCode(const std::vector<uint16_t>& data, const char* expected);
};
class SsaPrettyPrinter : public HPrettyPrinter {
@@ -80,7 +80,7 @@ static void ReNumberInstructions(HGraph* graph) {
}
}
-void SsaTest::TestCode(const uint16_t* data, const char* expected) {
+void SsaTest::TestCode(const std::vector<uint16_t>& data, const char* expected) {
HGraph* graph = CreateCFG(data);
// Suspend checks implementation may change in the future, and this test relies
// on how instructions are ordered.
@@ -119,7 +119,7 @@ TEST_F(SsaTest, CFG1) {
"BasicBlock 5, pred: 1, succ: 3\n"
" 7: Goto\n";
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3,
Instruction::GOTO | 0x100,
@@ -150,7 +150,7 @@ TEST_F(SsaTest, CFG2) {
"BasicBlock 5, pred: 1, succ: 3\n"
" 9: Goto\n";
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3,
Instruction::CONST_4 | 4 << 12 | 0,
@@ -181,7 +181,7 @@ TEST_F(SsaTest, CFG3) {
"BasicBlock 5, pred: 4\n"
" 10: Exit\n";
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 4,
Instruction::CONST_4 | 4 << 12 | 0,
@@ -214,7 +214,7 @@ TEST_F(SsaTest, Loop1) {
"BasicBlock 6, pred: 5\n"
" 10: Exit\n";
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 4,
Instruction::CONST_4 | 4 << 12 | 0,
@@ -245,7 +245,7 @@ TEST_F(SsaTest, Loop2) {
"BasicBlock 5, pred: 4\n"
" 9: Exit\n";
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 4,
Instruction::CONST_4 | 4 << 12 | 0,
@@ -276,7 +276,7 @@ TEST_F(SsaTest, Loop3) {
"BasicBlock 5, pred: 4\n"
" 10: Exit\n";
- const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 4,
Instruction::CONST_4 | 4 << 12 | 0,
@@ -310,7 +310,7 @@ TEST_F(SsaTest, Loop4) {
"BasicBlock 6, pred: 5\n"
" 10: Exit\n";
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::GOTO | 0x500,
Instruction::IF_EQ, 5,
@@ -351,7 +351,7 @@ TEST_F(SsaTest, Loop5) {
" 13: Phi(2, 1) [11, 8, 8]\n"
" 14: Goto\n";
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 4,
Instruction::CONST_4 | 4 << 12 | 0,
@@ -390,7 +390,7 @@ TEST_F(SsaTest, Loop6) {
"BasicBlock 7, pred: 6\n"
" 13: Exit\n";
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 8,
Instruction::CONST_4 | 4 << 12 | 0,
@@ -432,7 +432,7 @@ TEST_F(SsaTest, Loop7) {
"BasicBlock 8, pred: 2, succ: 6\n"
" 15: Goto\n";
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 8,
Instruction::CONST_4 | 4 << 12 | 0,
@@ -456,7 +456,7 @@ TEST_F(SsaTest, DeadLocal) {
"BasicBlock 2, pred: 1\n"
" 3: Exit\n";
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::RETURN_VOID);
@@ -484,7 +484,7 @@ TEST_F(SsaTest, LocalInIf) {
"BasicBlock 5, pred: 1, succ: 3\n"
" 8: Goto\n";
- const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3,
Instruction::CONST_4 | 4 << 12 | 1 << 8,
@@ -520,7 +520,7 @@ TEST_F(SsaTest, MultiplePredecessors) {
"BasicBlock 7, pred: 3, succ: 5\n"
" 12: Goto\n";
- const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 5,
Instruction::ADD_INT_LIT8 | 1 << 8, 0 << 8,
diff --git a/compiler/optimizing/superblock_cloner.cc b/compiler/optimizing/superblock_cloner.cc
new file mode 100644
index 0000000000..a7c23bef7e
--- /dev/null
+++ b/compiler/optimizing/superblock_cloner.cc
@@ -0,0 +1,704 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "superblock_cloner.h"
+
+#include "common_dominator.h"
+#include "graph_checker.h"
+
+#include <iostream>
+
+namespace art {
+
+using HBasicBlockMap = SuperblockCloner::HBasicBlockMap;
+using HInstructionMap = SuperblockCloner::HInstructionMap;
+using HBasicBlockSet = SuperblockCloner::HBasicBlockSet;
+using HEdgeSet = SuperblockCloner::HEdgeSet;
+
+void HEdge::Dump(std::ostream& stream) const {
+ stream << "(" << from_ << "->" << to_ << ")";
+}
+
+//
+// Static helper methods.
+//
+
+// Returns whether the instruction has any uses (regular or environmental) outside the region
+// defined by the basic block set.
+static bool IsUsedOutsideRegion(const HInstruction* instr, const HBasicBlockSet& bb_set) {
+ auto& uses = instr->GetUses();
+ for (auto use_node = uses.begin(), e = uses.end(); use_node != e; ++use_node) {
+ HInstruction* user = use_node->GetUser();
+ if (!bb_set.IsBitSet(user->GetBlock()->GetBlockId())) {
+ return true;
+ }
+ }
+
+ auto& env_uses = instr->GetEnvUses();
+ for (auto use_node = env_uses.begin(), e = env_uses.end(); use_node != e; ++use_node) {
+ HInstruction* user = use_node->GetUser()->GetHolder();
+ if (!bb_set.IsBitSet(user->GetBlock()->GetBlockId())) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+// Returns whether the phi's inputs are the same HInstruction.
+static bool ArePhiInputsTheSame(const HPhi* phi) {
+ HInstruction* first_input = phi->InputAt(0);
+ for (size_t i = 1, e = phi->InputCount(); i < e; i++) {
+ if (phi->InputAt(i) != first_input) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+// Returns a common predecessor of loop1 and loop2 in the loop tree or nullptr if it is the whole
+// graph.
+static HLoopInformation* FindCommonLoop(HLoopInformation* loop1, HLoopInformation* loop2) {
+ if (loop1 == nullptr || loop2 == nullptr) {
+ return nullptr;
+ }
+
+ if (loop1->IsIn(*loop2)) {
+ return loop2;
+ } else if (loop2->IsIn(*loop1)) {
+ return loop1;
+ }
+ HBasicBlock* block = CommonDominator::ForPair(loop1->GetHeader(), loop2->GetHeader());
+ return block->GetLoopInformation();
+}
+
+// Calls HGraph::OrderLoopHeaderPredecessors for each loop in the graph.
+static void OrderLoopsHeadersPredecessors(HGraph* graph) {
+ for (HBasicBlock* block : graph->GetPostOrder()) {
+ if (block->IsLoopHeader()) {
+ graph->OrderLoopHeaderPredecessors(block);
+ }
+ }
+}
+
+//
+// Helpers for CloneBasicBlock.
+//
+
+void SuperblockCloner::ReplaceInputsWithCopies(HInstruction* copy_instr) {
+ DCHECK(!copy_instr->IsPhi());
+ for (size_t i = 0, e = copy_instr->InputCount(); i < e; i++) {
+ // Copy instruction holds the same input as the original instruction holds.
+ HInstruction* orig_input = copy_instr->InputAt(i);
+ if (!IsInOrigBBSet(orig_input->GetBlock())) {
+ // Defined outside the subgraph.
+ continue;
+ }
+ HInstruction* copy_input = GetInstrCopy(orig_input);
+ // copy_instr will be registered as a user of copy_inputs after returning from this function:
+ // 'copy_block->AddInstruction(copy_instr)'.
+ copy_instr->SetRawInputAt(i, copy_input);
+ }
+}
+
+void SuperblockCloner::DeepCloneEnvironmentWithRemapping(HInstruction* copy_instr,
+ const HEnvironment* orig_env) {
+ if (orig_env->GetParent() != nullptr) {
+ DeepCloneEnvironmentWithRemapping(copy_instr, orig_env->GetParent());
+ }
+ HEnvironment* copy_env = new (arena_) HEnvironment(arena_, *orig_env, copy_instr);
+
+ for (size_t i = 0; i < orig_env->Size(); i++) {
+ HInstruction* env_input = orig_env->GetInstructionAt(i);
+ if (env_input != nullptr && IsInOrigBBSet(env_input->GetBlock())) {
+ env_input = GetInstrCopy(env_input);
+ DCHECK(env_input != nullptr && env_input->GetBlock() != nullptr);
+ }
+ copy_env->SetRawEnvAt(i, env_input);
+ if (env_input != nullptr) {
+ env_input->AddEnvUseAt(copy_env, i);
+ }
+ }
+ // InsertRawEnvironment assumes that the instruction already has an environment, which is why
+ // SetRawEnvironment is used in the 'else' case.
+ // Since this function calls itself recursively with the same copy_instr, copy_instr may already
+ // hold a partially copied chain of HEnvironments.
+ if (copy_instr->HasEnvironment()) {
+ copy_instr->InsertRawEnvironment(copy_env);
+ } else {
+ copy_instr->SetRawEnvironment(copy_env);
+ }
+}
+
+//
+// Helpers for RemapEdgesSuccessors.
+//
+
+void SuperblockCloner::RemapOrigInternalOrIncomingEdge(HBasicBlock* orig_block,
+ HBasicBlock* orig_succ) {
+ DCHECK(IsInOrigBBSet(orig_succ));
+ HBasicBlock* copy_succ = GetBlockCopy(orig_succ);
+
+ size_t this_index = orig_succ->GetPredecessorIndexOf(orig_block);
+ size_t phi_input_count = 0;
+ // This flag reflects whether the original successor has at least one phi and this phi
+ // has already been processed in the loop. Used for validation purposes in DCHECK to check that
+ // in the end all of the phis in the copy successor have the same number of inputs - the number
+ // of copy successor's predecessors.
+ bool first_phi_met = false;
+ for (HInstructionIterator it(orig_succ->GetPhis()); !it.Done(); it.Advance()) {
+ HPhi* orig_phi = it.Current()->AsPhi();
+ HPhi* copy_phi = GetInstrCopy(orig_phi)->AsPhi();
+ HInstruction* orig_phi_input = orig_phi->InputAt(this_index);
+ // Remove corresponding input for original phi.
+ orig_phi->RemoveInputAt(this_index);
+ // Copy phi doesn't yet have either orig_block as predecessor or the input that corresponds
+ // to orig_block, so add the input at the end of the list.
+ copy_phi->AddInput(orig_phi_input);
+ if (!first_phi_met) {
+ phi_input_count = copy_phi->InputCount();
+ first_phi_met = true;
+ } else {
+ DCHECK_EQ(phi_input_count, copy_phi->InputCount());
+ }
+ }
+ // orig_block will be put at the end of the copy_succ's predecessors list; that corresponds
+ // to the previously added phi inputs position.
+ orig_block->ReplaceSuccessor(orig_succ, copy_succ);
+ DCHECK(!first_phi_met || copy_succ->GetPredecessors().size() == phi_input_count);
+}
+
+void SuperblockCloner::AddCopyInternalEdge(HBasicBlock* orig_block,
+ HBasicBlock* orig_succ) {
+ DCHECK(IsInOrigBBSet(orig_succ));
+ HBasicBlock* copy_block = GetBlockCopy(orig_block);
+ HBasicBlock* copy_succ = GetBlockCopy(orig_succ);
+ copy_block->AddSuccessor(copy_succ);
+
+ size_t orig_index = orig_succ->GetPredecessorIndexOf(orig_block);
+ for (HInstructionIterator it(orig_succ->GetPhis()); !it.Done(); it.Advance()) {
+ HPhi* orig_phi = it.Current()->AsPhi();
+ HPhi* copy_phi = GetInstrCopy(orig_phi)->AsPhi();
+ HInstruction* orig_phi_input = orig_phi->InputAt(orig_index);
+ copy_phi->AddInput(orig_phi_input);
+ }
+}
+
+void SuperblockCloner::RemapCopyInternalEdge(HBasicBlock* orig_block,
+ HBasicBlock* orig_succ) {
+ DCHECK(IsInOrigBBSet(orig_succ));
+ HBasicBlock* copy_block = GetBlockCopy(orig_block);
+ copy_block->AddSuccessor(orig_succ);
+ DCHECK(copy_block->HasSuccessor(orig_succ));
+
+ size_t orig_index = orig_succ->GetPredecessorIndexOf(orig_block);
+ for (HInstructionIterator it(orig_succ->GetPhis()); !it.Done(); it.Advance()) {
+ HPhi* orig_phi = it.Current()->AsPhi();
+ HInstruction* orig_phi_input = orig_phi->InputAt(orig_index);
+ orig_phi->AddInput(orig_phi_input);
+ }
+}
+
+//
+// Local versions of CF calculation/adjustment routines.
+//
+
+// TODO: merge with the original version in nodes.cc. The concern is that we don't want to affect
+// the performance of the base version by checking the local set.
+// TODO: this version works when updating the back edges info for natural loop-based local_set.
+// Check exactly which types of subgraphs can be analysed, or rename it to
+// FindBackEdgesInTheNaturalLoop.
+void SuperblockCloner::FindBackEdgesLocal(HBasicBlock* entry_block, ArenaBitVector* local_set) {
+ ArenaBitVector visited(arena_, graph_->GetBlocks().size(), false, kArenaAllocSuperblockCloner);
+ // "visited" must be empty on entry, it's an output argument for all visited (i.e. live) blocks.
+ DCHECK_EQ(visited.GetHighestBitSet(), -1);
+
+ // Nodes that we're currently visiting, indexed by block id.
+ ArenaBitVector visiting(arena_, graph_->GetBlocks().size(), false, kArenaAllocGraphBuilder);
+ // Number of successors visited from a given node, indexed by block id.
+ ArenaVector<size_t> successors_visited(graph_->GetBlocks().size(),
+ 0u,
+ arena_->Adapter(kArenaAllocGraphBuilder));
+ // Stack of nodes that we're currently visiting (same as marked in "visiting" above).
+ ArenaVector<HBasicBlock*> worklist(arena_->Adapter(kArenaAllocGraphBuilder));
+ constexpr size_t kDefaultWorklistSize = 8;
+ worklist.reserve(kDefaultWorklistSize);
+
+ visited.SetBit(entry_block->GetBlockId());
+ visiting.SetBit(entry_block->GetBlockId());
+ worklist.push_back(entry_block);
+
+ while (!worklist.empty()) {
+ HBasicBlock* current = worklist.back();
+ uint32_t current_id = current->GetBlockId();
+ if (successors_visited[current_id] == current->GetSuccessors().size()) {
+ visiting.ClearBit(current_id);
+ worklist.pop_back();
+ } else {
+ HBasicBlock* successor = current->GetSuccessors()[successors_visited[current_id]++];
+ uint32_t successor_id = successor->GetBlockId();
+ if (!local_set->IsBitSet(successor_id)) {
+ continue;
+ }
+
+ if (visiting.IsBitSet(successor_id)) {
+ DCHECK(ContainsElement(worklist, successor));
+ successor->AddBackEdgeWhileUpdating(current);
+ } else if (!visited.IsBitSet(successor_id)) {
+ visited.SetBit(successor_id);
+ visiting.SetBit(successor_id);
+ worklist.push_back(successor);
+ }
+ }
+ }
+}
+
+void SuperblockCloner::RecalculateBackEdgesInfo(ArenaBitVector* outer_loop_bb_set) {
+ // TODO: DCHECK that after the transformation the graph is connected.
+ HBasicBlock* block_entry = nullptr;
+
+ if (outer_loop_ == nullptr) {
+ for (auto block : graph_->GetBlocks()) {
+ if (block != nullptr) {
+ outer_loop_bb_set->SetBit(block->GetBlockId());
+ HLoopInformation* info = block->GetLoopInformation();
+ if (info != nullptr) {
+ info->ResetBasicBlockData();
+ }
+ }
+ }
+ block_entry = graph_->GetEntryBlock();
+ } else {
+ outer_loop_bb_set->Copy(&outer_loop_bb_set_);
+ block_entry = outer_loop_->GetHeader();
+
+ // Add newly created copy blocks.
+ for (auto entry : *bb_map_) {
+ outer_loop_bb_set->SetBit(entry.second->GetBlockId());
+ }
+
+ // Clear loop_info for the whole outer loop.
+ for (uint32_t idx : outer_loop_bb_set->Indexes()) {
+ HBasicBlock* block = GetBlockById(idx);
+ HLoopInformation* info = block->GetLoopInformation();
+ if (info != nullptr) {
+ info->ResetBasicBlockData();
+ }
+ }
+ }
+
+ FindBackEdgesLocal(block_entry, outer_loop_bb_set);
+
+ for (uint32_t idx : outer_loop_bb_set->Indexes()) {
+ HBasicBlock* block = GetBlockById(idx);
+ HLoopInformation* info = block->GetLoopInformation();
+ // Reset LoopInformation for regular blocks and old headers which are no longer loop headers.
+ if (info != nullptr &&
+ (info->GetHeader() != block || info->NumberOfBackEdges() == 0)) {
+ block->SetLoopInformation(nullptr);
+ }
+ }
+}
+
+// This is a modified version of HGraph::AnalyzeLoops.
+GraphAnalysisResult SuperblockCloner::AnalyzeLoopsLocally(ArenaBitVector* outer_loop_bb_set) {
+ // We iterate post order to ensure we visit inner loops before outer loops.
+ // `PopulateRecursive` needs this guarantee to know whether a natural loop
+ // contains an irreducible loop.
+ for (HBasicBlock* block : graph_->GetPostOrder()) {
+ if (!outer_loop_bb_set->IsBitSet(block->GetBlockId())) {
+ continue;
+ }
+ if (block->IsLoopHeader()) {
+ if (block->IsCatchBlock()) {
+ // TODO: Dealing with exceptional back edges could be tricky because
+ // they only approximate the real control flow. Bail out for now.
+ return kAnalysisFailThrowCatchLoop;
+ }
+ block->GetLoopInformation()->Populate();
+ }
+ }
+
+ for (HBasicBlock* block : graph_->GetPostOrder()) {
+ if (!outer_loop_bb_set->IsBitSet(block->GetBlockId())) {
+ continue;
+ }
+ if (block->IsLoopHeader()) {
+ HLoopInformation* cur_loop = block->GetLoopInformation();
+ HLoopInformation* outer_loop = cur_loop->GetPreHeader()->GetLoopInformation();
+ if (outer_loop != nullptr) {
+ outer_loop->PopulateInnerLoopUpwards(cur_loop);
+ }
+ }
+ }
+
+ return kAnalysisSuccess;
+}
+
+void SuperblockCloner::CleanUpControlFlow() {
+ // TODO: full control flow clean up for now, optimize it.
+ graph_->ClearDominanceInformation();
+
+ ArenaBitVector outer_loop_bb_set(
+ arena_, graph_->GetBlocks().size(), false, kArenaAllocSuperblockCloner);
+ RecalculateBackEdgesInfo(&outer_loop_bb_set);
+
+ // TODO: do it locally.
+ graph_->SimplifyCFG();
+ graph_->ComputeDominanceInformation();
+
+ // AnalyzeLoopsLocally requires correct post-ordering information, which was calculated just
+ // before in ComputeDominanceInformation.
+ GraphAnalysisResult result = AnalyzeLoopsLocally(&outer_loop_bb_set);
+ DCHECK_EQ(result, kAnalysisSuccess);
+
+ // TODO: do it locally
+ OrderLoopsHeadersPredecessors(graph_);
+
+ graph_->ComputeTryBlockInformation();
+}
+
+//
+// Helpers for ResolveDataFlow
+//
+
+void SuperblockCloner::ResolvePhi(HPhi* phi) {
+ HBasicBlock* phi_block = phi->GetBlock();
+ for (size_t i = 0, e = phi->InputCount(); i < e; i++) {
+ HInstruction* input = phi->InputAt(i);
+ HBasicBlock* input_block = input->GetBlock();
+
+ // Originally defined outside the region.
+ if (!IsInOrigBBSet(input_block)) {
+ continue;
+ }
+ HBasicBlock* corresponding_block = phi_block->GetPredecessors()[i];
+ if (!IsInOrigBBSet(corresponding_block)) {
+ phi->ReplaceInput(GetInstrCopy(input), i);
+ }
+ }
+}
+
+//
+// Main algorithm methods.
+//
+
+void SuperblockCloner::SearchForSubgraphExits(ArenaVector<HBasicBlock*>* exits) {
+ DCHECK(exits->empty());
+ for (uint32_t block_id : orig_bb_set_.Indexes()) {
+ HBasicBlock* block = GetBlockById(block_id);
+ for (HBasicBlock* succ : block->GetSuccessors()) {
+ if (!IsInOrigBBSet(succ)) {
+ exits->push_back(succ);
+ }
+ }
+ }
+}
+
+void SuperblockCloner::FindAndSetLocalAreaForAdjustments() {
+ DCHECK(outer_loop_ == nullptr);
+ ArenaVector<HBasicBlock*> exits(arena_->Adapter(kArenaAllocSuperblockCloner));
+ SearchForSubgraphExits(&exits);
+
+ // For a reducible graph we need to update back-edges and dominance information only for
+ // the outermost loop which is affected by the transformation - it can be found by picking
+ // the outermost common loop of the loops to which the subgraph exit blocks belong.
+ // Note: it can be a loop or the whole graph (outer_loop_ will be nullptr in this case).
+ for (HBasicBlock* exit : exits) {
+ HLoopInformation* loop_exit_loop_info = exit->GetLoopInformation();
+ if (loop_exit_loop_info == nullptr) {
+ outer_loop_ = nullptr;
+ break;
+ }
+ outer_loop_ = FindCommonLoop(outer_loop_, loop_exit_loop_info);
+ }
+
+ if (outer_loop_ != nullptr) {
+ // Save the loop population info as it will be changed later.
+ outer_loop_bb_set_.Copy(&outer_loop_->GetBlocks());
+ }
+}
+
+void SuperblockCloner::RemapEdgesSuccessors() {
+ // Redirect incoming edges.
+ for (HEdge e : *remap_incoming_) {
+ HBasicBlock* orig_block = GetBlockById(e.GetFrom());
+ HBasicBlock* orig_succ = GetBlockById(e.GetTo());
+ RemapOrigInternalOrIncomingEdge(orig_block, orig_succ);
+ }
+
+ // Redirect internal edges.
+ for (uint32_t orig_block_id : orig_bb_set_.Indexes()) {
+ HBasicBlock* orig_block = GetBlockById(orig_block_id);
+
+ for (HBasicBlock* orig_succ : orig_block->GetSuccessors()) {
+ uint32_t orig_succ_id = orig_succ->GetBlockId();
+
+ // Check for outgoing edge.
+ if (!IsInOrigBBSet(orig_succ)) {
+ HBasicBlock* copy_block = GetBlockCopy(orig_block);
+ copy_block->AddSuccessor(orig_succ);
+ continue;
+ }
+
+ auto orig_redir = remap_orig_internal_->Find(HEdge(orig_block_id, orig_succ_id));
+ auto copy_redir = remap_copy_internal_->Find(HEdge(orig_block_id, orig_succ_id));
+
+ // By construction, all successors of the copied block were initially set to the original ones.
+ if (copy_redir != remap_copy_internal_->end()) {
+ RemapCopyInternalEdge(orig_block, orig_succ);
+ } else {
+ AddCopyInternalEdge(orig_block, orig_succ);
+ }
+
+ if (orig_redir != remap_orig_internal_->end()) {
+ RemapOrigInternalOrIncomingEdge(orig_block, orig_succ);
+ }
+ }
+ }
+}
+
+void SuperblockCloner::AdjustControlFlowInfo() {
+ ArenaBitVector outer_loop_bb_set(
+ arena_, graph_->GetBlocks().size(), false, kArenaAllocSuperblockCloner);
+ RecalculateBackEdgesInfo(&outer_loop_bb_set);
+
+ graph_->ClearDominanceInformation();
+ // TODO: Do it locally.
+ graph_->ComputeDominanceInformation();
+}
+
+// TODO: Current FastCase restriction guarantees that instructions' inputs are already mapped to
+// the valid values; only phis' inputs must be adjusted.
+void SuperblockCloner::ResolveDataFlow() {
+ for (auto entry : *bb_map_) {
+ HBasicBlock* orig_block = entry.first;
+
+ for (HInstructionIterator it(orig_block->GetPhis()); !it.Done(); it.Advance()) {
+ HPhi* orig_phi = it.Current()->AsPhi();
+ HPhi* copy_phi = GetInstrCopy(orig_phi)->AsPhi();
+ ResolvePhi(orig_phi);
+ ResolvePhi(copy_phi);
+ }
+ if (kIsDebugBuild) {
+ // Inputs of instruction copies must already be mapped to the corresponding input copies.
+ for (HInstructionIterator it(orig_block->GetInstructions()); !it.Done(); it.Advance()) {
+ CheckInstructionInputsRemapping(it.Current());
+ }
+ }
+ }
+}
+
+//
+// Debug and logging methods.
+//
+
+void SuperblockCloner::CheckInstructionInputsRemapping(HInstruction* orig_instr) {
+ DCHECK(!orig_instr->IsPhi());
+ HInstruction* copy_instr = GetInstrCopy(orig_instr);
+ for (size_t i = 0, e = orig_instr->InputCount(); i < e; i++) {
+ HInstruction* orig_input = orig_instr->InputAt(i);
+ DCHECK(orig_input->GetBlock()->Dominates(orig_instr->GetBlock()));
+
+ // If original input is defined outside the region then it will remain for both original
+ // instruction and the copy after the transformation.
+ if (!IsInOrigBBSet(orig_input->GetBlock())) {
+ continue;
+ }
+ HInstruction* copy_input = GetInstrCopy(orig_input);
+ DCHECK(copy_input->GetBlock()->Dominates(copy_instr->GetBlock()));
+ }
+
+ // Resolve environment.
+ if (orig_instr->HasEnvironment()) {
+ HEnvironment* orig_env = orig_instr->GetEnvironment();
+
+ for (size_t i = 0, e = orig_env->Size(); i < e; ++i) {
+ HInstruction* orig_input = orig_env->GetInstructionAt(i);
+
+ // If original input is defined outside the region then it will remain for both original
+ // instruction and the copy after the transformation.
+ if (orig_input == nullptr || !IsInOrigBBSet(orig_input->GetBlock())) {
+ continue;
+ }
+
+ HInstruction* copy_input = GetInstrCopy(orig_input);
+ DCHECK(copy_input->GetBlock()->Dominates(copy_instr->GetBlock()));
+ }
+ }
+}
+
+//
+// Public methods.
+//
+
+SuperblockCloner::SuperblockCloner(HGraph* graph,
+ const HBasicBlockSet* orig_bb_set,
+ HBasicBlockMap* bb_map,
+ HInstructionMap* hir_map)
+ : graph_(graph),
+ arena_(graph->GetAllocator()),
+ orig_bb_set_(arena_, orig_bb_set->GetSizeOf(), true, kArenaAllocSuperblockCloner),
+ remap_orig_internal_(nullptr),
+ remap_copy_internal_(nullptr),
+ remap_incoming_(nullptr),
+ bb_map_(bb_map),
+ hir_map_(hir_map),
+ outer_loop_(nullptr),
+ outer_loop_bb_set_(arena_, orig_bb_set->GetSizeOf(), true, kArenaAllocSuperblockCloner) {
+ orig_bb_set_.Copy(orig_bb_set);
+}
+
+void SuperblockCloner::SetSuccessorRemappingInfo(const HEdgeSet* remap_orig_internal,
+ const HEdgeSet* remap_copy_internal,
+ const HEdgeSet* remap_incoming) {
+ remap_orig_internal_ = remap_orig_internal;
+ remap_copy_internal_ = remap_copy_internal;
+ remap_incoming_ = remap_incoming;
+}
+
+bool SuperblockCloner::IsSubgraphClonable() const {
+ // TODO: Support irreducible graphs and graphs with try-catch.
+ if (graph_->HasIrreducibleLoops() || graph_->HasTryCatch()) {
+ return false;
+ }
+
+ // Check that there are no instructions defined in the subgraph and used outside.
+ // TODO: Improve this by accepting graph with such uses but only one exit.
+ for (uint32_t idx : orig_bb_set_.Indexes()) {
+ HBasicBlock* block = GetBlockById(idx);
+
+ for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
+ HInstruction* instr = it.Current();
+ if (!instr->IsClonable() ||
+ IsUsedOutsideRegion(instr, orig_bb_set_)) {
+ return false;
+ }
+ }
+
+ for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
+ HInstruction* instr = it.Current();
+ if (!instr->IsClonable() ||
+ IsUsedOutsideRegion(instr, orig_bb_set_)) {
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+void SuperblockCloner::Run() {
+ DCHECK(bb_map_ != nullptr);
+ DCHECK(hir_map_ != nullptr);
+ DCHECK(remap_orig_internal_ != nullptr &&
+ remap_copy_internal_ != nullptr &&
+ remap_incoming_ != nullptr);
+ DCHECK(IsSubgraphClonable());
+
+ // Find an area in the graph for which control flow information should be adjusted.
+ FindAndSetLocalAreaForAdjustments();
+ // Clone the basic blocks from the orig_bb_set_; data flow is invalid after the call and is to be
+ // adjusted.
+ CloneBasicBlocks();
+ // Connect the blocks together, remap successors and fix phis which are directly affected by the
+ // remapping.
+ RemapEdgesSuccessors();
+ // Recalculate dominance and backedge information which is required by the next stage.
+ AdjustControlFlowInfo();
+ // Fix data flow of the graph.
+ ResolveDataFlow();
+}
+
+void SuperblockCloner::CleanUp() {
+ CleanUpControlFlow();
+
+ // Remove phis whose inputs are all the same.
+ // When a block has a single predecessor it must not have any phis. However, after the
+ // transformation it can happen that such a block ends up with a phi with a single input.
+ // Since those have to be processed anyway, phis with multiple identical inputs are also
+ // simplified here.
+ for (auto entry : *bb_map_) {
+ HBasicBlock* orig_block = entry.first;
+ for (HInstructionIterator inst_it(orig_block->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
+ HPhi* phi = inst_it.Current()->AsPhi();
+ if (ArePhiInputsTheSame(phi)) {
+ phi->ReplaceWith(phi->InputAt(0));
+ orig_block->RemovePhi(phi);
+ }
+ }
+
+ HBasicBlock* copy_block = GetBlockCopy(orig_block);
+ for (HInstructionIterator inst_it(copy_block->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
+ HPhi* phi = inst_it.Current()->AsPhi();
+ if (ArePhiInputsTheSame(phi)) {
+ phi->ReplaceWith(phi->InputAt(0));
+ copy_block->RemovePhi(phi);
+ }
+ }
+ }
+}
+
+HBasicBlock* SuperblockCloner::CloneBasicBlock(const HBasicBlock* orig_block) {
+ HGraph* graph = orig_block->GetGraph();
+ HBasicBlock* copy_block = new (arena_) HBasicBlock(graph, orig_block->GetDexPc());
+ graph->AddBlock(copy_block);
+
+ // Clone all the phis and add them to the map.
+ for (HInstructionIterator it(orig_block->GetPhis()); !it.Done(); it.Advance()) {
+ HInstruction* orig_instr = it.Current();
+ HInstruction* copy_instr = orig_instr->Clone(arena_);
+ copy_block->AddPhi(copy_instr->AsPhi());
+ copy_instr->AsPhi()->RemoveAllInputs();
+ DCHECK(!orig_instr->HasEnvironment());
+ hir_map_->Put(orig_instr, copy_instr);
+ }
+
+ // Clone all the instructions and add them to the map.
+ for (HInstructionIterator it(orig_block->GetInstructions()); !it.Done(); it.Advance()) {
+ HInstruction* orig_instr = it.Current();
+ HInstruction* copy_instr = orig_instr->Clone(arena_);
+ ReplaceInputsWithCopies(copy_instr);
+ copy_block->AddInstruction(copy_instr);
+ if (orig_instr->HasEnvironment()) {
+ DeepCloneEnvironmentWithRemapping(copy_instr, orig_instr->GetEnvironment());
+ }
+ hir_map_->Put(orig_instr, copy_instr);
+ }
+
+ return copy_block;
+}
+
+void SuperblockCloner::CloneBasicBlocks() {
+ // By this time ReversePostOrder must be valid: in 'CloneBasicBlock' inputs of the copied
+ // instructions might be replaced by copies of the original inputs (depending on where those inputs
+ // are defined). So the definitions of the original inputs must be visited before their original
+ // uses. The property of the reducible graphs "if 'A' dom 'B' then rpo_num('A') >= rpo_num('B')"
+ // guarantees that.
+ for (HBasicBlock* orig_block : graph_->GetReversePostOrder()) {
+ if (!IsInOrigBBSet(orig_block)) {
+ continue;
+ }
+ HBasicBlock* copy_block = CloneBasicBlock(orig_block);
+ bb_map_->Put(orig_block, copy_block);
+ if (kSuperblockClonerLogging) {
+ std::cout << "new block :" << copy_block->GetBlockId() << ": " << orig_block->GetBlockId() <<
+ std::endl;
+ }
+ }
+}
+
+} // namespace art
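For orientation, a rough sketch of how a client pass could drive the cloner over a single natural loop. The container setup mirrors the unit test added further below; the edge-remapping sets are deliberately left empty for brevity (a real transformation such as loop peeling would populate them), so treat this as an illustrative skeleton rather than code from this patch:

// Sketch: clone the blocks of 'loop_info' inside 'graph'. Empty remapping sets mean no edges
// are redirected, so this only shows the shape of the API, not a useful transformation.
static void CloneLoopSketch(HGraph* graph, HLoopInformation* loop_info) {
  ArenaAllocator* arena = graph->GetAllocator();
  ArenaBitVector orig_bb_set(arena, graph->GetBlocks().size(), false, kArenaAllocSuperblockCloner);
  orig_bb_set.Union(&loop_info->GetBlocks());

  SuperblockCloner::HBasicBlockMap bb_map(
      std::less<HBasicBlock*>(), arena->Adapter(kArenaAllocSuperblockCloner));
  SuperblockCloner::HInstructionMap hir_map(
      std::less<HInstruction*>(), arena->Adapter(kArenaAllocSuperblockCloner));
  SuperblockCloner::HEdgeSet remap_orig_internal(arena->Adapter(kArenaAllocSuperblockCloner));
  SuperblockCloner::HEdgeSet remap_copy_internal(arena->Adapter(kArenaAllocSuperblockCloner));
  SuperblockCloner::HEdgeSet remap_incoming(arena->Adapter(kArenaAllocSuperblockCloner));

  SuperblockCloner cloner(graph, &orig_bb_set, &bb_map, &hir_map);
  cloner.SetSuccessorRemappingInfo(&remap_orig_internal, &remap_copy_internal, &remap_incoming);
  if (cloner.IsSubgraphClonable()) {
    cloner.Run();      // Clone blocks, remap edges, adjust control flow, resolve data flow.
    cloner.CleanUp();  // Recompute loop/dominance info and remove redundant phis.
  }
}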
diff --git a/compiler/optimizing/superblock_cloner.h b/compiler/optimizing/superblock_cloner.h
new file mode 100644
index 0000000000..23de692673
--- /dev/null
+++ b/compiler/optimizing/superblock_cloner.h
@@ -0,0 +1,323 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_SUPERBLOCK_CLONER_H_
+#define ART_COMPILER_OPTIMIZING_SUPERBLOCK_CLONER_H_
+
+#include "base/arena_bit_vector.h"
+#include "base/arena_containers.h"
+#include "base/bit_vector-inl.h"
+#include "nodes.h"
+
+namespace art {
+
+static const bool kSuperblockClonerLogging = false;
+static const bool kSuperblockClonerVerify = false;
+
+// Represents an edge between two HBasicBlocks.
+//
+// Note: objects of this class are small - pass them by value.
+class HEdge : public ArenaObject<kArenaAllocSuperblockCloner> {
+ public:
+ HEdge(HBasicBlock* from, HBasicBlock* to) : from_(from->GetBlockId()), to_(to->GetBlockId()) {
+ DCHECK_NE(to_, kInvalidBlockId);
+ DCHECK_NE(from_, kInvalidBlockId);
+ }
+ HEdge(uint32_t from, uint32_t to) : from_(from), to_(to) {
+ DCHECK_NE(to_, kInvalidBlockId);
+ DCHECK_NE(from_, kInvalidBlockId);
+ }
+ HEdge() : from_(kInvalidBlockId), to_(kInvalidBlockId) {}
+
+ uint32_t GetFrom() const { return from_; }
+ uint32_t GetTo() const { return to_; }
+
+ bool operator==(const HEdge& other) const {
+ return this->from_ == other.from_ && this->to_ == other.to_;
+ }
+
+ bool operator!=(const HEdge& other) const { return !operator==(other); }
+ void Dump(std::ostream& stream) const;
+
+ // Returns whether an edge represents a valid edge in CF graph: whether the from_ block
+ // has to_ block as a successor.
+ bool IsValid() const { return from_ != kInvalidBlockId && to_ != kInvalidBlockId; }
+
+ private:
+ // Predecessor block id.
+ uint32_t from_;
+ // Successor block id.
+ uint32_t to_;
+};
+
+// Returns whether an HEdge corresponds to an existing edge in the graph.
+inline bool IsEdgeValid(HEdge edge, HGraph* graph) {
+ if (!edge.IsValid()) {
+ return false;
+ }
+ uint32_t from = edge.GetFrom();
+ uint32_t to = edge.GetTo();
+ if (from >= graph->GetBlocks().size() || to >= graph->GetBlocks().size()) {
+ return false;
+ }
+
+ HBasicBlock* block_from = graph->GetBlocks()[from];
+ HBasicBlock* block_to = graph->GetBlocks()[to];
+ if (block_from == nullptr || block_to == nullptr) {
+ return false;
+ }
+
+ return block_from->HasSuccessor(block_to, 0);
+}
+
+// SuperblockCloner provides a way to clone subgraphs at a high level, without fine-grained
+// manipulation of the IR; data flow and graph properties are resolved/adjusted automatically.
+// The clone transformation is defined by specifying a set of basic blocks to copy and a set of
+// rules for how to treat edges and remap their successors. With this approach, optimizations such
+// as Branch Target Expansion, Loop Peeling and Loop Unrolling can be implemented.
+//
+// The idea of the transformation is based on "Superblock cloning" technique described in the book
+// "Engineering a Compiler. Second Edition", Keith D. Cooper, Linda Torczon, Rice University
+// Houston, Texas. 2nd edition, Morgan Kaufmann. The original paper is "The Superblock: An Effective
+// Technique for VLIW and Superscalar Compilation" by Hwu, W.M.W., Mahlke, S.A., Chen, W.Y. et al.
+// J Supercomput (1993) 7: 229. doi:10.1007/BF01205185.
+//
+// There are two states of the IR graph: original graph (before the transformation) and
+// copy graph (after).
+//
+// Before the transformation:
+// Defining a set of basic blocks to copy (orig_bb_set) partitions all of the edges in the original
+// graph into 4 categories/sets (use the following notation for edges: "(pred, succ)",
+// where pred, succ - basic blocks):
+// - internal - pred, succ are members of ‘orig_bb_set’.
+// - outside - pred, succ are not members of ‘orig_bb_set’.
+// - incoming - pred is not a member of ‘orig_bb_set’, succ is.
+// - outgoing - pred is a member of ‘orig_bb_set’, succ is not.
+//
+// Transformation:
+//
+// 1. Initial cloning:
+// 1.1. For each ‘orig_block’ in orig_bb_set create a copy ‘copy_block’; these new blocks
+// form ‘copy_bb_set’.
+// 1.2. For each edge (X, Y) from internal set create an edge (X_1, Y_1) where X_1, Y_1 are the
+// copies of X, Y basic blocks correspondingly; these new edges form ‘copy_internal’ edge
+// set.
+// 1.3. For each edge (X, Y) from outgoing set create an edge (X_1, Y_1) where X_1, Y_1 are the
+// copies of X, Y basic blocks correspondingly; these new edges form ‘copy_outgoing’ edge
+// set.
+// 2. Successors remapping.
+// 2.1. 'remap_orig_internal’ - set of edges (X, Y) from ‘orig_bb_set’ whose successors should
+// be remapped to copy nodes: ((X, Y) will be transformed into (X, Y_1)).
+// 2.2. ‘remap_copy_internal’ - set of edges (X_1, Y_1) from ‘copy_bb_set’ whose successors
+// should be remapped to original nodes: ((X_1, Y_1) will be transformed into (X_1, Y)).
+// 2.3. 'remap_incoming’ - set of edges (X, Y) from the ‘incoming’ edge set in the original graph
+// whose successors should be remapped to copy nodes: ((X, Y) will be transformed into
+// (X, Y_1)).
+// 3. Adjust control flow structures and relations (dominance, reverse post order, loops, etc).
+// 4. Fix/resolve data flow.
+// 5. Do cleanups (DCE, critical edges splitting, etc).
+//
+class SuperblockCloner : public ValueObject {
+ public:
+ // TODO: Investigate optimal types for the containers.
+ using HBasicBlockMap = ArenaSafeMap<HBasicBlock*, HBasicBlock*>;
+ using HInstructionMap = ArenaSafeMap<HInstruction*, HInstruction*>;
+ using HBasicBlockSet = ArenaBitVector;
+ using HEdgeSet = ArenaHashSet<HEdge>;
+
+ SuperblockCloner(HGraph* graph,
+ const HBasicBlockSet* orig_bb_set,
+ HBasicBlockMap* bb_map,
+ HInstructionMap* hir_map);
+
+ // Sets edge successor remapping info specified by corresponding edge sets.
+ void SetSuccessorRemappingInfo(const HEdgeSet* remap_orig_internal,
+ const HEdgeSet* remap_copy_internal,
+ const HEdgeSet* remap_incoming);
+
+ // Returns whether the specified subgraph is copyable.
+ // TODO: Start from small range of graph patterns then extend it.
+ bool IsSubgraphClonable() const;
+
+ // Runs the copy algorithm according to the description.
+ void Run();
+
+ // Cleans up the graph after transformation: splits critical edges, recalculates control flow
+ // information (back-edges, dominators, loop info, etc), eliminates redundant phis.
+ void CleanUp();
+
+ // Returns a clone of a basic block (orig_block).
+ //
+ // - The copy block will have no successors/predecessors; they should be set up manually.
+ // - For each instruction in the orig_block a copy is created and inserted into the copy block;
+ // this correspondence is recorded in the map (old instruction, new instruction).
+ // - Graph HIR is not valid after this transformation: all of the HIRs have the same inputs as
+ // in the original block; PHIs do not reflect a correct correspondence between values and
+ // predecessors (as the copy block has no predecessors yet), etc.
+ HBasicBlock* CloneBasicBlock(const HBasicBlock* orig_block);
+
+ // Creates a clone for each basic block in orig_bb_set, adding corresponding entries into bb_map_
+ // and hir_map_.
+ void CloneBasicBlocks();
+
+ HInstruction* GetInstrCopy(HInstruction* orig_instr) const {
+ auto copy_input_iter = hir_map_->find(orig_instr);
+ DCHECK(copy_input_iter != hir_map_->end());
+ return copy_input_iter->second;
+ }
+
+ HBasicBlock* GetBlockCopy(HBasicBlock* orig_block) const {
+ HBasicBlock* block = bb_map_->Get(orig_block);
+ DCHECK(block != nullptr);
+ return block;
+ }
+
+ HInstruction* GetInstrOrig(HInstruction* copy_instr) const {
+ for (auto it : *hir_map_) {
+ if (it.second == copy_instr) {
+ return it.first;
+ }
+ }
+ return nullptr;
+ }
+
+ bool IsInOrigBBSet(uint32_t block_id) const {
+ return orig_bb_set_.IsBitSet(block_id);
+ }
+
+ bool IsInOrigBBSet(const HBasicBlock* block) const {
+ return IsInOrigBBSet(block->GetBlockId());
+ }
+
+ private:
+ // Fills the 'exits' vector with the subgraph exits.
+ void SearchForSubgraphExits(ArenaVector<HBasicBlock*>* exits);
+
+ // Finds and records information about the area in the graph for which control-flow (back edges,
+ // loops, dominators) needs to be adjusted.
+ void FindAndSetLocalAreaForAdjustments();
+
+ // Remaps edges' successors according to the info specified in the edges sets.
+ //
+ // Only edge successors/predecessors and phis' input records (to have a correspondence between
+ // a phi input record (not value) and a block's predecessor) are adjusted at this stage: neither
+ // phis' nor instructions' inputs values are resolved.
+ void RemapEdgesSuccessors();
+
+ // Adjusts control-flow (back edges, loops, dominators) for the local area defined by
+ // FindAndSetLocalAreaForAdjustments.
+ void AdjustControlFlowInfo();
+
+ // Resolves Data Flow - adjusts phis' and instructions' inputs in order to have a valid graph in
+ // the SSA form.
+ void ResolveDataFlow();
+
+ //
+ // Helpers for CloneBasicBlock.
+ //
+
+ // Adjusts the copy instruction's inputs: if an input of the original instruction is defined in
+ // the orig_bb_set, replaces it with the corresponding copy; otherwise leaves it as the original.
+ void ReplaceInputsWithCopies(HInstruction* copy_instr);
+
+ // Recursively clones the environment for the copy instruction. If the input of the original
+ // environment is defined in the orig_bb_set, replaces it with the corresponding copy; otherwise
+ // leaves it as the original.
+ void DeepCloneEnvironmentWithRemapping(HInstruction* copy_instr, const HEnvironment* orig_env);
+
+ //
+ // Helpers for RemapEdgesSuccessors.
+ //
+
+ // Remaps incoming or original internal edge to its copy, adjusts the phi inputs in orig_succ and
+ // copy_succ.
+ void RemapOrigInternalOrIncomingEdge(HBasicBlock* orig_block, HBasicBlock* orig_succ);
+
+ // Adds copy internal edge (from copy_block to copy_succ), updates phis in the copy_succ.
+ void AddCopyInternalEdge(HBasicBlock* orig_block, HBasicBlock* orig_succ);
+
+ // Remaps copy internal edge to its origin, adjusts the phi inputs in orig_succ.
+ void RemapCopyInternalEdge(HBasicBlock* orig_block, HBasicBlock* orig_succ);
+
+ //
+ // Local versions of control flow calculation/adjustment routines.
+ //
+
+ void FindBackEdgesLocal(HBasicBlock* entry_block, ArenaBitVector* local_set);
+ void RecalculateBackEdgesInfo(ArenaBitVector* outer_loop_bb_set);
+ GraphAnalysisResult AnalyzeLoopsLocally(ArenaBitVector* outer_loop_bb_set);
+ void CleanUpControlFlow();
+
+ //
+ // Helpers for ResolveDataFlow
+ //
+
+ // Resolves the inputs of the phi.
+ void ResolvePhi(HPhi* phi);
+
+ //
+ // Debug and logging methods.
+ //
+ void CheckInstructionInputsRemapping(HInstruction* orig_instr);
+
+ HBasicBlock* GetBlockById(uint32_t block_id) const {
+ DCHECK(block_id < graph_->GetBlocks().size());
+ HBasicBlock* block = graph_->GetBlocks()[block_id];
+ DCHECK(block != nullptr);
+ return block;
+ }
+
+ HGraph* const graph_;
+ ArenaAllocator* const arena_;
+
+ // Set of basic blocks in the original graph to be copied.
+ HBasicBlockSet orig_bb_set_;
+
+ // Sets of edges which require successors remapping.
+ const HEdgeSet* remap_orig_internal_;
+ const HEdgeSet* remap_copy_internal_;
+ const HEdgeSet* remap_incoming_;
+
+ // Correspondence map for blocks: (original block, copy block).
+ HBasicBlockMap* bb_map_;
+ // Correspondence map for instructions: (original HInstruction, copy HInstruction).
+ HInstructionMap* hir_map_;
+ // Area in the graph for which control-flow (back edges, loops, dominators) needs to be adjusted.
+ HLoopInformation* outer_loop_;
+ HBasicBlockSet outer_loop_bb_set_;
+
+ ART_FRIEND_TEST(SuperblockClonerTest, AdjustControlFlowInfo);
+
+ DISALLOW_COPY_AND_ASSIGN(SuperblockCloner);
+};
+
+} // namespace art
+
+namespace std {
+
+template <>
+struct hash<art::HEdge> {
+ size_t operator()(art::HEdge const& x) const noexcept {
+ // Use Cantor pairing function as the hash function.
+ uint32_t a = x.GetFrom();
+ uint32_t b = x.GetTo();
+ return (a + b) * (a + b + 1) / 2 + b;
+ }
+};
+
+} // namespace std
+
+#endif // ART_COMPILER_OPTIMIZING_SUPERBLOCK_CLONER_H_
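The std::hash specialization above is the Cantor pairing function pi(a, b) = (a + b) * (a + b + 1) / 2 + b, which maps each pair of non-negative integers to a distinct value (up to uint32_t wrap-around), so edges that differ only in direction hash differently. A couple of hand-worked values as a sanity check (not part of the patch):

// Edge (2 -> 3): (2 + 3) * (2 + 3 + 1) / 2 + 3 = 15 + 3 = 18.
// Edge (3 -> 2): (3 + 2) * (3 + 2 + 1) / 2 + 2 = 15 + 2 = 17.
static_assert((2 + 3) * (2 + 3 + 1) / 2 + 3 == 18, "Cantor pairing, edge (2 -> 3)");
static_assert((3 + 2) * (3 + 2 + 1) / 2 + 2 == 17, "Cantor pairing, edge (3 -> 2)");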
diff --git a/compiler/optimizing/superblock_cloner_test.cc b/compiler/optimizing/superblock_cloner_test.cc
index fd77eb81fc..f1b7bffdf5 100644
--- a/compiler/optimizing/superblock_cloner_test.cc
+++ b/compiler/optimizing/superblock_cloner_test.cc
@@ -17,11 +17,15 @@
#include "graph_checker.h"
#include "nodes.h"
#include "optimizing_unit_test.h"
+#include "superblock_cloner.h"
#include "gtest/gtest.h"
namespace art {
+using HBasicBlockMap = SuperblockCloner::HBasicBlockMap;
+using HInstructionMap = SuperblockCloner::HInstructionMap;
+
// This class provides methods and helpers for testing various cloning and copying routines:
// individual instruction cloning and cloning of the more coarse-grain structures.
class SuperblockClonerTest : public OptimizingUnitTest {
@@ -182,4 +186,121 @@ TEST_F(SuperblockClonerTest, IndividualInstrCloner) {
EXPECT_NE(new_suspend_check, nullptr);
}
+// Tests SuperblockCloner::CloneBasicBlocks - checks instruction cloning and the initial remapping of
+// instructions' inputs.
+TEST_F(SuperblockClonerTest, CloneBasicBlocks) {
+ HBasicBlock* header = nullptr;
+ HBasicBlock* loop_body = nullptr;
+ ArenaAllocator* arena = graph_->GetAllocator();
+
+ CreateBasicLoopControlFlow(&header, &loop_body);
+ CreateBasicLoopDataFlow(header, loop_body);
+ graph_->BuildDominatorTree();
+ ASSERT_TRUE(CheckGraph());
+
+ ArenaBitVector orig_bb_set(
+ arena, graph_->GetBlocks().size(), false, kArenaAllocSuperblockCloner);
+ HBasicBlockMap bb_map(std::less<HBasicBlock*>(), arena->Adapter(kArenaAllocSuperblockCloner));
+ HInstructionMap hir_map(std::less<HInstruction*>(), arena->Adapter(kArenaAllocSuperblockCloner));
+
+ HLoopInformation* loop_info = header->GetLoopInformation();
+ orig_bb_set.Union(&loop_info->GetBlocks());
+
+ SuperblockCloner cloner(graph_,
+ &orig_bb_set,
+ &bb_map,
+ &hir_map);
+ EXPECT_TRUE(cloner.IsSubgraphClonable());
+
+ cloner.CloneBasicBlocks();
+
+ EXPECT_EQ(bb_map.size(), 2u);
+ EXPECT_EQ(hir_map.size(), 12u);
+
+ for (auto it : hir_map) {
+ HInstruction* orig_instr = it.first;
+ HInstruction* copy_instr = it.second;
+
+ EXPECT_EQ(cloner.GetBlockCopy(orig_instr->GetBlock()), copy_instr->GetBlock());
+ EXPECT_EQ(orig_instr->GetKind(), copy_instr->GetKind());
+ EXPECT_EQ(orig_instr->GetType(), copy_instr->GetType());
+
+ if (orig_instr->IsPhi()) {
+ continue;
+ }
+
+ EXPECT_EQ(orig_instr->InputCount(), copy_instr->InputCount());
+
+ // Check that inputs match.
+ for (size_t i = 0, e = orig_instr->InputCount(); i < e; i++) {
+ HInstruction* orig_input = orig_instr->InputAt(i);
+ HInstruction* copy_input = copy_instr->InputAt(i);
+ if (cloner.IsInOrigBBSet(orig_input->GetBlock())) {
+ EXPECT_EQ(cloner.GetInstrCopy(orig_input), copy_input);
+ } else {
+ EXPECT_EQ(orig_input, copy_input);
+ }
+ }
+
+ EXPECT_EQ(orig_instr->HasEnvironment(), copy_instr->HasEnvironment());
+
+ // Check that environments match.
+ if (orig_instr->HasEnvironment()) {
+ HEnvironment* orig_env = orig_instr->GetEnvironment();
+ HEnvironment* copy_env = copy_instr->GetEnvironment();
+
+ EXPECT_EQ(copy_env->GetParent(), nullptr);
+ EXPECT_EQ(orig_env->Size(), copy_env->Size());
+
+ for (size_t i = 0, e = orig_env->Size(); i < e; i++) {
+ HInstruction* orig_input = orig_env->GetInstructionAt(i);
+ HInstruction* copy_input = copy_env->GetInstructionAt(i);
+ if (cloner.IsInOrigBBSet(orig_input->GetBlock())) {
+ EXPECT_EQ(cloner.GetInstrCopy(orig_input), copy_input);
+ } else {
+ EXPECT_EQ(orig_input, copy_input);
+ }
+ }
+ }
+ }
+}
+
+// Tests SuperblockCloner::CleanUpControlFlow - checks the algorithms for local adjustment of the
+// control flow.
+TEST_F(SuperblockClonerTest, AdjustControlFlowInfo) {
+ HBasicBlock* header = nullptr;
+ HBasicBlock* loop_body = nullptr;
+ ArenaAllocator* arena = graph_->GetAllocator();
+
+ CreateBasicLoopControlFlow(&header, &loop_body);
+ CreateBasicLoopDataFlow(header, loop_body);
+ graph_->BuildDominatorTree();
+ ASSERT_TRUE(CheckGraph());
+
+ ArenaBitVector orig_bb_set(
+ arena, graph_->GetBlocks().size(), false, kArenaAllocSuperblockCloner);
+
+ HLoopInformation* loop_info = header->GetLoopInformation();
+ orig_bb_set.Union(&loop_info->GetBlocks());
+
+ SuperblockCloner cloner(graph_,
+ &orig_bb_set,
+ nullptr,
+ nullptr);
+ EXPECT_TRUE(cloner.IsSubgraphClonable());
+
+ cloner.FindAndSetLocalAreaForAdjustments();
+ cloner.CleanUpControlFlow();
+
+ EXPECT_TRUE(CheckGraph());
+
+ EXPECT_TRUE(entry_block_->Dominates(header));
+ EXPECT_TRUE(entry_block_->Dominates(exit_block_));
+
+ EXPECT_EQ(header->GetLoopInformation(), loop_info);
+ EXPECT_EQ(loop_info->GetHeader(), header);
+ EXPECT_TRUE(loop_info->Contains(*loop_body));
+ EXPECT_TRUE(loop_info->IsBackEdge(*loop_body));
+}
+
} // namespace art
diff --git a/compiler/optimizing/suspend_check_test.cc b/compiler/optimizing/suspend_check_test.cc
index 7e83f8ce5f..33823e2a11 100644
--- a/compiler/optimizing/suspend_check_test.cc
+++ b/compiler/optimizing/suspend_check_test.cc
@@ -30,10 +30,10 @@ namespace art {
class SuspendCheckTest : public OptimizingUnitTest {
protected:
- void TestCode(const uint16_t* data);
+ void TestCode(const std::vector<uint16_t>& data);
};
-void SuspendCheckTest::TestCode(const uint16_t* data) {
+void SuspendCheckTest::TestCode(const std::vector<uint16_t>& data) {
HGraph* graph = CreateCFG(data);
HBasicBlock* first_block = graph->GetEntryBlock()->GetSingleSuccessor();
HBasicBlock* loop_header = first_block->GetSingleSuccessor();
@@ -43,7 +43,7 @@ void SuspendCheckTest::TestCode(const uint16_t* data) {
}
TEST_F(SuspendCheckTest, CFG1) {
- const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ZERO_REGISTER_CODE_ITEM(
Instruction::NOP,
Instruction::GOTO | 0xFF00);
@@ -51,14 +51,14 @@ TEST_F(SuspendCheckTest, CFG1) {
}
TEST_F(SuspendCheckTest, CFG2) {
- const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO_32, 0, 0);
TestCode(data);
}
TEST_F(SuspendCheckTest, CFG3) {
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 0xFFFF,
Instruction::RETURN_VOID);
@@ -67,7 +67,7 @@ TEST_F(SuspendCheckTest, CFG3) {
}
TEST_F(SuspendCheckTest, CFG4) {
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_NE, 0xFFFF,
Instruction::RETURN_VOID);
@@ -76,7 +76,7 @@ TEST_F(SuspendCheckTest, CFG4) {
}
TEST_F(SuspendCheckTest, CFG5) {
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQZ, 0xFFFF,
Instruction::RETURN_VOID);
@@ -85,7 +85,7 @@ TEST_F(SuspendCheckTest, CFG5) {
}
TEST_F(SuspendCheckTest, CFG6) {
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_NEZ, 0xFFFF,
Instruction::RETURN_VOID);
diff --git a/compiler/utils/assembler_thumb_test_expected.cc.inc b/compiler/utils/assembler_thumb_test_expected.cc.inc
index 0a094352e4..674dc9a78b 100644
--- a/compiler/utils/assembler_thumb_test_expected.cc.inc
+++ b/compiler/utils/assembler_thumb_test_expected.cc.inc
@@ -153,7 +153,7 @@ const char* const VixlJniHelpersResults[] = {
" 21c: f8d9 8034 ldr.w r8, [r9, #52] ; 0x34\n",
" 220: 4770 bx lr\n",
" 222: 4660 mov r0, ip\n",
- " 224: f8d9 c2c0 ldr.w ip, [r9, #704] ; 0x2c0\n",
+ " 224: f8d9 c2c4 ldr.w ip, [r9, #708] ; 0x2c4\n",
" 228: 47e0 blx ip\n",
nullptr
};
diff --git a/compiler/utils/atomic_dex_ref_map-inl.h b/compiler/utils/atomic_dex_ref_map-inl.h
index 203e484fb7..7023b9a0e8 100644
--- a/compiler/utils/atomic_dex_ref_map-inl.h
+++ b/compiler/utils/atomic_dex_ref_map-inl.h
@@ -75,6 +75,18 @@ inline bool AtomicDexRefMap<DexFileReferenceType, Value>::Get(const DexFileRefer
}
template <typename DexFileReferenceType, typename Value>
+inline bool AtomicDexRefMap<DexFileReferenceType, Value>::Remove(const DexFileReferenceType& ref,
+ Value* out) {
+ ElementArray* const array = GetArray(ref.dex_file);
+ if (array == nullptr) {
+ return false;
+ }
+ *out = (*array)[ref.index].LoadRelaxed();
+ (*array)[ref.index].StoreSequentiallyConsistent(nullptr);
+ return true;
+}
+
+template <typename DexFileReferenceType, typename Value>
inline void AtomicDexRefMap<DexFileReferenceType, Value>::AddDexFile(const DexFile* dex_file) {
arrays_.Put(dex_file, std::move(ElementArray(NumberOfDexIndices(dex_file))));
}
diff --git a/compiler/utils/atomic_dex_ref_map.h b/compiler/utils/atomic_dex_ref_map.h
index 9ff506d6a4..3474e16b8d 100644
--- a/compiler/utils/atomic_dex_ref_map.h
+++ b/compiler/utils/atomic_dex_ref_map.h
@@ -45,6 +45,9 @@ class AtomicDexRefMap {
// Retrieve an item, returns false if the dex file is not added.
bool Get(const DexFileReferenceType& ref, Value* out) const;
+ // Remove an item and return the existing value. Returns false if the dex file is not added.
+ bool Remove(const DexFileReferenceType& ref, Value* out);
+
// Dex files must be added before method references belonging to them can be used as keys. Not
// thread safe.
void AddDexFile(const DexFile* dex_file);
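The new Remove() mirrors Get(): it reads the slot's current value with a relaxed load, then clears the slot with a sequentially consistent store of nullptr, and only returns false when the dex file was never added. Below is a minimal standalone sketch of that pattern, using std::atomic in place of ART's Atomic wrapper and a plain index in place of the dex reference; the container is a simplified stand-in, not the real AtomicDexRefMap.

```cpp
#include <atomic>
#include <cassert>
#include <cstddef>
#include <vector>

// Simplified stand-in for one per-dex-file element array; Value is a pointer type.
template <typename Value>
class SlotArray {
 public:
  explicit SlotArray(size_t n) : slots_(n) {}

  void Put(size_t index, Value value) {
    slots_[index].store(value, std::memory_order_seq_cst);
  }

  // Analogue of AtomicDexRefMap::Remove(): read the existing value, then clear the slot.
  bool Remove(size_t index, Value* out) {
    if (index >= slots_.size()) {
      return false;  // Plays the role of "dex file was not added".
    }
    *out = slots_[index].load(std::memory_order_relaxed);     // LoadRelaxed()
    slots_[index].store(nullptr, std::memory_order_seq_cst);  // StoreSequentiallyConsistent(nullptr)
    return true;
  }

 private:
  std::vector<std::atomic<Value>> slots_;
};

int main() {
  int compiled = 42;
  SlotArray<int*> methods(8);
  methods.Put(3, &compiled);

  int* removed = nullptr;
  assert(methods.Remove(3, &removed) && removed == &compiled);
  assert(methods.Remove(3, &removed) && removed == nullptr);  // Slot was cleared by the first call.
  return 0;
}
```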
diff --git a/compiler/utils/test_dex_file_builder.h b/compiler/utils/test_dex_file_builder.h
index 04fba51dc1..58f1ec7b08 100644
--- a/compiler/utils/test_dex_file_builder.h
+++ b/compiler/utils/test_dex_file_builder.h
@@ -27,6 +27,7 @@
#include <android-base/logging.h>
#include "base/bit_utils.h"
+#include "dex/art_dex_file_loader.h"
#include "dex/dex_file_loader.h"
#include "dex/standard_dex_file.h"
@@ -233,7 +234,8 @@ class TestDexFileBuilder {
static constexpr bool kVerify = false;
static constexpr bool kVerifyChecksum = false;
std::string error_msg;
- std::unique_ptr<const DexFile> dex_file(DexFileLoader::Open(
+ const ArtDexFileLoader dex_file_loader;
+ std::unique_ptr<const DexFile> dex_file(dex_file_loader.Open(
&dex_file_data_[0],
dex_file_data_.size(),
dex_location,
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index 9fcede5e97..8640e2db0e 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -2100,6 +2100,14 @@ void X86Assembler::addl(const Address& address, const Immediate& imm) {
}
+void X86Assembler::addw(const Address& address, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ CHECK(imm.is_uint16() || imm.is_int16()) << imm.value();
+ EmitUint8(0x66);
+ EmitComplex(0, address, imm, /* is_16_op */ true);
+}
+
+
void X86Assembler::adcl(Register reg, const Immediate& imm) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitComplex(2, Operand(reg), imm);
@@ -2751,14 +2759,20 @@ void X86Assembler::EmitOperand(int reg_or_opcode, const Operand& operand) {
}
-void X86Assembler::EmitImmediate(const Immediate& imm) {
- EmitInt32(imm.value());
+void X86Assembler::EmitImmediate(const Immediate& imm, bool is_16_op) {
+ if (is_16_op) {
+ EmitUint8(imm.value() & 0xFF);
+ EmitUint8(imm.value() >> 8);
+ } else {
+ EmitInt32(imm.value());
+ }
}
void X86Assembler::EmitComplex(int reg_or_opcode,
const Operand& operand,
- const Immediate& immediate) {
+ const Immediate& immediate,
+ bool is_16_op) {
CHECK_GE(reg_or_opcode, 0);
CHECK_LT(reg_or_opcode, 8);
if (immediate.is_int8()) {
@@ -2769,11 +2783,11 @@ void X86Assembler::EmitComplex(int reg_or_opcode,
} else if (operand.IsRegister(EAX)) {
// Use short form if the destination is eax.
EmitUint8(0x05 + (reg_or_opcode << 3));
- EmitImmediate(immediate);
+ EmitImmediate(immediate, is_16_op);
} else {
EmitUint8(0x81);
EmitOperand(reg_or_opcode, operand);
- EmitImmediate(immediate);
+ EmitImmediate(immediate, is_16_op);
}
}
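The addw path emits the 0x66 operand-size prefix and then reuses the group-1 ADD encoding, with EmitImmediate() writing the immediate as two bytes (low byte first) when is_16_op is set; for 8-bit immediates the short 0x83 form is still taken. A rough sketch of the resulting bytes for addw $0x1234, (%eax), assuming the ModRM byte for a plain (%eax) memory operand is 0x00 — the helper function below is illustrative only, not part of the assembler:

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

// Hypothetical helper: encode `addw $imm16, (%eax)` the way the new addw() path does.
std::vector<uint8_t> EncodeAddwMemEax(uint16_t imm) {
  std::vector<uint8_t> out;
  out.push_back(0x66);        // Operand-size override -> 16-bit operation.
  out.push_back(0x81);        // Group-1 opcode; /0 selects ADD.
  out.push_back(0x00);        // ModRM: mod=00, reg=/0, rm=EAX -> (%eax).
  out.push_back(imm & 0xFF);  // EmitImmediate(imm, /* is_16_op */ true): low byte first.
  out.push_back(imm >> 8);
  return out;
}

int main() {
  std::vector<uint8_t> expected = {0x66, 0x81, 0x00, 0x34, 0x12};  // addw $0x1234, (%eax)
  assert(EncodeAddwMemEax(0x1234) == expected);
  return 0;
}
```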
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index f3b516cb7e..a085677083 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -634,6 +634,7 @@ class X86Assembler FINAL : public Assembler {
void addl(const Address& address, Register reg);
void addl(const Address& address, const Immediate& imm);
+ void addw(const Address& address, const Immediate& imm);
void adcl(Register dst, Register src);
void adcl(Register reg, const Immediate& imm);
@@ -817,8 +818,9 @@ class X86Assembler FINAL : public Assembler {
inline void EmitOperandSizeOverride();
void EmitOperand(int rm, const Operand& operand);
- void EmitImmediate(const Immediate& imm);
- void EmitComplex(int rm, const Operand& operand, const Immediate& immediate);
+ void EmitImmediate(const Immediate& imm, bool is_16_op = false);
+ void EmitComplex(
+ int rm, const Operand& operand, const Immediate& immediate, bool is_16_op = false);
void EmitLabel(Label* label, int instruction_size);
void EmitLabelLink(Label* label);
void EmitLabelLink(NearLabel* label);
diff --git a/compiler/utils/x86/assembler_x86_test.cc b/compiler/utils/x86/assembler_x86_test.cc
index 36c5c3c0c4..937dd80c4e 100644
--- a/compiler/utils/x86/assembler_x86_test.cc
+++ b/compiler/utils/x86/assembler_x86_test.cc
@@ -258,6 +258,10 @@ TEST_F(AssemblerX86Test, MovlLoad) {
DriverStr(RepeatRA(&x86::X86Assembler::movl, "movl {mem}, %{reg}"), "movl-load");
}
+TEST_F(AssemblerX86Test, Addw) {
+ DriverStr(RepeatAI(&x86::X86Assembler::addw, /*imm_bytes*/ 2U, "addw ${imm}, {mem}"), "addw");
+}
+
TEST_F(AssemblerX86Test, MovlStore) {
DriverStr(RepeatAR(&x86::X86Assembler::movl, "movl %{reg}, {mem}"), "movl-store");
}
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index 51f61ca756..feabf260af 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -2608,6 +2608,15 @@ void X86_64Assembler::addl(const Address& address, const Immediate& imm) {
}
+void X86_64Assembler::addw(const Address& address, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ CHECK(imm.is_uint16() || imm.is_int16()) << imm.value();
+ EmitUint8(0x66);
+ EmitOptionalRex32(address);
+ EmitComplex(0, address, imm, /* is_16_op */ true);
+}
+
+
void X86_64Assembler::subl(CpuRegister dst, CpuRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitOptionalRex32(dst, src);
@@ -3387,8 +3396,11 @@ void X86_64Assembler::EmitOperand(uint8_t reg_or_opcode, const Operand& operand)
}
-void X86_64Assembler::EmitImmediate(const Immediate& imm) {
- if (imm.is_int32()) {
+void X86_64Assembler::EmitImmediate(const Immediate& imm, bool is_16_op) {
+ if (is_16_op) {
+ EmitUint8(imm.value() & 0xFF);
+ EmitUint8(imm.value() >> 8);
+ } else if (imm.is_int32()) {
EmitInt32(static_cast<int32_t>(imm.value()));
} else {
EmitInt64(imm.value());
@@ -3398,7 +3410,8 @@ void X86_64Assembler::EmitImmediate(const Immediate& imm) {
void X86_64Assembler::EmitComplex(uint8_t reg_or_opcode,
const Operand& operand,
- const Immediate& immediate) {
+ const Immediate& immediate,
+ bool is_16_op) {
CHECK_GE(reg_or_opcode, 0);
CHECK_LT(reg_or_opcode, 8);
if (immediate.is_int8()) {
@@ -3409,11 +3422,11 @@ void X86_64Assembler::EmitComplex(uint8_t reg_or_opcode,
} else if (operand.IsRegister(CpuRegister(RAX))) {
// Use short form if the destination is eax.
EmitUint8(0x05 + (reg_or_opcode << 3));
- EmitImmediate(immediate);
+ EmitImmediate(immediate, is_16_op);
} else {
EmitUint8(0x81);
EmitOperand(reg_or_opcode, operand);
- EmitImmediate(immediate);
+ EmitImmediate(immediate, is_16_op);
}
}
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 0d24a751c0..7a5fdb502f 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -693,6 +693,7 @@ class X86_64Assembler FINAL : public Assembler {
void addl(CpuRegister reg, const Address& address);
void addl(const Address& address, CpuRegister reg);
void addl(const Address& address, const Immediate& imm);
+ void addw(const Address& address, const Immediate& imm);
void addq(CpuRegister reg, const Immediate& imm);
void addq(CpuRegister dst, CpuRegister src);
@@ -904,8 +905,9 @@ class X86_64Assembler FINAL : public Assembler {
void EmitOperandSizeOverride();
void EmitOperand(uint8_t rm, const Operand& operand);
- void EmitImmediate(const Immediate& imm);
- void EmitComplex(uint8_t rm, const Operand& operand, const Immediate& immediate);
+ void EmitImmediate(const Immediate& imm, bool is_16_op = false);
+ void EmitComplex(
+ uint8_t rm, const Operand& operand, const Immediate& immediate, bool is_16_op = false);
void EmitLabel(Label* label, int instruction_size);
void EmitLabelLink(Label* label);
void EmitLabelLink(NearLabel* label);
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index 0cb3ffd39f..5e6c83396a 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -578,6 +578,11 @@ TEST_F(AssemblerX86_64Test, AddlImm) {
"add ${imm}, %{reg}"), "addli");
}
+TEST_F(AssemblerX86_64Test, Addw) {
+ DriverStr(
+ RepeatAI(&x86_64::X86_64Assembler::addw, /*imm_bytes*/2U, "addw ${imm}, {mem}"), "addw");
+}
+
TEST_F(AssemblerX86_64Test, ImulqReg1) {
DriverStr(RepeatR(&x86_64::X86_64Assembler::imulq, "imulq %{reg}"), "imulq");
}
diff --git a/dex2oat/Android.bp b/dex2oat/Android.bp
index 6bebf7d2da..ab06ddda2d 100644
--- a/dex2oat/Android.bp
+++ b/dex2oat/Android.bp
@@ -33,8 +33,6 @@ art_cc_defaults {
},
generated_sources: ["art_dex2oat_operator_srcs"],
shared_libs: [
- "libart-compiler",
- "libart-dexlayout",
"libbase",
"liblz4",
"liblzma",
@@ -69,6 +67,7 @@ art_cc_static_library {
defaults: ["libart-dex2oat-defaults"],
shared_libs: [
"libart-compiler",
+ "libart-dexlayout",
"libart",
],
}
@@ -81,6 +80,7 @@ art_cc_static_library {
],
shared_libs: [
"libartd-compiler",
+ "libartd-dexlayout",
"libartd",
],
}
@@ -106,7 +106,6 @@ cc_defaults {
compile_multilib: "prefer32",
},
},
-
header_libs: [
"dex2oat_headers",
"art_cmdlineparser_headers",
@@ -122,6 +121,7 @@ art_cc_binary {
"libart-compiler",
"libart-dexlayout",
"libart",
+ "libdexfile",
"libbase",
"liblz4",
"libsigchain",
@@ -129,6 +129,17 @@ art_cc_binary {
static_libs: [
"libart-dex2oat",
],
+
+ pgo: {
+ instrumentation: true,
+ profile_file: "art/dex2oat.profdata",
+ benchmarks: ["dex2oat"],
+ cflags: [
+ // Ignore frame-size increase resulting from instrumentation.
+ "-Wno-frame-larger-than=",
+ "-DART_PGO_INSTRUMENTATION",
+ ],
+ }
}
art_cc_binary {
@@ -141,6 +152,7 @@ art_cc_binary {
"libartd-compiler",
"libartd-dexlayout",
"libartd",
+ "libdexfile",
"libbase",
"liblz4",
"libsigchain",
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index dabe07f9ce..c4e53987eb 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -605,6 +605,7 @@ class Dex2Oat FINAL {
input_vdex_fd_(-1),
output_vdex_fd_(-1),
input_vdex_file_(nullptr),
+ dm_fd_(-1),
zip_fd_(-1),
image_base_(0U),
image_classes_zip_filename_(nullptr),
@@ -757,6 +758,11 @@ class Dex2Oat FINAL {
Usage("--oat-fd should not be used with --image");
}
+ if ((input_vdex_fd_ != -1 || !input_vdex_.empty()) &&
+ (dm_fd_ != -1 || !dm_file_location_.empty())) {
+ Usage("An input vdex should not be passed with a .dm file");
+ }
+
if (!parser_options->oat_symbols.empty() &&
parser_options->oat_symbols.size() != oat_filenames_.size()) {
Usage("--oat-file arguments do not match --oat-symbols arguments");
@@ -1176,6 +1182,8 @@ class Dex2Oat FINAL {
AssignIfExists(args, M::OutputVdexFd, &output_vdex_fd_);
AssignIfExists(args, M::InputVdex, &input_vdex_);
AssignIfExists(args, M::OutputVdex, &output_vdex_);
+ AssignIfExists(args, M::DmFd, &dm_fd_);
+ AssignIfExists(args, M::DmFile, &dm_file_location_);
AssignIfExists(args, M::OatFd, &oat_fd_);
AssignIfExists(args, M::OatLocation, &oat_location_);
AssignIfExists(args, M::Watchdog, &parser_options->watch_dog_enabled);
@@ -1389,6 +1397,42 @@ class Dex2Oat FINAL {
}
}
+ if (dm_fd_ != -1 || !dm_file_location_.empty()) {
+ std::string error_msg;
+ if (dm_fd_ != -1) {
+ dm_file_.reset(ZipArchive::OpenFromFd(dm_fd_, "DexMetadata", &error_msg));
+ } else {
+ dm_file_.reset(ZipArchive::Open(dm_file_location_.c_str(), &error_msg));
+ }
+ if (dm_file_ == nullptr) {
+ LOG(WARNING) << "Could not open DexMetadata archive " << error_msg;
+ }
+ }
+
+ if (dm_file_ != nullptr) {
+ DCHECK(input_vdex_file_ == nullptr);
+ std::string error_msg;
+ static const char* kDexMetadata = "DexMetadata";
+ std::unique_ptr<ZipEntry> zip_entry(dm_file_->Find(VdexFile::kVdexNameInDmFile, &error_msg));
+ if (zip_entry == nullptr) {
+ LOG(INFO) << "No " << VdexFile::kVdexNameInDmFile << " file in DexMetadata archive. "
+ << "Not doing fast verification.";
+ } else {
+ std::unique_ptr<MemMap> input_file;
+ if (zip_entry->IsUncompressed()) {
+ input_file.reset(zip_entry->MapDirectlyFromFile(VdexFile::kVdexNameInDmFile, &error_msg));
+ } else {
+ input_file.reset(zip_entry->ExtractToMemMap(
+ kDexMetadata, VdexFile::kVdexNameInDmFile, &error_msg));
+ }
+ if (input_file == nullptr) {
+ LOG(WARNING) << "Could not open vdex file in DexMetadata archive: " << error_msg;
+ } else {
+ input_vdex_file_ = std::make_unique<VdexFile>(input_file.release());
+ }
+ }
+ }
+
// Swap file handling
//
// If the swap fd is not -1, we assume this is the file descriptor of an open but unlinked file
@@ -1548,7 +1592,7 @@ class Dex2Oat FINAL {
for (size_t i = 0, size = oat_writers_.size(); i != size; ++i) {
rodata_.push_back(elf_writers_[i]->StartRoData());
// Unzip or copy dex files straight to the oat file.
- std::unique_ptr<MemMap> opened_dex_files_map;
+ std::vector<std::unique_ptr<MemMap>> opened_dex_files_map;
std::vector<std::unique_ptr<const DexFile>> opened_dex_files;
// No need to verify the dex file for:
// 1) Dexlayout since it does the verification. It also may not pass the verification since
@@ -1568,14 +1612,16 @@ class Dex2Oat FINAL {
return dex2oat::ReturnCode::kOther;
}
dex_files_per_oat_file_.push_back(MakeNonOwningPointerVector(opened_dex_files));
- if (opened_dex_files_map != nullptr) {
- opened_dex_files_maps_.push_back(std::move(opened_dex_files_map));
+ if (opened_dex_files_map.empty()) {
+ DCHECK(opened_dex_files.empty());
+ } else {
+ for (std::unique_ptr<MemMap>& map : opened_dex_files_map) {
+ opened_dex_files_maps_.push_back(std::move(map));
+ }
for (std::unique_ptr<const DexFile>& dex_file : opened_dex_files) {
dex_file_oat_index_map_.emplace(dex_file.get(), i);
opened_dex_files_.push_back(std::move(dex_file));
}
- } else {
- DCHECK(opened_dex_files.empty());
}
}
}
@@ -1807,9 +1853,7 @@ class Dex2Oat FINAL {
// We do not decompile a RETURN_VOID_NO_BARRIER into a RETURN_VOID, as the quickening
// optimization does not depend on the boot image (the optimization relies on not
// having final fields in a class, which does not change for an app).
- VdexFile::Unquicken(dex_files_,
- input_vdex_file_->GetQuickeningInfo(),
- /* decompile_return_instruction */ false);
+ input_vdex_file_->Unquicken(dex_files_, /* decompile_return_instruction */ false);
} else {
// Create the main VerifierDeps, here instead of in the compiler since we want to aggregate
// the results for all the dex files, not just the results for the current dex file.
@@ -1831,6 +1875,7 @@ class Dex2Oat FINAL {
jobject class_loader = nullptr;
if (!IsBootImage()) {
class_loader = class_loader_context_->CreateClassLoader(dex_files_);
+ callbacks_->SetDexFiles(&dex_files);
}
// Register dex caches and key them to the class loader so that they only unload when the
@@ -2015,8 +2060,8 @@ class Dex2Oat FINAL {
text_size,
oat_writer->GetBssSize(),
oat_writer->GetBssMethodsOffset(),
- oat_writer->GetBssRootsOffset());
-
+ oat_writer->GetBssRootsOffset(),
+ oat_writer->GetVdexSize());
if (IsImage()) {
// Update oat layout.
DCHECK(image_writer_ != nullptr);
@@ -2043,7 +2088,8 @@ class Dex2Oat FINAL {
// We need to mirror the layout of the ELF file in the compressed debug-info.
// Therefore PrepareDebugInfo() relies on the SetLoadedSectionSizes() call further above.
- elf_writer->PrepareDebugInfo(oat_writer->GetMethodDebugInfo());
+ debug::DebugInfo debug_info = oat_writer->GetDebugInfo(); // Keep the variable alive.
+ elf_writer->PrepareDebugInfo(debug_info); // Processes the data on background thread.
linker::OutputStream*& rodata = rodata_[i];
DCHECK(rodata != nullptr);
@@ -2077,7 +2123,7 @@ class Dex2Oat FINAL {
}
elf_writer->WriteDynamicSection();
- elf_writer->WriteDebugInfo(oat_writer->GetMethodDebugInfo());
+ elf_writer->WriteDebugInfo(oat_writer->GetDebugInfo());
if (!elf_writer->End()) {
LOG(ERROR) << "Failed to write ELF file " << oat_file->GetPath();
@@ -2238,7 +2284,7 @@ class Dex2Oat FINAL {
}
bool DoEagerUnquickeningOfVdex() const {
- return MayInvalidateVdexMetadata();
+ return MayInvalidateVdexMetadata() && dm_file_ == nullptr;
}
bool LoadProfile() {
@@ -2402,7 +2448,7 @@ class Dex2Oat FINAL {
bool AddDexFileSources() {
TimingLogger::ScopedTiming t2("AddDexFileSources", timings_);
- if (input_vdex_file_ != nullptr) {
+ if (input_vdex_file_ != nullptr && input_vdex_file_->HasDexSection()) {
DCHECK_EQ(oat_writers_.size(), 1u);
const std::string& name = zip_location_.empty() ? dex_locations_[0] : zip_location_;
DCHECK(!name.empty());
@@ -2788,6 +2834,9 @@ class Dex2Oat FINAL {
std::string input_vdex_;
std::string output_vdex_;
std::unique_ptr<VdexFile> input_vdex_file_;
+ int dm_fd_;
+ std::string dm_file_location_;
+ std::unique_ptr<ZipArchive> dm_file_;
std::vector<const char*> dex_filenames_;
std::vector<const char*> dex_locations_;
int zip_fd_;
@@ -3063,9 +3112,9 @@ static dex2oat::ReturnCode Dex2oat(int argc, char** argv) {
int main(int argc, char** argv) {
int result = static_cast<int>(art::Dex2oat(argc, argv));
// Everything was done, do an explicit exit here to avoid running Runtime destructors that take
- // time (bug 10645725) unless we're a debug build or running on valgrind. Note: The Dex2Oat class
- // should not destruct the runtime in this case.
- if (!art::kIsDebugBuild && (RUNNING_ON_MEMORY_TOOL == 0)) {
+ // time (bug 10645725) unless we're a debug or instrumented build or running on valgrind. Note:
+ // The Dex2Oat class should not destruct the runtime in this case.
+ if (!art::kIsDebugBuild && !art::kIsPGOInstrumentation && (RUNNING_ON_MEMORY_TOOL == 0)) {
_exit(result);
}
return result;
diff --git a/dex2oat/dex2oat_image_test.cc b/dex2oat/dex2oat_image_test.cc
index 980363b1bb..05592f1806 100644
--- a/dex2oat/dex2oat_image_test.cc
+++ b/dex2oat/dex2oat_image_test.cc
@@ -29,6 +29,7 @@
#include "base/file_utils.h"
#include "base/macros.h"
#include "base/unix_file/fd_file.h"
+#include "dex/art_dex_file_loader.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_file_loader.h"
#include "jit/profile_compilation_info.h"
@@ -65,12 +66,13 @@ class Dex2oatImageTest : public CommonRuntimeTest {
for (const std::string& dex : GetLibCoreDexFileNames()) {
std::vector<std::unique_ptr<const DexFile>> dex_files;
std::string error_msg;
- CHECK(DexFileLoader::Open(dex.c_str(),
- dex,
- /*verify*/ true,
- /*verify_checksum*/ false,
- &error_msg,
- &dex_files))
+ const ArtDexFileLoader dex_file_loader;
+ CHECK(dex_file_loader.Open(dex.c_str(),
+ dex,
+ /*verify*/ true,
+ /*verify_checksum*/ false,
+ &error_msg,
+ &dex_files))
<< error_msg;
for (const std::unique_ptr<const DexFile>& dex_file : dex_files) {
for (size_t i = 0; i < dex_file->NumMethodIds(); ++i) {
diff --git a/dex2oat/dex2oat_options.cc b/dex2oat/dex2oat_options.cc
index d9b4ea7835..0eecc84605 100644
--- a/dex2oat/dex2oat_options.cc
+++ b/dex2oat/dex2oat_options.cc
@@ -86,6 +86,12 @@ static void AddGeneratedArtifactMappings(Builder& builder) {
.Define("--output-vdex=_")
.WithType<std::string>()
.IntoKey(M::OutputVdex)
+ .Define("--dm-fd=_")
+ .WithType<int>()
+ .IntoKey(M::DmFd)
+ .Define("--dm-file=_")
+ .WithType<std::string>()
+ .IntoKey(M::DmFile)
.Define("--oat-file=_")
.WithType<std::vector<std::string>>().AppendValues()
.IntoKey(M::OatFiles)
@@ -249,8 +255,6 @@ static Parser CreateArgumentParser() {
return parser_builder->Build();
}
-#pragma GCC diagnostic pop
-
std::unique_ptr<Dex2oatArgumentMap> Dex2oatArgumentMap::Parse(int argc,
const char** argv,
std::string* error_msg) {
@@ -264,4 +268,5 @@ std::unique_ptr<Dex2oatArgumentMap> Dex2oatArgumentMap::Parse(int argc,
return std::unique_ptr<Dex2oatArgumentMap>(new Dex2oatArgumentMap(parser.ReleaseArgumentsMap()));
}
+#pragma GCC diagnostic pop
} // namespace art
diff --git a/dex2oat/dex2oat_options.def b/dex2oat/dex2oat_options.def
index 9362a3df6f..9a8bdf4aee 100644
--- a/dex2oat/dex2oat_options.def
+++ b/dex2oat/dex2oat_options.def
@@ -43,6 +43,8 @@ DEX2OAT_OPTIONS_KEY (int, InputVdexFd)
DEX2OAT_OPTIONS_KEY (std::string, InputVdex)
DEX2OAT_OPTIONS_KEY (int, OutputVdexFd)
DEX2OAT_OPTIONS_KEY (std::string, OutputVdex)
+DEX2OAT_OPTIONS_KEY (int, DmFd)
+DEX2OAT_OPTIONS_KEY (std::string, DmFile)
DEX2OAT_OPTIONS_KEY (std::vector<std::string>, OatFiles)
DEX2OAT_OPTIONS_KEY (std::vector<std::string>, OatSymbols)
DEX2OAT_OPTIONS_KEY (int, OatFd)
diff --git a/dex2oat/dex2oat_test.cc b/dex2oat/dex2oat_test.cc
index f176cc2839..5614ac6458 100644
--- a/dex2oat/dex2oat_test.cc
+++ b/dex2oat/dex2oat_test.cc
@@ -30,6 +30,7 @@
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "bytecode_utils.h"
+#include "dex/art_dex_file_loader.h"
#include "dex/code_item_accessors-inl.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_file_loader.h"
@@ -108,6 +109,8 @@ class Dex2oatTest : public Dex2oatEnvironmentTest {
[](const OatFile&) {});
}
+ bool test_accepts_odex_file_on_failure = false;
+
template <typename T>
void GenerateOdexForTest(
const std::string& dex_location,
@@ -124,7 +127,7 @@ class Dex2oatTest : public Dex2oatEnvironmentTest {
&error_msg,
extra_args,
use_fd);
- bool success = (status == 0);
+ bool success = (WIFEXITED(status) && WEXITSTATUS(status) == 0);
if (expect_success) {
ASSERT_TRUE(success) << error_msg << std::endl << output_;
@@ -146,16 +149,18 @@ class Dex2oatTest : public Dex2oatEnvironmentTest {
error_msg_ = error_msg;
- // Verify there's no loadable odex file.
- std::unique_ptr<OatFile> odex_file(OatFile::Open(odex_location.c_str(),
- odex_location.c_str(),
- nullptr,
- nullptr,
- false,
- /*low_4gb*/false,
- dex_location.c_str(),
- &error_msg));
- ASSERT_TRUE(odex_file.get() == nullptr);
+ if (!test_accepts_odex_file_on_failure) {
+ // Verify there's no loadable odex file.
+ std::unique_ptr<OatFile> odex_file(OatFile::Open(odex_location.c_str(),
+ odex_location.c_str(),
+ nullptr,
+ nullptr,
+ false,
+ /*low_4gb*/false,
+ dex_location.c_str(),
+ &error_msg));
+ ASSERT_TRUE(odex_file.get() == nullptr);
+ }
}
}
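The success check now decodes the raw child status instead of comparing it to zero: the value filled in by waitpid() encodes both the exit code and any terminating signal, so a plain status == 0 test only means "exited normally with code 0" by accident. A small standalone illustration of the decoding (general POSIX usage, not the test harness itself):

```cpp
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include <cstdio>

int main() {
  pid_t pid = fork();
  if (pid == 0) {
    _exit(7);  // Child exits with a non-zero code.
  }
  int status = 0;
  waitpid(pid, &status, 0);
  // `status` is an encoded value, not the exit code itself; decode it first.
  if (WIFEXITED(status)) {
    std::printf("exited normally, code %d\n", WEXITSTATUS(status));
  } else if (WIFSIGNALED(status)) {
    std::printf("killed by signal %d\n", WTERMSIG(status));
  }
  bool success = WIFEXITED(status) && WEXITSTATUS(status) == 0;
  std::printf("success: %d\n", success);
  return 0;
}
```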
@@ -680,7 +685,8 @@ class Dex2oatLayoutTest : public Dex2oatTest {
const char* location = dex_location.c_str();
std::string error_msg;
std::vector<std::unique_ptr<const DexFile>> dex_files;
- ASSERT_TRUE(DexFileLoader::Open(
+ const ArtDexFileLoader dex_file_loader;
+ ASSERT_TRUE(dex_file_loader.Open(
location, location, /* verify */ true, /* verify_checksum */ true, &error_msg, &dex_files));
EXPECT_EQ(dex_files.size(), 1U);
std::unique_ptr<const DexFile>& dex_file = dex_files[0];
@@ -777,7 +783,7 @@ class Dex2oatLayoutTest : public Dex2oatTest {
app_image_file_name,
/* use_fd */ true,
/* num_profile_classes */ 1,
- { input_vdex, output_vdex, kDisableCompactDex });
+ { input_vdex, output_vdex });
EXPECT_GT(vdex_file1->GetLength(), 0u);
}
{
@@ -815,7 +821,8 @@ class Dex2oatLayoutTest : public Dex2oatTest {
const char* location = dex_location.c_str();
std::vector<std::unique_ptr<const DexFile>> dex_files;
- ASSERT_TRUE(DexFileLoader::Open(
+ const ArtDexFileLoader dex_file_loader;
+ ASSERT_TRUE(dex_file_loader.Open(
location, location, /* verify */ true, /* verify_checksum */ true, &error_msg, &dex_files));
EXPECT_EQ(dex_files.size(), 1U);
std::unique_ptr<const DexFile>& old_dex_file = dex_files[0];
@@ -897,7 +904,7 @@ class Dex2oatUnquickenTest : public Dex2oatTest {
GenerateOdexForTest(dex_location,
odex_location,
CompilerFilter::kQuicken,
- { input_vdex, output_vdex, kDisableCompactDex },
+ { input_vdex, output_vdex },
/* expect_success */ true,
/* use_fd */ true);
EXPECT_GT(vdex_file1->GetLength(), 0u);
@@ -993,7 +1000,12 @@ TEST_F(Dex2oatWatchdogTest, TestWatchdogOK) {
TEST_F(Dex2oatWatchdogTest, TestWatchdogTrigger) {
TEST_DISABLED_FOR_MEMORY_TOOL_VALGRIND(); // b/63052624
- TEST_DISABLED_WITHOUT_BAKER_READ_BARRIERS(); // b/63052624
+
+ // The watchdog is independent of dex2oat and will not delete intermediates. It is possible
+ // that the compilation succeeds and the file is completely written by the time the watchdog
+ // kills dex2oat (but the dex2oat threads must have been scheduled pretty badly).
+ test_accepts_odex_file_on_failure = true;
+
// Check with ten milliseconds.
RunTest(false, { "--watchdog-timeout=10" });
}
@@ -1124,7 +1136,7 @@ TEST_F(Dex2oatClassLoaderContextTest, ContextWithStrippedDexFilesBackedByOdex) {
std::string expected_classpath_key;
{
// Open the oat file to get the expected classpath.
- OatFileAssistant oat_file_assistant(stripped_classpath.c_str(), kRuntimeISA, false);
+ OatFileAssistant oat_file_assistant(stripped_classpath.c_str(), kRuntimeISA, false, false);
std::unique_ptr<OatFile> oat_file(oat_file_assistant.GetBestOatFile());
std::vector<std::unique_ptr<const DexFile>> oat_dex_files =
OatFileAssistant::LoadDexFiles(*oat_file, stripped_classpath.c_str());
@@ -1516,4 +1528,19 @@ TEST_F(Dex2oatDedupeCode, DedupeTest) {
EXPECT_LT(dedupe_size, no_dedupe_size);
}
+TEST_F(Dex2oatTest, UncompressedTest) {
+ std::unique_ptr<const DexFile> dex(OpenTestDexFile("MainUncompressed"));
+ std::string out_dir = GetScratchDir();
+ const std::string base_oat_name = out_dir + "/base.oat";
+ GenerateOdexForTest(dex->GetLocation(),
+ base_oat_name,
+ CompilerFilter::Filter::kQuicken,
+ { },
+ true, // expect_success
+ false, // use_fd
+ [](const OatFile& o) {
+ CHECK(!o.ContainsDexCode());
+ });
+}
+
} // namespace art
diff --git a/dex2oat/linker/elf_writer.h b/dex2oat/linker/elf_writer.h
index 0eb36eda0f..7c4774038e 100644
--- a/dex2oat/linker/elf_writer.h
+++ b/dex2oat/linker/elf_writer.h
@@ -25,6 +25,7 @@
#include "base/array_ref.h"
#include "base/macros.h"
#include "base/mutex.h"
+#include "debug/debug_info.h"
#include "os.h"
namespace art {
@@ -55,18 +56,24 @@ class ElfWriter {
virtual ~ElfWriter() {}
virtual void Start() = 0;
+ // Prepares memory layout of the whole ELF file, and creates dynamic symbols
+ // which point to specific areas of interest (usually section begin and end).
+ // This is needed as multi-image needs to know the memory layout of all ELF
+ // files, before starting to write them.
+ // This method must be called before calling GetLoadedSize().
virtual void PrepareDynamicSection(size_t rodata_size,
size_t text_size,
size_t bss_size,
size_t bss_methods_offset,
- size_t bss_roots_offset) = 0;
- virtual void PrepareDebugInfo(const ArrayRef<const debug::MethodDebugInfo>& method_infos) = 0;
+ size_t bss_roots_offset,
+ size_t dex_section_size) = 0;
+ virtual void PrepareDebugInfo(const debug::DebugInfo& debug_info) = 0;
virtual OutputStream* StartRoData() = 0;
virtual void EndRoData(OutputStream* rodata) = 0;
virtual OutputStream* StartText() = 0;
virtual void EndText(OutputStream* text) = 0;
virtual void WriteDynamicSection() = 0;
- virtual void WriteDebugInfo(const ArrayRef<const debug::MethodDebugInfo>& method_infos) = 0;
+ virtual void WriteDebugInfo(const debug::DebugInfo& debug_info) = 0;
virtual bool End() = 0;
// Get the ELF writer's stream. This stream can be used for writing data directly
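The expanded comment pins down an ordering contract: PrepareDynamicSection() fixes the layout of the whole file (and must run before GetLoadedSize()), the section contents are written afterwards, and WriteDebugInfo() comes last. A toy stand-in for the interface that only encodes that call order, with placeholder sizes; this is not the real ElfWriter API, just its sequencing:

```cpp
#include <cassert>
#include <cstddef>
#include <cstdio>

class ToyElfWriter {
 public:
  void Start() { started_ = true; }
  void PrepareDynamicSection(/* rodata, text, bss, dex section sizes... */) {
    assert(started_);
    prepared_ = true;
  }
  size_t GetLoadedSize() const {
    assert(prepared_ && "PrepareDynamicSection() must be called first");
    return 0x4000;  // Placeholder layout size.
  }
  void PrepareDebugInfo() { assert(prepared_); }   // May kick off background work.
  void WriteSections() { assert(prepared_); }      // .rodata / .text in the real writer.
  void WriteDynamicSection() { assert(prepared_); }
  void WriteDebugInfo() { assert(prepared_); }     // Waits for the background work.
  bool End() { return prepared_; }

 private:
  bool started_ = false;
  bool prepared_ = false;
};

int main() {
  ToyElfWriter writer;
  writer.Start();
  writer.PrepareDynamicSection();
  std::printf("loaded size: %zu\n", writer.GetLoadedSize());
  writer.PrepareDebugInfo();
  writer.WriteSections();
  writer.WriteDynamicSection();
  writer.WriteDebugInfo();
  return writer.End() ? 0 : 1;
}
```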
diff --git a/dex2oat/linker/elf_writer_quick.cc b/dex2oat/linker/elf_writer_quick.cc
index aa64b7d59e..707e877cfb 100644
--- a/dex2oat/linker/elf_writer_quick.cc
+++ b/dex2oat/linker/elf_writer_quick.cc
@@ -54,22 +54,28 @@ class DebugInfoTask : public Task {
public:
DebugInfoTask(InstructionSet isa,
const InstructionSetFeatures* features,
- size_t rodata_section_size,
+ uint64_t text_section_address,
size_t text_section_size,
- const ArrayRef<const debug::MethodDebugInfo>& method_infos)
+ uint64_t dex_section_address,
+ size_t dex_section_size,
+ const debug::DebugInfo& debug_info)
: isa_(isa),
instruction_set_features_(features),
- rodata_section_size_(rodata_section_size),
+ text_section_address_(text_section_address),
text_section_size_(text_section_size),
- method_infos_(method_infos) {
+ dex_section_address_(dex_section_address),
+ dex_section_size_(dex_section_size),
+ debug_info_(debug_info) {
}
void Run(Thread*) {
result_ = debug::MakeMiniDebugInfo(isa_,
instruction_set_features_,
- kPageSize + rodata_section_size_, // .text address.
+ text_section_address_,
text_section_size_,
- method_infos_);
+ dex_section_address_,
+ dex_section_size_,
+ debug_info_);
}
std::vector<uint8_t>* GetResult() {
@@ -79,9 +85,11 @@ class DebugInfoTask : public Task {
private:
InstructionSet isa_;
const InstructionSetFeatures* instruction_set_features_;
- size_t rodata_section_size_;
+ uint64_t text_section_address_;
size_t text_section_size_;
- const ArrayRef<const debug::MethodDebugInfo> method_infos_;
+ uint64_t dex_section_address_;
+ size_t dex_section_size_;
+ const debug::DebugInfo& debug_info_;
std::vector<uint8_t> result_;
};
@@ -99,14 +107,15 @@ class ElfWriterQuick FINAL : public ElfWriter {
size_t text_size,
size_t bss_size,
size_t bss_methods_offset,
- size_t bss_roots_offset) OVERRIDE;
- void PrepareDebugInfo(const ArrayRef<const debug::MethodDebugInfo>& method_infos) OVERRIDE;
+ size_t bss_roots_offset,
+ size_t dex_section_size) OVERRIDE;
+ void PrepareDebugInfo(const debug::DebugInfo& debug_info) OVERRIDE;
OutputStream* StartRoData() OVERRIDE;
void EndRoData(OutputStream* rodata) OVERRIDE;
OutputStream* StartText() OVERRIDE;
void EndText(OutputStream* text) OVERRIDE;
void WriteDynamicSection() OVERRIDE;
- void WriteDebugInfo(const ArrayRef<const debug::MethodDebugInfo>& method_infos) OVERRIDE;
+ void WriteDebugInfo(const debug::DebugInfo& debug_info) OVERRIDE;
bool End() OVERRIDE;
virtual OutputStream* GetStream() OVERRIDE;
@@ -123,6 +132,7 @@ class ElfWriterQuick FINAL : public ElfWriter {
size_t rodata_size_;
size_t text_size_;
size_t bss_size_;
+ size_t dex_section_size_;
std::unique_ptr<BufferedOutputStream> output_stream_;
std::unique_ptr<ElfBuilder<ElfTypes>> builder_;
std::unique_ptr<DebugInfoTask> debug_info_task_;
@@ -162,6 +172,7 @@ ElfWriterQuick<ElfTypes>::ElfWriterQuick(InstructionSet instruction_set,
rodata_size_(0u),
text_size_(0u),
bss_size_(0u),
+ dex_section_size_(0u),
output_stream_(
std::make_unique<BufferedOutputStream>(std::make_unique<FileOutputStream>(elf_file))),
builder_(new ElfBuilder<ElfTypes>(instruction_set, features, output_stream_.get())) {}
@@ -183,19 +194,23 @@ void ElfWriterQuick<ElfTypes>::PrepareDynamicSection(size_t rodata_size,
size_t text_size,
size_t bss_size,
size_t bss_methods_offset,
- size_t bss_roots_offset) {
+ size_t bss_roots_offset,
+ size_t dex_section_size) {
DCHECK_EQ(rodata_size_, 0u);
rodata_size_ = rodata_size;
DCHECK_EQ(text_size_, 0u);
text_size_ = text_size;
DCHECK_EQ(bss_size_, 0u);
bss_size_ = bss_size;
+ DCHECK_EQ(dex_section_size_, 0u);
+ dex_section_size_ = dex_section_size;
builder_->PrepareDynamicSection(elf_file_->GetPath(),
rodata_size_,
text_size_,
bss_size_,
bss_methods_offset,
- bss_roots_offset);
+ bss_roots_offset,
+ dex_section_size);
}
template <typename ElfTypes>
@@ -234,17 +249,18 @@ void ElfWriterQuick<ElfTypes>::WriteDynamicSection() {
}
template <typename ElfTypes>
-void ElfWriterQuick<ElfTypes>::PrepareDebugInfo(
- const ArrayRef<const debug::MethodDebugInfo>& method_infos) {
- if (!method_infos.empty() && compiler_options_->GetGenerateMiniDebugInfo()) {
+void ElfWriterQuick<ElfTypes>::PrepareDebugInfo(const debug::DebugInfo& debug_info) {
+ if (!debug_info.Empty() && compiler_options_->GetGenerateMiniDebugInfo()) {
// Prepare the mini-debug-info in background while we do other I/O.
Thread* self = Thread::Current();
debug_info_task_ = std::unique_ptr<DebugInfoTask>(
new DebugInfoTask(builder_->GetIsa(),
instruction_set_features_,
- rodata_size_,
+ builder_->GetText()->GetAddress(),
text_size_,
- method_infos));
+ builder_->GetDex()->Exists() ? builder_->GetDex()->GetAddress() : 0,
+ dex_section_size_,
+ debug_info));
debug_info_thread_pool_ = std::unique_ptr<ThreadPool>(
new ThreadPool("Mini-debug-info writer", 1));
debug_info_thread_pool_->AddTask(self, debug_info_task_.get());
@@ -253,12 +269,11 @@ void ElfWriterQuick<ElfTypes>::PrepareDebugInfo(
}
template <typename ElfTypes>
-void ElfWriterQuick<ElfTypes>::WriteDebugInfo(
- const ArrayRef<const debug::MethodDebugInfo>& method_infos) {
- if (!method_infos.empty()) {
+void ElfWriterQuick<ElfTypes>::WriteDebugInfo(const debug::DebugInfo& debug_info) {
+ if (!debug_info.Empty()) {
if (compiler_options_->GetGenerateDebugInfo()) {
// Generate all the debug information we can.
- debug::WriteDebugInfo(builder_.get(), method_infos, kCFIFormat, true /* write_oat_patches */);
+ debug::WriteDebugInfo(builder_.get(), debug_info, kCFIFormat, true /* write_oat_patches */);
}
if (compiler_options_->GetGenerateMiniDebugInfo()) {
// Wait for the mini-debug-info generation to finish and write it to disk.
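DebugInfoTask now receives the .text and .dex section addresses directly and runs on a one-thread pool while the writer keeps doing other I/O; WriteDebugInfo() later waits for the result. A rough standalone analogue of that overlap using std::async in place of ART's ThreadPool; function and parameter names here are illustrative only:

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <future>
#include <vector>

// Stand-in for MakeMiniDebugInfo(): pretend to build compressed symbol data for the layout.
std::vector<uint8_t> BuildMiniDebugInfo(uint64_t text_addr, size_t text_size,
                                        uint64_t dex_addr, size_t dex_size) {
  (void)text_addr; (void)text_size; (void)dex_addr; (void)dex_size;
  return std::vector<uint8_t>(16, 0);  // Placeholder payload.
}

int main() {
  // "PrepareDebugInfo": start the expensive work in the background.
  std::future<std::vector<uint8_t>> task =
      std::async(std::launch::async, BuildMiniDebugInfo,
                 /* text_addr */ 0x1000, /* text_size */ 0x8000,
                 /* dex_addr */ 0x20000, /* dex_size */ 0x4000);

  // Meanwhile the writer keeps writing .rodata, .text, dex files, etc.
  std::puts("writing sections while mini debug info is generated");

  // "WriteDebugInfo": wait for the background result and append it to the ELF file.
  std::vector<uint8_t> mini_debug_info = task.get();
  std::printf("mini debug info: %zu bytes\n", mini_debug_info.size());
  return 0;
}
```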
diff --git a/dex2oat/linker/image_test.h b/dex2oat/linker/image_test.h
index 85145d3d64..62519fc48e 100644
--- a/dex2oat/linker/image_test.h
+++ b/dex2oat/linker/image_test.h
@@ -253,7 +253,7 @@ inline void CompilationHelper::Compile(CompilerDriver* driver,
}
std::vector<OutputStream*> rodata;
- std::vector<std::unique_ptr<MemMap>> opened_dex_files_map;
+ std::vector<std::unique_ptr<MemMap>> opened_dex_files_maps;
std::vector<std::unique_ptr<const DexFile>> opened_dex_files;
// Now that we have finalized key_value_store_, start writing the oat file.
for (size_t i = 0, size = oat_writers.size(); i != size; ++i) {
@@ -266,7 +266,7 @@ inline void CompilationHelper::Compile(CompilerDriver* driver,
dex_file->GetLocation().c_str(),
dex_file->GetLocationChecksum());
- std::unique_ptr<MemMap> cur_opened_dex_files_map;
+ std::vector<std::unique_ptr<MemMap>> cur_opened_dex_files_maps;
std::vector<std::unique_ptr<const DexFile>> cur_opened_dex_files;
bool dex_files_ok = oat_writers[i]->WriteAndOpenDexFiles(
vdex_files[i].GetFile(),
@@ -276,12 +276,14 @@ inline void CompilationHelper::Compile(CompilerDriver* driver,
&key_value_store,
/* verify */ false, // Dex files may be dex-to-dex-ed, don't verify.
/* update_input_vdex */ false,
- &cur_opened_dex_files_map,
+ &cur_opened_dex_files_maps,
&cur_opened_dex_files);
ASSERT_TRUE(dex_files_ok);
- if (cur_opened_dex_files_map != nullptr) {
- opened_dex_files_map.push_back(std::move(cur_opened_dex_files_map));
+ if (!cur_opened_dex_files_maps.empty()) {
+ for (std::unique_ptr<MemMap>& cur_map : cur_opened_dex_files_maps) {
+ opened_dex_files_maps.push_back(std::move(cur_map));
+ }
for (std::unique_ptr<const DexFile>& cur_dex_file : cur_opened_dex_files) {
// dex_file_oat_index_map_.emplace(dex_file.get(), i);
opened_dex_files.push_back(std::move(cur_dex_file));
@@ -316,7 +318,8 @@ inline void CompilationHelper::Compile(CompilerDriver* driver,
text_size,
oat_writer->GetBssSize(),
oat_writer->GetBssMethodsOffset(),
- oat_writer->GetBssRootsOffset());
+ oat_writer->GetBssRootsOffset(),
+ oat_writer->GetVdexSize());
writer->UpdateOatFileLayout(i,
elf_writer->GetLoadedSize(),
@@ -338,7 +341,7 @@ inline void CompilationHelper::Compile(CompilerDriver* driver,
writer->UpdateOatFileHeader(i, oat_writer->GetOatHeader());
elf_writer->WriteDynamicSection();
- elf_writer->WriteDebugInfo(oat_writer->GetMethodDebugInfo());
+ elf_writer->WriteDebugInfo(oat_writer->GetDebugInfo());
bool success = elf_writer->End();
ASSERT_TRUE(success);
diff --git a/dex2oat/linker/oat_writer.cc b/dex2oat/linker/oat_writer.cc
index 16d70daddf..0953e0813f 100644
--- a/dex2oat/linker/oat_writer.cc
+++ b/dex2oat/linker/oat_writer.cc
@@ -33,11 +33,13 @@
#include "class_table-inl.h"
#include "compiled_method-inl.h"
#include "debug/method_debug_info.h"
+#include "dex/art_dex_file_loader.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_file_loader.h"
#include "dex/dex_file_types.h"
#include "dex/standard_dex_file.h"
#include "dex/verification_results.h"
+#include "dex_container.h"
#include "dexlayout.h"
#include "driver/compiler_driver-inl.h"
#include "driver/compiler_options.h"
@@ -45,6 +47,7 @@
#include "gc/space/space.h"
#include "handle_scope-inl.h"
#include "image_writer.h"
+#include "jit/profile_compilation_info.h"
#include "linker/buffered_output_stream.h"
#include "linker/file_output_stream.h"
#include "linker/index_bss_mapping_encoder.h"
@@ -57,6 +60,7 @@
#include "mirror/object-inl.h"
#include "oat_quick_method_header.h"
#include "os.h"
+#include "quicken_info.h"
#include "safe_map.h"
#include "scoped_thread_state_change-inl.h"
#include "type_lookup_table.h"
@@ -364,9 +368,11 @@ OatWriter::OatWriter(bool compiling_boot_image,
compiler_driver_(nullptr),
image_writer_(nullptr),
compiling_boot_image_(compiling_boot_image),
+ only_contains_uncompressed_zip_entries_(false),
dex_files_(nullptr),
vdex_size_(0u),
vdex_dex_files_offset_(0u),
+ vdex_dex_shared_data_offset_(0u),
vdex_verifier_deps_offset_(0u),
vdex_quickening_info_offset_(0u),
oat_size_(0u),
@@ -637,7 +643,7 @@ bool OatWriter::WriteAndOpenDexFiles(
SafeMap<std::string, std::string>* key_value_store,
bool verify,
bool update_input_vdex,
- /*out*/ std::unique_ptr<MemMap>* opened_dex_files_map,
+ /*out*/ std::vector<std::unique_ptr<MemMap>>* opened_dex_files_map,
/*out*/ std::vector<std::unique_ptr<const DexFile>>* opened_dex_files) {
CHECK(write_state_ == WriteState::kAddingDexFileSources);
@@ -646,7 +652,7 @@ bool OatWriter::WriteAndOpenDexFiles(
return false;
}
- std::unique_ptr<MemMap> dex_files_map;
+ std::vector<std::unique_ptr<MemMap>> dex_files_map;
std::vector<std::unique_ptr<const DexFile>> dex_files;
// Initialize VDEX and OAT headers.
@@ -1334,7 +1340,7 @@ class OatWriter::LayoutReserveOffsetCodeMethodVisitor : public OrderedMethodVisi
bool has_code_info = method_header->IsOptimized();
// Record debug information for this function if we are doing that.
debug::MethodDebugInfo& info = writer_->method_info_[debug_info_idx];
- DCHECK(info.trampoline_name.empty());
+ DCHECK(info.custom_name.empty());
info.dex_file = method_ref.dex_file;
info.class_def_index = class_def_index;
info.dex_method_index = method_ref.index;
@@ -2418,7 +2424,7 @@ size_t OatWriter::InitOatCode(size_t offset) {
(field) = compiler_driver_->Create ## fn_name(); \
if (generate_debug_info) { \
debug::MethodDebugInfo info = {}; \
- info.trampoline_name = #fn_name; \
+ info.custom_name = #fn_name; \
info.isa = instruction_set; \
info.is_code_address_text_relative = true; \
/* Use the code offset rather than the `adjusted_offset`. */ \
@@ -2617,42 +2623,54 @@ bool OatWriter::WriteRodata(OutputStream* out) {
return true;
}
-class OatWriter::WriteQuickeningInfoMethodVisitor : public DexMethodVisitor {
+class OatWriter::WriteQuickeningInfoMethodVisitor {
public:
- WriteQuickeningInfoMethodVisitor(OatWriter* writer,
- OutputStream* out,
- uint32_t offset,
- SafeMap<const uint8_t*, uint32_t>* offset_map)
- : DexMethodVisitor(writer, offset),
- out_(out),
- written_bytes_(0u),
- offset_map_(offset_map) {}
+ WriteQuickeningInfoMethodVisitor(OatWriter* writer, OutputStream* out)
+ : writer_(writer),
+ out_(out) {}
- bool VisitMethod(size_t class_def_method_index ATTRIBUTE_UNUSED, const ClassDataItemIterator& it)
- OVERRIDE {
- uint32_t method_idx = it.GetMemberIndex();
- CompiledMethod* compiled_method =
- writer_->compiler_driver_->GetCompiledMethod(MethodReference(dex_file_, method_idx));
+ bool VisitDexMethods(const std::vector<const DexFile*>& dex_files) {
+ std::vector<uint8_t> empty_quicken_info;
+ {
+ // Since we need to be able to access by dex method index, put a one byte empty quicken info
+ // for any method that isn't quickened.
+ QuickenInfoTable::Builder empty_info(&empty_quicken_info, /*num_elements*/ 0u);
+ CHECK(!empty_quicken_info.empty());
+ }
+ for (const DexFile* dex_file : dex_files) {
+ std::vector<uint32_t>* const offsets =
+ &quicken_info_offset_indices_.Put(dex_file, std::vector<uint32_t>())->second;
+
+ // Every method needs an index in the table.
+ for (uint32_t method_idx = 0; method_idx < dex_file->NumMethodIds(); ++method_idx) {
+ ArrayRef<const uint8_t> map(empty_quicken_info);
+
+ // Use the existing quicken info if it exists.
+ MethodReference method_ref(dex_file, method_idx);
+ CompiledMethod* compiled_method = writer_->compiler_driver_->GetCompiledMethod(method_ref);
+ if (compiled_method != nullptr && HasQuickeningInfo(compiled_method)) {
+ map = compiled_method->GetVmapTable();
+ }
- if (HasQuickeningInfo(compiled_method)) {
- ArrayRef<const uint8_t> map = compiled_method->GetVmapTable();
- // Deduplication is already done on a pointer basis by the compiler driver,
- // so we can simply compare the pointers to find out if things are duplicated.
- if (offset_map_->find(map.data()) == offset_map_->end()) {
- uint32_t length = map.size() * sizeof(map.front());
- offset_map_->Put(map.data(), written_bytes_);
- if (!out_->WriteFully(&length, sizeof(length)) ||
- !out_->WriteFully(map.data(), length)) {
- PLOG(ERROR) << "Failed to write quickening info for "
- << dex_file_->PrettyMethod(it.GetMemberIndex()) << " to "
- << out_->GetLocation();
+ // The current approach prevents deduplication of quicken infos since each method index
+ // has one unique quicken info. Deduplication does not provide much savings for dex indices
+ // since they are rarely duplicated.
+ const uint32_t length = map.size() * sizeof(map.front());
+
+ // Record each index if required. written_bytes_ is the offset from the start of the
+ // quicken info data.
+ if (QuickenInfoOffsetTableAccessor::IsCoveredIndex(method_idx)) {
+ offsets->push_back(written_bytes_);
+ }
+
+ if (!out_->WriteFully(map.data(), length)) {
+ PLOG(ERROR) << "Failed to write quickening info for " << method_ref.PrettyMethod()
+ << " to " << out_->GetLocation();
return false;
}
- written_bytes_ += sizeof(length) + length;
- offset_ += sizeof(length) + length;
+ written_bytes_ += length;
}
}
-
return true;
}
@@ -2660,71 +2678,59 @@ class OatWriter::WriteQuickeningInfoMethodVisitor : public DexMethodVisitor {
return written_bytes_;
}
+ SafeMap<const DexFile*, std::vector<uint32_t>>& GetQuickenInfoOffsetIndicies() {
+ return quicken_info_offset_indices_;
+ }
+
+
private:
+ OatWriter* const writer_;
OutputStream* const out_;
- size_t written_bytes_;
- // Maps quickening map to its offset in the file.
- SafeMap<const uint8_t*, uint32_t>* offset_map_;
+ size_t written_bytes_ = 0u;
+ // Map of offsets for quicken info related to method indices.
+ SafeMap<const DexFile*, std::vector<uint32_t>> quicken_info_offset_indices_;
};
-class OatWriter::WriteQuickeningIndicesMethodVisitor {
+class OatWriter::WriteQuickeningInfoOffsetsMethodVisitor {
public:
- WriteQuickeningIndicesMethodVisitor(OutputStream* out,
- uint32_t quickening_info_bytes,
- const SafeMap<const uint8_t*, uint32_t>& offset_map)
+ WriteQuickeningInfoOffsetsMethodVisitor(
+ OutputStream* out,
+ uint32_t start_offset,
+ SafeMap<const DexFile*, std::vector<uint32_t>>* quicken_info_offset_indices,
+ std::vector<uint32_t>* out_table_offsets)
: out_(out),
- quickening_info_bytes_(quickening_info_bytes),
- written_bytes_(0u),
- offset_map_(offset_map) {}
+ start_offset_(start_offset),
+ quicken_info_offset_indices_(quicken_info_offset_indices),
+ out_table_offsets_(out_table_offsets) {}
- bool VisitDexMethods(const std::vector<const DexFile*>& dex_files, const CompilerDriver& driver) {
+ bool VisitDexMethods(const std::vector<const DexFile*>& dex_files) {
for (const DexFile* dex_file : dex_files) {
- const size_t class_def_count = dex_file->NumClassDefs();
- for (size_t class_def_index = 0; class_def_index != class_def_count; ++class_def_index) {
- const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_index);
- const uint8_t* class_data = dex_file->GetClassData(class_def);
- if (class_data == nullptr) {
- continue;
- }
- for (ClassDataItemIterator class_it(*dex_file, class_data);
- class_it.HasNext();
- class_it.Next()) {
- if (!class_it.IsAtMethod() || class_it.GetMethodCodeItem() == nullptr) {
- continue;
- }
- uint32_t method_idx = class_it.GetMemberIndex();
- CompiledMethod* compiled_method =
- driver.GetCompiledMethod(MethodReference(dex_file, method_idx));
- const DexFile::CodeItem* code_item = class_it.GetMethodCodeItem();
- CodeItemDebugInfoAccessor accessor(*dex_file, code_item);
- const uint32_t existing_debug_info_offset = accessor.DebugInfoOffset();
- // If the existing offset is already out of bounds (and not magic marker 0xFFFFFFFF)
- // we will pretend the method has been quickened.
- bool existing_offset_out_of_bounds =
- (existing_debug_info_offset >= dex_file->Size() &&
- existing_debug_info_offset != 0xFFFFFFFF);
- bool has_quickening_info = HasQuickeningInfo(compiled_method);
- if (has_quickening_info || existing_offset_out_of_bounds) {
- uint32_t new_debug_info_offset =
- dex_file->Size() + quickening_info_bytes_ + written_bytes_;
- // Abort if overflow.
- CHECK_GE(new_debug_info_offset, dex_file->Size());
- const_cast<DexFile::CodeItem*>(code_item)->SetDebugInfoOffset(new_debug_info_offset);
- uint32_t quickening_offset = has_quickening_info
- ? offset_map_.Get(compiled_method->GetVmapTable().data())
- : VdexFile::kNoQuickeningInfoOffset;
- if (!out_->WriteFully(&existing_debug_info_offset,
- sizeof(existing_debug_info_offset)) ||
- !out_->WriteFully(&quickening_offset, sizeof(quickening_offset))) {
- PLOG(ERROR) << "Failed to write quickening info for "
- << dex_file->PrettyMethod(method_idx) << " to "
- << out_->GetLocation();
- return false;
- }
- written_bytes_ += sizeof(existing_debug_info_offset) + sizeof(quickening_offset);
- }
- }
+ auto it = quicken_info_offset_indices_->find(dex_file);
+ DCHECK(it != quicken_info_offset_indices_->end()) << "Failed to find dex file "
+ << dex_file->GetLocation();
+ const std::vector<uint32_t>* const offsets = &it->second;
+
+ const uint32_t current_offset = start_offset_ + written_bytes_;
+ CHECK_ALIGNED_PARAM(current_offset, QuickenInfoOffsetTableAccessor::Alignment());
+
+ // Generate and write the data.
+ std::vector<uint8_t> table_data;
+ QuickenInfoOffsetTableAccessor::Builder builder(&table_data);
+ for (uint32_t offset : *offsets) {
+ builder.AddOffset(offset);
}
+
+ // Store the offset since we need to put those after the dex file. Table offsets are relative
+ // to the start of the quicken info section.
+ out_table_offsets_->push_back(current_offset);
+
+ const uint32_t length = table_data.size() * sizeof(table_data.front());
+ if (!out_->WriteFully(table_data.data(), length)) {
+ PLOG(ERROR) << "Failed to write quickening offset table for " << dex_file->GetLocation()
+ << " to " << out_->GetLocation();
+ return false;
+ }
+ written_bytes_ += length;
}
return true;
}
@@ -2735,14 +2741,22 @@ class OatWriter::WriteQuickeningIndicesMethodVisitor {
private:
OutputStream* const out_;
- const uint32_t quickening_info_bytes_;
- size_t written_bytes_;
- // Maps quickening map to its offset in the file.
- const SafeMap<const uint8_t*, uint32_t>& offset_map_;
+ const uint32_t start_offset_;
+ size_t written_bytes_ = 0u;
+ // Maps containing the offsets for the tables.
+ SafeMap<const DexFile*, std::vector<uint32_t>>* const quicken_info_offset_indices_;
+ std::vector<uint32_t>* const out_table_offsets_;
};
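Taken together, the two visitors lay out the quickening data as one info blob per method index (with a one-byte empty entry for methods that were never quickened, so every index has a position), followed by a per-dex offset table recording blob offsets for only a subset of method indices, plus a four-byte preheader written just before each dex file that points at its table. Below is a toy model of the blob and offset-table part; the stride of 16 is an assumption for illustration, the real subset being whatever QuickenInfoOffsetTableAccessor::IsCoveredIndex() selects:

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

constexpr uint32_t kStride = 16;  // Assumed stride, for illustration only.
bool IsCoveredIndex(uint32_t method_idx) { return method_idx % kStride == 0; }

struct QuickenLayout {
  std::vector<uint8_t> info_bytes;      // Concatenated per-method quicken info.
  std::vector<uint32_t> table_offsets;  // Offsets (into info_bytes) for covered indices.
};

QuickenLayout Build(const std::vector<std::vector<uint8_t>>& per_method_info) {
  QuickenLayout layout;
  for (uint32_t method_idx = 0; method_idx < per_method_info.size(); ++method_idx) {
    if (IsCoveredIndex(method_idx)) {
      layout.table_offsets.push_back(static_cast<uint32_t>(layout.info_bytes.size()));
    }
    // Every method index contributes an entry, so positions stay well-defined even for
    // methods that were never quickened (the caller passes a one-byte placeholder).
    const std::vector<uint8_t>& info = per_method_info[method_idx];
    layout.info_bytes.insert(layout.info_bytes.end(), info.begin(), info.end());
  }
  return layout;
}

int main() {
  std::vector<std::vector<uint8_t>> infos(40, std::vector<uint8_t>{0});  // Mostly empty entries.
  infos[17] = {4, 1, 2, 3, 4};  // One quickened method with real data.
  QuickenLayout layout = Build(infos);
  assert(layout.table_offsets.size() == 3);            // Entries for indices 0, 16 and 32.
  assert(layout.table_offsets[1] == 16);               // 16 one-byte entries precede index 16.
  assert(layout.table_offsets[2] == 16 + 1 + 5 + 14);  // Index 17 contributed 5 bytes.
  return 0;
}
```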
bool OatWriter::WriteQuickeningInfo(OutputStream* vdex_out) {
+ if (only_contains_uncompressed_zip_entries_) {
+ // Nothing to write. Leave `vdex_size_` untouched and unaligned.
+ vdex_quickening_info_offset_ = vdex_size_;
+ size_quickening_info_alignment_ = 0;
+ return true;
+ }
size_t initial_offset = vdex_size_;
+ // Make sure the table is properly aligned.
size_t start_offset = RoundUp(initial_offset, 4u);
off_t actual_offset = vdex_out->Seek(start_offset, kSeekSet);
@@ -2753,36 +2767,71 @@ bool OatWriter::WriteQuickeningInfo(OutputStream* vdex_out) {
return false;
}
- if (compiler_driver_->GetCompilerOptions().IsAnyCompilationEnabled()) {
+ size_t current_offset = start_offset;
+ if (compiler_driver_->GetCompilerOptions().IsQuickeningCompilationEnabled()) {
std::vector<uint32_t> dex_files_indices;
- SafeMap<const uint8_t*, uint32_t> offset_map;
- WriteQuickeningInfoMethodVisitor visitor1(this, vdex_out, start_offset, &offset_map);
- if (!VisitDexMethods(&visitor1)) {
+ WriteQuickeningInfoMethodVisitor write_quicken_info_visitor(this, vdex_out);
+ if (!write_quicken_info_visitor.VisitDexMethods(*dex_files_)) {
PLOG(ERROR) << "Failed to write the vdex quickening info. File: " << vdex_out->GetLocation();
return false;
}
- if (visitor1.GetNumberOfWrittenBytes() > 0) {
- WriteQuickeningIndicesMethodVisitor visitor2(vdex_out,
- visitor1.GetNumberOfWrittenBytes(),
- offset_map);
- if (!visitor2.VisitDexMethods(*dex_files_, *compiler_driver_)) {
- PLOG(ERROR) << "Failed to write the vdex quickening info. File: "
- << vdex_out->GetLocation();
+ uint32_t quicken_info_offset = write_quicken_info_visitor.GetNumberOfWrittenBytes();
+ current_offset = current_offset + quicken_info_offset;
+ uint32_t before_offset = current_offset;
+ current_offset = RoundUp(current_offset, QuickenInfoOffsetTableAccessor::Alignment());
+ const size_t extra_bytes = current_offset - before_offset;
+ quicken_info_offset += extra_bytes;
+ actual_offset = vdex_out->Seek(current_offset, kSeekSet);
+ if (actual_offset != static_cast<off_t>(current_offset)) {
+ PLOG(ERROR) << "Failed to seek to quickening offset table section. Actual: " << actual_offset
+ << " Expected: " << current_offset
+ << " Output: " << vdex_out->GetLocation();
+ return false;
+ }
+
+ std::vector<uint32_t> table_offsets;
+ WriteQuickeningInfoOffsetsMethodVisitor table_visitor(
+ vdex_out,
+ quicken_info_offset,
+ &write_quicken_info_visitor.GetQuickenInfoOffsetIndicies(),
+ /*out*/ &table_offsets);
+ if (!table_visitor.VisitDexMethods(*dex_files_)) {
+ PLOG(ERROR) << "Failed to write the vdex quickening info. File: "
+ << vdex_out->GetLocation();
+ return false;
+ }
+
+ CHECK_EQ(table_offsets.size(), dex_files_->size());
+
+ current_offset += table_visitor.GetNumberOfWrittenBytes();
+
+ // Store the offset table offset as a preheader for each dex.
+ size_t index = 0;
+ for (const OatDexFile& oat_dex_file : oat_dex_files_) {
+ const off_t desired_offset = oat_dex_file.dex_file_offset_ -
+ sizeof(VdexFile::QuickeningTableOffsetType);
+ actual_offset = vdex_out->Seek(desired_offset, kSeekSet);
+ if (actual_offset != desired_offset) {
+ PLOG(ERROR) << "Failed to seek to before dex file for writing offset table offset: "
+ << actual_offset << " Expected: " << desired_offset
+ << " Output: " << vdex_out->GetLocation();
return false;
}
-
- if (!vdex_out->Flush()) {
- PLOG(ERROR) << "Failed to flush stream after writing quickening info."
+ uint32_t offset = table_offsets[index];
+ if (!vdex_out->WriteFully(reinterpret_cast<const uint8_t*>(&offset), sizeof(offset))) {
+ PLOG(ERROR) << "Failed to write verifier deps."
<< " File: " << vdex_out->GetLocation();
return false;
}
- size_quickening_info_ = visitor1.GetNumberOfWrittenBytes() +
- visitor2.GetNumberOfWrittenBytes();
- } else {
- // We know we did not quicken.
- size_quickening_info_ = 0;
+ ++index;
+ }
+ if (!vdex_out->Flush()) {
+ PLOG(ERROR) << "Failed to flush stream after writing quickening info."
+ << " File: " << vdex_out->GetLocation();
+ return false;
}
+ size_quickening_info_ = current_offset - start_offset;
} else {
// We know we did not quicken.
size_quickening_info_ = 0;
@@ -3295,14 +3344,103 @@ bool OatWriter::WriteDexFiles(OutputStream* out, File* file, bool update_input_v
vdex_dex_files_offset_ = vdex_size_;
- // Write dex files.
+ only_contains_uncompressed_zip_entries_ = true;
for (OatDexFile& oat_dex_file : oat_dex_files_) {
- if (!WriteDexFile(out, file, &oat_dex_file, update_input_vdex)) {
- return false;
+ if (!oat_dex_file.source_.IsZipEntry()) {
+ only_contains_uncompressed_zip_entries_ = false;
+ break;
+ }
+ ZipEntry* entry = oat_dex_file.source_.GetZipEntry();
+ if (!entry->IsUncompressed() || !entry->IsAlignedToDexHeader()) {
+ only_contains_uncompressed_zip_entries_ = false;
+ break;
}
}
- CloseSources();
+ if (!only_contains_uncompressed_zip_entries_) {
+ // Write dex files.
+ for (OatDexFile& oat_dex_file : oat_dex_files_) {
+ if (!WriteDexFile(out, file, &oat_dex_file, update_input_vdex)) {
+ return false;
+ }
+ }
+
+ // Write shared dex file data section and fix up the dex file headers.
+ vdex_dex_shared_data_offset_ = vdex_size_;
+ uint32_t shared_data_size = 0u;
+
+ if (dex_container_ != nullptr) {
+ CHECK(!update_input_vdex) << "Update input vdex should have empty dex container";
+ DexContainer::Section* const section = dex_container_->GetDataSection();
+ if (section->Size() > 0) {
+ const uint32_t shared_data_offset = vdex_size_;
+ const off_t existing_offset = out->Seek(0, kSeekCurrent);
+ if (static_cast<uint32_t>(existing_offset) != shared_data_offset) {
+ LOG(ERROR) << "Expected offset " << shared_data_offset << " but got " << existing_offset;
+ return false;
+ }
+ shared_data_size = section->Size();
+ if (!out->WriteFully(section->Begin(), shared_data_size)) {
+ LOG(ERROR) << "Failed to write shared data!";
+ return false;
+ }
+ // Fix up the dex headers to have correct offsets to the data section.
+ for (OatDexFile& oat_dex_file : oat_dex_files_) {
+ // Overwrite the header by reading it, updating the offset, and writing it back out.
+ DexFile::Header header;
+ if (!file->PreadFully(&header, sizeof(header), oat_dex_file.dex_file_offset_)) {
+ LOG(ERROR) << "Failed to read dex header for updating";
+ return false;
+ }
+ CHECK(CompactDexFile::IsMagicValid(header.magic_)) << "Must be compact dex";
+ CHECK_GT(shared_data_offset, oat_dex_file.dex_file_offset_);
+ // Offset is from the dex file base.
+ header.data_off_ = shared_data_offset - oat_dex_file.dex_file_offset_;
+ // The size should already be the part of the data buffer that may be used by this dex.
+ CHECK_LE(header.data_size_, shared_data_size);
+ if (!file->PwriteFully(&header, sizeof(header), oat_dex_file.dex_file_offset_)) {
+ LOG(ERROR) << "Failed to write dex header for updating";
+ return false;
+ }
+ }
+ section->Clear();
+ if (!out->Flush()) {
+ PLOG(ERROR) << "Failed to flush after writing shared dex section.";
+ return false;
+ }
+ }
+ dex_container_.reset();
+ } else {
+ if (update_input_vdex) {
+ for (OatDexFile& oat_dex_file : oat_dex_files_) {
+ DexFile::Header header;
+ if (!file->PreadFully(&header, sizeof(header), oat_dex_file.dex_file_offset_)) {
+ PLOG(ERROR) << "Failed to read dex header";
+ return false;
+ }
+ if (!CompactDexFile::IsMagicValid(header.magic_)) {
+ // Non compact dex does not have shared data section.
+ continue;
+ }
+ const uint32_t expected_data_off = vdex_dex_shared_data_offset_ -
+ oat_dex_file.dex_file_offset_;
+ if (header.data_off_ != expected_data_off) {
+ PLOG(ERROR) << "Shared data section offset " << header.data_off_
+ << " does not match expected value " << expected_data_off;
+ return false;
+ }
+ // The different dex files currently can have different data sizes since
+ // the dex writer writes them one at a time into the shared section.
+ shared_data_size = std::max(shared_data_size, header.data_size_);
+ }
+ }
+ }
+ vdex_size_ += shared_data_size;
+ size_dex_file_ += shared_data_size;
+ } else {
+ vdex_dex_shared_data_offset_ = vdex_size_;
+ }
+
return true;
}
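When the shared compact-dex data section is appended after the dex files, every dex header's data_off_ is rewritten as shared_data_offset - dex_file_offset_, i.e. an offset relative to that dex file's own base that lands on the shared section. A tiny worked example with hypothetical vdex offsets:

```cpp
#include <cassert>
#include <cstdint>

int main() {
  // Illustrative offsets within the vdex (all values hypothetical):
  const uint32_t dex0_offset = 0x1000;         // First compact dex file starts here.
  const uint32_t dex1_offset = 0x3000;         // Second compact dex file.
  const uint32_t shared_data_offset = 0x5000;  // Shared data section follows the dex files.

  // data_off_ is relative to each dex file's own base, so after the fix-up both headers
  // resolve to the same absolute position of the shared data section.
  const uint32_t dex0_data_off = shared_data_offset - dex0_offset;  // 0x4000
  const uint32_t dex1_data_off = shared_data_offset - dex1_offset;  // 0x2000
  assert(dex0_offset + dex0_data_off == shared_data_offset);
  assert(dex1_offset + dex1_data_off == shared_data_offset);
  return 0;
}
```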
@@ -3323,9 +3461,10 @@ bool OatWriter::WriteDexFile(OutputStream* out,
return false;
}
// update_input_vdex disables compact dex and layout.
- if (!update_input_vdex && (profile_compilation_info_ != nullptr ||
- compact_dex_level_ != CompactDexLevel::kCompactDexLevelNone)) {
- CHECK(!update_input_vdex) << "We should never update the input vdex when doing dexlayout";
+ if (profile_compilation_info_ != nullptr ||
+ compact_dex_level_ != CompactDexLevel::kCompactDexLevelNone) {
+ CHECK(!update_input_vdex)
+ << "We should never update the input vdex when doing dexlayout or compact dex";
if (!LayoutAndWriteDexFile(out, oat_dex_file)) {
return false;
}
@@ -3357,9 +3496,15 @@ bool OatWriter::SeekToDexFile(OutputStream* out, File* file, OatDexFile* oat_dex
// Dex files are required to be 4 byte aligned.
size_t initial_offset = vdex_size_;
size_t start_offset = RoundUp(initial_offset, 4);
- size_t file_offset = start_offset;
size_dex_file_alignment_ += start_offset - initial_offset;
+ // Leave extra room for the quicken offset table offset.
+ start_offset += sizeof(VdexFile::QuickeningTableOffsetType);
+ // TODO: Do not count the offset as part of the alignment.
+ size_dex_file_alignment_ += sizeof(VdexFile::QuickeningTableOffsetType);
+
+ size_t file_offset = start_offset;
+
// Seek to the start of the dex file and flush any pending operations in the stream.
// Verify that, after flushing the stream, the file is at the same offset as the stream.
off_t actual_offset = out->Seek(file_offset, kSeekSet);
@@ -3388,10 +3533,16 @@ bool OatWriter::SeekToDexFile(OutputStream* out, File* file, OatDexFile* oat_dex
}
bool OatWriter::LayoutAndWriteDexFile(OutputStream* out, OatDexFile* oat_dex_file) {
+ // Open dex files and write them into `out`.
+ // Note that we only verify dex files which do not belong to the boot class path.
+ // This is because those have been processed by `hiddenapi` and would not pass
+ // some of the checks. No guarantees are lost, however, as `hiddenapi` verifies
+ // the dex files prior to processing.
TimingLogger::ScopedTiming split("Dex Layout", timings_);
std::string error_msg;
std::string location(oat_dex_file->GetLocation());
std::unique_ptr<const DexFile> dex_file;
+ const ArtDexFileLoader dex_file_loader;
if (oat_dex_file->source_.IsZipEntry()) {
ZipEntry* zip_entry = oat_dex_file->source_.GetZipEntry();
std::unique_ptr<MemMap> mem_map(
@@ -3400,12 +3551,12 @@ bool OatWriter::LayoutAndWriteDexFile(OutputStream* out, OatDexFile* oat_dex_fil
LOG(ERROR) << "Failed to extract dex file to mem map for layout: " << error_msg;
return false;
}
- dex_file = DexFileLoader::Open(location,
- zip_entry->GetCrc32(),
- std::move(mem_map),
- /* verify */ true,
- /* verify_checksum */ true,
- &error_msg);
+ dex_file = dex_file_loader.Open(location,
+ zip_entry->GetCrc32(),
+ std::move(mem_map),
+ /* verify */ !compiling_boot_image_,
+ /* verify_checksum */ true,
+ &error_msg);
} else if (oat_dex_file->source_.IsRawFile()) {
File* raw_file = oat_dex_file->source_.GetRawFile();
int dup_fd = dup(raw_file->Fd());
@@ -3413,8 +3564,11 @@ bool OatWriter::LayoutAndWriteDexFile(OutputStream* out, OatDexFile* oat_dex_fil
PLOG(ERROR) << "Failed to dup dex file descriptor (" << raw_file->Fd() << ") at " << location;
return false;
}
- dex_file = DexFileLoader::OpenDex(
- dup_fd, location, /* verify */ true, /* verify_checksum */ true, &error_msg);
+ dex_file = dex_file_loader.OpenDex(dup_fd, location,
+ /* verify */ !compiling_boot_image_,
+ /* verify_checksum */ true,
+ /* mmap_shared */ false,
+ &error_msg);
} else {
// The source data is a vdex file.
CHECK(oat_dex_file->source_.IsRawData())
@@ -3426,34 +3580,40 @@ bool OatWriter::LayoutAndWriteDexFile(OutputStream* out, OatDexFile* oat_dex_fil
DCHECK(ValidateDexFileHeader(raw_dex_file, oat_dex_file->GetLocation()));
const UnalignedDexFileHeader* header = AsUnalignedDexFileHeader(raw_dex_file);
// Since the source may have had its layout changed, or may be quickened, don't verify it.
- dex_file = DexFileLoader::Open(raw_dex_file,
- header->file_size_,
- location,
- oat_dex_file->dex_file_location_checksum_,
- nullptr,
- /* verify */ false,
- /* verify_checksum */ false,
- &error_msg);
+ dex_file = dex_file_loader.Open(raw_dex_file,
+ header->file_size_,
+ location,
+ oat_dex_file->dex_file_location_checksum_,
+ nullptr,
+ /* verify */ false,
+ /* verify_checksum */ false,
+ &error_msg);
}
if (dex_file == nullptr) {
LOG(ERROR) << "Failed to open dex file for layout: " << error_msg;
return false;
}
Options options;
- options.output_to_memmap_ = true;
options.compact_dex_level_ = compact_dex_level_;
options.update_checksum_ = true;
- DexLayout dex_layout(options, profile_compilation_info_, nullptr);
- dex_layout.ProcessDexFile(location.c_str(), dex_file.get(), 0);
- std::unique_ptr<MemMap> mem_map(dex_layout.GetAndReleaseMemMap());
+ DexLayout dex_layout(options, profile_compilation_info_, /*file*/ nullptr, /*header*/ nullptr);
+ dex_layout.ProcessDexFile(location.c_str(), dex_file.get(), 0, &dex_container_);
oat_dex_file->dex_sections_layout_ = dex_layout.GetSections();
// Dex layout can affect the size of the dex file, so we update here what we have set
// when adding the dex file as a source.
- const UnalignedDexFileHeader* header = AsUnalignedDexFileHeader(mem_map->Begin());
+ const UnalignedDexFileHeader* header =
+ AsUnalignedDexFileHeader(dex_container_->GetMainSection()->Begin());
oat_dex_file->dex_file_size_ = header->file_size_;
- if (!WriteDexFile(out, oat_dex_file, mem_map->Begin(), /* update_input_vdex */ false)) {
+ if (!WriteDexFile(out,
+ oat_dex_file,
+ dex_container_->GetMainSection()->Begin(),
+ /* update_input_vdex */ false)) {
return false;
}
+ if (dex_container_ != nullptr) {
+ // Clear the main section in case we write more data into the container.
+ dex_container_->GetMainSection()->Clear();
+ }
CHECK_EQ(oat_dex_file->dex_file_location_checksum_, dex_file->GetLocationChecksum());
return true;
}
@@ -3626,7 +3786,7 @@ bool OatWriter::WriteDexFile(OutputStream* out,
bool OatWriter::OpenDexFiles(
File* file,
bool verify,
- /*out*/ std::unique_ptr<MemMap>* opened_dex_files_map,
+ /*out*/ std::vector<std::unique_ptr<MemMap>>* opened_dex_files_map,
/*out*/ std::vector<std::unique_ptr<const DexFile>>* opened_dex_files) {
TimingLogger::ScopedTiming split("OpenDexFiles", timings_);
@@ -3635,6 +3795,44 @@ bool OatWriter::OpenDexFiles(
return true;
}
+ if (only_contains_uncompressed_zip_entries_) {
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
+ std::vector<std::unique_ptr<MemMap>> maps;
+ for (OatDexFile& oat_dex_file : oat_dex_files_) {
+ std::string error_msg;
+ MemMap* map = oat_dex_file.source_.GetZipEntry()->MapDirectlyFromFile(
+ oat_dex_file.dex_file_location_data_, &error_msg);
+ if (map == nullptr) {
+ LOG(ERROR) << error_msg;
+ return false;
+ }
+ maps.emplace_back(map);
+ // Now, open the dex file.
+ const ArtDexFileLoader dex_file_loader;
+ dex_files.emplace_back(dex_file_loader.Open(map->Begin(),
+ map->Size(),
+ oat_dex_file.GetLocation(),
+ oat_dex_file.dex_file_location_checksum_,
+ /* oat_dex_file */ nullptr,
+ verify,
+ verify,
+ &error_msg));
+ if (dex_files.back() == nullptr) {
+ LOG(ERROR) << "Failed to open dex file from oat file. File: " << oat_dex_file.GetLocation()
+ << " Error: " << error_msg;
+ return false;
+ }
+ oat_dex_file.class_offsets_.resize(dex_files.back()->GetHeader().class_defs_size_);
+ }
+ *opened_dex_files_map = std::move(maps);
+ *opened_dex_files = std::move(dex_files);
+ CloseSources();
+ return true;
+ }
+ // We could have closed the sources at the point of writing the dex files, but to
+ // keep it consistent with the case where we're not writing the dex files, we close them now.
+ CloseSources();
+
size_t map_offset = oat_dex_files_[0].dex_file_offset_;
size_t length = vdex_size_ - map_offset;
@@ -3653,6 +3851,7 @@ bool OatWriter::OpenDexFiles(
<< " error: " << error_msg;
return false;
}
+ const ArtDexFileLoader dex_file_loader;
std::vector<std::unique_ptr<const DexFile>> dex_files;
for (OatDexFile& oat_dex_file : oat_dex_files_) {
const uint8_t* raw_dex_file =
@@ -3674,14 +3873,14 @@ bool OatWriter::OpenDexFiles(
}
// Now, open the dex file.
- dex_files.emplace_back(DexFileLoader::Open(raw_dex_file,
- oat_dex_file.dex_file_size_,
- oat_dex_file.GetLocation(),
- oat_dex_file.dex_file_location_checksum_,
- /* oat_dex_file */ nullptr,
- verify,
- verify,
- &error_msg));
+ dex_files.emplace_back(dex_file_loader.Open(raw_dex_file,
+ oat_dex_file.dex_file_size_,
+ oat_dex_file.GetLocation(),
+ oat_dex_file.dex_file_location_checksum_,
+ /* oat_dex_file */ nullptr,
+ verify,
+ verify,
+ &error_msg));
if (dex_files.back() == nullptr) {
LOG(ERROR) << "Failed to open dex file from oat file. File: " << oat_dex_file.GetLocation()
<< " Error: " << error_msg;
@@ -3689,11 +3888,11 @@ bool OatWriter::OpenDexFiles(
}
// Set the class_offsets size now that we have easy access to the DexFile and
- // it has been verified in DexFileLoader::Open.
+ // it has been verified in dex_file_loader.Open.
oat_dex_file.class_offsets_.resize(dex_files.back()->GetHeader().class_defs_size_);
}
- *opened_dex_files_map = std::move(dex_files_map);
+ opened_dex_files_map->push_back(std::move(dex_files_map));
*opened_dex_files = std::move(dex_files);
return true;
}
@@ -3876,12 +4075,14 @@ bool OatWriter::WriteChecksumsAndVdexHeader(OutputStream* vdex_out) {
DCHECK_NE(vdex_verifier_deps_offset_, 0u);
DCHECK_NE(vdex_quickening_info_offset_, 0u);
- size_t dex_section_size = vdex_verifier_deps_offset_ - vdex_dex_files_offset_;
+ size_t dex_section_size = vdex_dex_shared_data_offset_ - vdex_dex_files_offset_;
+ size_t dex_shared_data_size = vdex_verifier_deps_offset_ - vdex_dex_shared_data_offset_;
size_t verifier_deps_section_size = vdex_quickening_info_offset_ - vdex_verifier_deps_offset_;
size_t quickening_info_section_size = vdex_size_ - vdex_quickening_info_offset_;
VdexFile::Header vdex_header(oat_dex_files_.size(),
dex_section_size,
+ dex_shared_data_size,
verifier_deps_section_size,
quickening_info_section_size);
if (!vdex_out->WriteFully(&vdex_header, sizeof(VdexFile::Header))) {
@@ -4160,5 +4361,22 @@ const uint8_t* OatWriter::LookupBootImageClassTableSlot(const DexFile& dex_file,
UNREACHABLE();
}
+debug::DebugInfo OatWriter::GetDebugInfo() const {
+ debug::DebugInfo debug_info{};
+ debug_info.compiled_methods = ArrayRef<const debug::MethodDebugInfo>(method_info_);
+ if (VdexWillContainDexFiles()) {
+ DCHECK_EQ(dex_files_->size(), oat_dex_files_.size());
+ for (size_t i = 0, size = dex_files_->size(); i != size; ++i) {
+ const DexFile* dex_file = (*dex_files_)[i];
+ const OatDexFile& oat_dex_file = oat_dex_files_[i];
+ uint32_t dex_file_offset = oat_dex_file.dex_file_offset_;
+ if (dex_file_offset != 0) {
+ debug_info.dex_files.emplace(dex_file_offset, dex_file);
+ }
+ }
+ }
+ return debug_info;
+}
+
} // namespace linker
} // namespace art
diff --git a/dex2oat/linker/oat_writer.h b/dex2oat/linker/oat_writer.h
index ba29e3b3a2..7edb032dd0 100644
--- a/dex2oat/linker/oat_writer.h
+++ b/dex2oat/linker/oat_writer.h
@@ -25,6 +25,7 @@
#include "base/array_ref.h"
#include "base/dchecked_vector.h"
#include "dex/compact_dex_level.h"
+#include "debug/debug_info.h"
#include "linker/relative_patcher.h" // For RelativePatcherTargetProvider.
#include "mem_map.h"
#include "method_reference.h"
@@ -40,6 +41,7 @@ namespace art {
class BitVector;
class CompiledMethod;
class CompilerDriver;
+class DexContainer;
class ProfileCompilationInfo;
class TimingLogger;
class TypeLookupTable;
@@ -173,7 +175,7 @@ class OatWriter {
SafeMap<std::string, std::string>* key_value_store,
bool verify,
bool update_input_vdex,
- /*out*/ std::unique_ptr<MemMap>* opened_dex_files_map,
+ /*out*/ std::vector<std::unique_ptr<MemMap>>* opened_dex_files_map,
/*out*/ std::vector<std::unique_ptr<const DexFile>>* opened_dex_files);
bool WriteQuickeningInfo(OutputStream* vdex_out);
bool WriteVerifierDeps(OutputStream* vdex_out, verifier::VerifierDeps* verifier_deps);
@@ -230,15 +232,17 @@ class OatWriter {
return bss_roots_offset_;
}
+ size_t GetVdexSize() const {
+ return vdex_size_;
+ }
+
size_t GetOatDataOffset() const {
return oat_data_offset_;
}
~OatWriter();
- ArrayRef<const debug::MethodDebugInfo> GetMethodDebugInfo() const {
- return ArrayRef<const debug::MethodDebugInfo>(method_info_);
- }
+ debug::DebugInfo GetDebugInfo() const;
const CompilerDriver* GetCompilerDriver() const {
return compiler_driver_;
@@ -271,7 +275,7 @@ class OatWriter {
class WriteMapMethodVisitor;
class WriteMethodInfoVisitor;
class WriteQuickeningInfoMethodVisitor;
- class WriteQuickeningIndicesMethodVisitor;
+ class WriteQuickeningInfoOffsetsMethodVisitor;
// Visit all the methods in all the compiled dex files in their definition order
// with a given DexMethodVisitor.
@@ -300,7 +304,7 @@ class OatWriter {
bool update_input_vdex);
bool OpenDexFiles(File* file,
bool verify,
- /*out*/ std::unique_ptr<MemMap>* opened_dex_files_map,
+ /*out*/ std::vector<std::unique_ptr<MemMap>>* opened_dex_files_map,
/*out*/ std::vector<std::unique_ptr<const DexFile>>* opened_dex_files);
size_t InitOatHeader(InstructionSet instruction_set,
@@ -336,6 +340,10 @@ class OatWriter {
bool MayHaveCompiledMethods() const;
+ bool VdexWillContainDexFiles() const {
+ return dex_files_ != nullptr && !only_contains_uncompressed_zip_entries_;
+ }
+
// Find the address of the GcRoot<String> in the InternTable for a boot image string.
const uint8_t* LookupBootImageInternTableSlot(const DexFile& dex_file,
dex::StringIndex string_idx);
@@ -367,6 +375,8 @@ class OatWriter {
const CompilerDriver* compiler_driver_;
ImageWriter* image_writer_;
const bool compiling_boot_image_;
+ // Whether the dex files being compiled are all uncompressed in the APK.
+ bool only_contains_uncompressed_zip_entries_;
// note OatFile does not take ownership of the DexFiles
const std::vector<const DexFile*>* dex_files_;
@@ -377,6 +387,9 @@ class OatWriter {
// Offset of section holding Dex files inside Vdex.
size_t vdex_dex_files_offset_;
+ // Offset of section holding shared dex data section in the Vdex.
+ size_t vdex_dex_shared_data_offset_;
+
// Offset of section holding VerifierDeps inside Vdex.
size_t vdex_verifier_deps_offset_;
@@ -513,6 +526,9 @@ class OatWriter {
// This pointer is only non-null after InitOatCodeDexFiles succeeds.
std::unique_ptr<OrderedMethodList> ordered_methods_;
+ // Container of shared dex data.
+ std::unique_ptr<DexContainer> dex_container_;
+
DISALLOW_COPY_AND_ASSIGN(OatWriter);
};
diff --git a/dex2oat/linker/oat_writer_test.cc b/dex2oat/linker/oat_writer_test.cc
index 488806092b..cd6ca51dda 100644
--- a/dex2oat/linker/oat_writer_test.cc
+++ b/dex2oat/linker/oat_writer_test.cc
@@ -32,6 +32,7 @@
#include "driver/compiler_driver.h"
#include "driver/compiler_options.h"
#include "entrypoints/quick/quick_entrypoints.h"
+#include "jit/profile_compilation_info.h"
#include "linker/buffered_output_stream.h"
#include "linker/elf_writer.h"
#include "linker/elf_writer_quick.h"
@@ -187,7 +188,7 @@ class OatTest : public CommonCompilerTest {
oat_file);
elf_writer->Start();
OutputStream* oat_rodata = elf_writer->StartRoData();
- std::unique_ptr<MemMap> opened_dex_files_map;
+ std::vector<std::unique_ptr<MemMap>> opened_dex_files_maps;
std::vector<std::unique_ptr<const DexFile>> opened_dex_files;
if (!oat_writer.WriteAndOpenDexFiles(vdex_file,
oat_rodata,
@@ -196,7 +197,7 @@ class OatTest : public CommonCompilerTest {
&key_value_store,
verify,
/* update_input_vdex */ false,
- &opened_dex_files_map,
+ &opened_dex_files_maps,
&opened_dex_files)) {
return false;
}
@@ -219,7 +220,8 @@ class OatTest : public CommonCompilerTest {
text_size,
oat_writer.GetBssSize(),
oat_writer.GetBssMethodsOffset(),
- oat_writer.GetBssRootsOffset());
+ oat_writer.GetBssRootsOffset(),
+ oat_writer.GetVdexSize());
std::unique_ptr<BufferedOutputStream> vdex_out =
std::make_unique<BufferedOutputStream>(std::make_unique<FileOutputStream>(vdex_file));
@@ -249,13 +251,15 @@ class OatTest : public CommonCompilerTest {
}
elf_writer->WriteDynamicSection();
- elf_writer->WriteDebugInfo(oat_writer.GetMethodDebugInfo());
+ elf_writer->WriteDebugInfo(oat_writer.GetDebugInfo());
if (!elf_writer->End()) {
return false;
}
- opened_dex_files_maps_.emplace_back(std::move(opened_dex_files_map));
+ for (std::unique_ptr<MemMap>& map : opened_dex_files_maps) {
+ opened_dex_files_maps_.emplace_back(std::move(map));
+ }
for (std::unique_ptr<const DexFile>& dex_file : opened_dex_files) {
opened_dex_files_.emplace_back(dex_file.release());
}
@@ -484,7 +488,7 @@ TEST_F(OatTest, OatHeaderSizeCheck) {
EXPECT_EQ(76U, sizeof(OatHeader));
EXPECT_EQ(4U, sizeof(OatMethodOffsets));
EXPECT_EQ(24U, sizeof(OatQuickMethodHeader));
- EXPECT_EQ(161 * static_cast<size_t>(GetInstructionSetPointerSize(kRuntimeISA)),
+ EXPECT_EQ(162 * static_cast<size_t>(GetInstructionSetPointerSize(kRuntimeISA)),
sizeof(QuickEntryPoints));
}
@@ -658,7 +662,11 @@ void OatTest::TestDexFileInput(bool verify, bool low_4gb, bool use_profile) {
ASSERT_EQ(dex_file2_data->GetLocation(), opened_dex_file2->GetLocation());
const VdexFile::Header &vdex_header = opened_oat_file->GetVdexFile()->GetHeader();
- ASSERT_EQ(vdex_header.GetQuickeningInfoSize(), 0u);
+ if (!compiler_driver_->GetCompilerOptions().IsQuickeningCompilationEnabled()) {
+ // When quickening is enabled the table is always written, since there is no special logic
+ // that checks whether no methods were quickened (not worth the complexity).
+ ASSERT_EQ(vdex_header.GetQuickeningInfoSize(), 0u);
+ }
int64_t actual_vdex_size = vdex_file.GetFile()->GetLength();
ASSERT_GE(actual_vdex_size, 0);
diff --git a/dexdump/Android.bp b/dexdump/Android.bp
index 4916d643c6..f6b7a6b68a 100644
--- a/dexdump/Android.bp
+++ b/dexdump/Android.bp
@@ -14,33 +14,37 @@
// TODO(ajcbik): rename dexdump2 into dexdump when Dalvik version is removed
-art_cc_binary {
- name: "dexdump2",
- host_supported: true,
+
+cc_defaults {
+ name: "dexdump_defaults",
srcs: [
"dexdump_cfg.cc",
"dexdump_main.cc",
"dexdump.cc",
],
cflags: ["-Wall", "-Werror"],
+ // TODO: fix b/72216369 and remove the need for this.
+ include_dirs: [
+ "art/runtime" // dex utils.
+ ],
+}
+
+art_cc_binary {
+ name: "dexdump2",
+ defaults: ["dexdump_defaults"],
+ host_supported: true,
shared_libs: [
- "libart",
+ "libdexfile",
"libbase",
],
}
art_cc_binary {
name: "dexdumps",
+ defaults: ["dexdump_defaults"],
host_supported: true,
device_supported: false,
- srcs: [
- "dexdump_cfg.cc",
- "dexdump_main.cc",
- "dexdump.cc",
- ],
- cflags: ["-Wall", "-Werror"],
static_libs: [
- "libart",
"libbase",
] + art_static_dependencies,
target: {
diff --git a/dexdump/dexdump.cc b/dexdump/dexdump.cc
index 2c98e12741..8778b129c5 100644
--- a/dexdump/dexdump.cc
+++ b/dexdump/dexdump.cc
@@ -34,17 +34,23 @@
#include "dexdump.h"
+#include <fcntl.h>
#include <inttypes.h>
#include <stdio.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
#include <iostream>
#include <memory>
#include <sstream>
#include <vector>
+#include "android-base/logging.h"
#include "android-base/stringprintf.h"
-#include "dex/code_item_accessors-no_art-inl.h"
+#include "dex/code_item_accessors-inl.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_file_exception_helpers.h"
#include "dex/dex_file_loader.h"
@@ -1170,14 +1176,20 @@ static void dumpBytecodes(const DexFile* pDexFile, u4 idx,
// Iterate over all instructions.
CodeItemDataAccessor accessor(*pDexFile, pCode);
+ const u4 maxPc = accessor.InsnsSizeInCodeUnits();
for (const DexInstructionPcPair& pair : accessor) {
+ const u4 dexPc = pair.DexPc();
+ if (dexPc >= maxPc) {
+ LOG(WARNING) << "GLITCH: run-away instruction at idx=0x" << std::hex << dexPc;
+ break;
+ }
const Instruction* instruction = &pair.Inst();
const u4 insnWidth = instruction->SizeInCodeUnits();
if (insnWidth == 0) {
- fprintf(stderr, "GLITCH: zero-width instruction at idx=0x%04x\n", pair.DexPc());
+ LOG(WARNING) << "GLITCH: zero-width instruction at idx=0x" << std::hex << dexPc;
break;
}
- dumpInstruction(pDexFile, pCode, codeOffset, pair.DexPc(), insnWidth, instruction);
+ dumpInstruction(pDexFile, pCode, codeOffset, dexPc, insnWidth, instruction);
} // for
}
@@ -1186,7 +1198,7 @@ static void dumpBytecodes(const DexFile* pDexFile, u4 idx,
*/
static void dumpCode(const DexFile* pDexFile, u4 idx, u4 flags,
const DexFile::CodeItem* pCode, u4 codeOffset) {
- CodeItemDebugInfoAccessor accessor(*pDexFile, pCode, pDexFile->GetDebugInfoOffset(pCode));
+ CodeItemDebugInfoAccessor accessor(*pDexFile, pCode, idx);
fprintf(gOutFile, " registers : %d\n", accessor.RegistersSize());
fprintf(gOutFile, " ins : %d\n", accessor.InsSize());
@@ -1254,7 +1266,7 @@ static void dumpMethod(const DexFile* pDexFile, u4 idx, u4 flags,
fprintf(gOutFile, "<method name=\"%s\"\n", name);
const char* returnType = strrchr(typeDescriptor, ')');
if (returnType == nullptr) {
- fprintf(stderr, "bad method type descriptor '%s'\n", typeDescriptor);
+ LOG(ERROR) << "bad method type descriptor '" << typeDescriptor << "'";
goto bail;
}
std::unique_ptr<char[]> dot(descriptorToDot(returnType + 1));
@@ -1273,7 +1285,7 @@ static void dumpMethod(const DexFile* pDexFile, u4 idx, u4 flags,
// Parameters.
if (typeDescriptor[0] != '(') {
- fprintf(stderr, "ERROR: bad descriptor '%s'\n", typeDescriptor);
+ LOG(ERROR) << "ERROR: bad descriptor '" << typeDescriptor << "'";
goto bail;
}
char* tmpBuf = reinterpret_cast<char*>(malloc(strlen(typeDescriptor) + 1));
@@ -1292,7 +1304,7 @@ static void dumpMethod(const DexFile* pDexFile, u4 idx, u4 flags,
} else {
// Primitive char, copy it.
if (strchr("ZBCSIFJD", *base) == nullptr) {
- fprintf(stderr, "ERROR: bad method signature '%s'\n", base);
+ LOG(ERROR) << "ERROR: bad method signature '" << base << "'";
break; // while
}
*cp++ = *base++;
@@ -1439,7 +1451,7 @@ static void dumpClass(const DexFile* pDexFile, int idx, char** pLastPackage) {
if (!(classDescriptor[0] == 'L' &&
classDescriptor[strlen(classDescriptor)-1] == ';')) {
// Arrays and primitives should not be defined explicitly. Keep going?
- fprintf(stderr, "Malformed class name '%s'\n", classDescriptor);
+ LOG(WARNING) << "Malformed class name '" << classDescriptor << "'";
} else if (gOptions.outputFormat == OUTPUT_XML) {
char* mangle = strdup(classDescriptor + 1);
mangle[strlen(mangle)-1] = '\0';
@@ -1689,7 +1701,7 @@ static void dumpCallSite(const DexFile* pDexFile, u4 idx) {
const DexFile::CallSiteIdItem& call_site_id = pDexFile->GetCallSiteId(idx);
CallSiteArrayValueIterator it(*pDexFile, call_site_id);
if (it.Size() < 3) {
- fprintf(stderr, "ERROR: Call site %u has too few values.\n", idx);
+ LOG(ERROR) << "ERROR: Call site " << idx << " has too few values.";
return;
}
@@ -1867,6 +1879,34 @@ static void processDexFile(const char* fileName,
}
}
+static bool openAndMapFile(const char* fileName,
+ const uint8_t** base,
+ size_t* size,
+ std::string* error_msg) {
+ int fd = open(fileName, O_RDONLY);
+ if (fd < 0) {
+ *error_msg = "open failed";
+ return false;
+ }
+ struct stat st;
+ if (fstat(fd, &st) < 0) {
+ *error_msg = "stat failed";
+ return false;
+ }
+ *size = st.st_size;
+ if (*size == 0) {
+ *error_msg = "size == 0";
+ return false;
+ }
+ void* addr = mmap(nullptr /*addr*/, *size, PROT_READ, MAP_PRIVATE, fd, 0 /*offset*/);
+ if (addr == MAP_FAILED) {
+ *error_msg = "mmap failed";
+ return false;
+ }
+ *base = reinterpret_cast<const uint8_t*>(addr);
+ return true;
+}
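A hedged usage sketch of a read-only mmap helper like openAndMapFile above, rewritten as a standalone program rather than the dexdump code; note that the helper above leaves the descriptor and mapping open for the lifetime of the tool, while this sketch cleans up before exiting.

    #include <fcntl.h>
    #include <sys/mman.h>
    #include <sys/stat.h>
    #include <unistd.h>
    #include <cstdio>

    int main(int argc, char** argv) {
      if (argc < 2) return 1;
      int fd = open(argv[1], O_RDONLY);
      if (fd < 0) return 1;
      struct stat st;
      if (fstat(fd, &st) < 0 || st.st_size == 0) { close(fd); return 1; }
      void* addr = mmap(nullptr, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
      if (addr == MAP_FAILED) { close(fd); return 1; }
      const unsigned char* base = static_cast<const unsigned char*>(addr);
      std::printf("first byte: 0x%02x, size: %lld\n",
                  static_cast<unsigned>(base[0]), static_cast<long long>(st.st_size));
      munmap(addr, st.st_size);
      close(fd);
      return 0;
    }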
+
/*
* Processes a single file (either direct .dex or indirect .zip/.jar/.apk).
*/
@@ -1878,14 +1918,20 @@ int processFile(const char* fileName) {
// If the file is not a .dex file, the function tries .zip/.jar/.apk files,
// all of which are Zip archives with "classes.dex" inside.
const bool kVerifyChecksum = !gOptions.ignoreBadChecksum;
+ const uint8_t* base = nullptr;
+ size_t size = 0;
std::string error_msg;
+ if (!openAndMapFile(fileName, &base, &size, &error_msg)) {
+ LOG(ERROR) << error_msg;
+ return -1;
+ }
+ const DexFileLoader dex_file_loader;
std::vector<std::unique_ptr<const DexFile>> dex_files;
- if (!DexFileLoader::Open(
- fileName, fileName, /* verify */ true, kVerifyChecksum, &error_msg, &dex_files)) {
+ if (!dex_file_loader.OpenAll(
+ base, size, fileName, /*verify*/ true, kVerifyChecksum, &error_msg, &dex_files)) {
// Display returned error message to user. Note that this error behavior
// differs from the error messages shown by the original Dalvik dexdump.
- fputs(error_msg.c_str(), stderr);
- fputc('\n', stderr);
+ LOG(ERROR) << error_msg;
return -1;
}
diff --git a/dexdump/dexdump_cfg.cc b/dexdump/dexdump_cfg.cc
index 0e313572bc..69ee0682a3 100644
--- a/dexdump/dexdump_cfg.cc
+++ b/dexdump/dexdump_cfg.cc
@@ -25,7 +25,7 @@
#include <set>
#include <sstream>
-#include "dex/code_item_accessors-no_art-inl.h"
+#include "dex/code_item_accessors-inl.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_file_exception_helpers.h"
#include "dex/dex_instruction-inl.h"
diff --git a/dexdump/dexdump_main.cc b/dexdump/dexdump_main.cc
index 382b551a1a..3c16fbe008 100644
--- a/dexdump/dexdump_main.cc
+++ b/dexdump/dexdump_main.cc
@@ -30,10 +30,6 @@
#include <android-base/logging.h>
-#include <base/logging.h> // For InitLogging.
-#include "mem_map.h"
-#include "runtime.h"
-
namespace art {
static const char* gProgName = "dexdump";
@@ -42,29 +38,25 @@ static const char* gProgName = "dexdump";
* Shows usage.
*/
static void usage(void) {
- fprintf(stderr, "Copyright (C) 2007 The Android Open Source Project\n\n");
- fprintf(stderr, "%s: [-a] [-c] [-d] [-e] [-f] [-h] [-i] [-l layout] [-o outfile]"
- " dexfile...\n\n", gProgName);
- fprintf(stderr, " -a : display annotations\n");
- fprintf(stderr, " -c : verify checksum and exit\n");
- fprintf(stderr, " -d : disassemble code sections\n");
- fprintf(stderr, " -e : display exported items only\n");
- fprintf(stderr, " -f : display summary information from file header\n");
- fprintf(stderr, " -g : display CFG for dex\n");
- fprintf(stderr, " -h : display file header details\n");
- fprintf(stderr, " -i : ignore checksum failures\n");
- fprintf(stderr, " -l : output layout, either 'plain' or 'xml'\n");
- fprintf(stderr, " -o : output file name (defaults to stdout)\n");
+ LOG(ERROR) << "Copyright (C) 2007 The Android Open Source Project\n";
+ LOG(ERROR) << gProgName << ": [-a] [-c] [-d] [-e] [-f] [-h] [-i] [-l layout] [-o outfile]"
+ " dexfile...\n";
+ LOG(ERROR) << " -a : display annotations";
+ LOG(ERROR) << " -c : verify checksum and exit";
+ LOG(ERROR) << " -d : disassemble code sections";
+ LOG(ERROR) << " -e : display exported items only";
+ LOG(ERROR) << " -f : display summary information from file header";
+ LOG(ERROR) << " -g : display CFG for dex";
+ LOG(ERROR) << " -h : display file header details";
+ LOG(ERROR) << " -i : ignore checksum failures";
+ LOG(ERROR) << " -l : output layout, either 'plain' or 'xml'";
+ LOG(ERROR) << " -o : output file name (defaults to stdout)";
}
/*
* Main driver of the dexdump utility.
*/
int dexdumpDriver(int argc, char** argv) {
- // Art specific set up.
- InitLogging(argv, Runtime::Abort);
- MemMap::Init();
-
// Reset options.
bool wantUsage = false;
memset(&gOptions, 0, sizeof(gOptions));
@@ -122,11 +114,11 @@ int dexdumpDriver(int argc, char** argv) {
// Detect early problems.
if (optind == argc) {
- fprintf(stderr, "%s: no file specified\n", gProgName);
+ LOG(ERROR) << "No file specified";
wantUsage = true;
}
if (gOptions.checksumOnly && gOptions.ignoreBadChecksum) {
- fprintf(stderr, "Can't specify both -c and -i\n");
+ LOG(ERROR) << "Can't specify both -c and -i";
wantUsage = true;
}
if (wantUsage) {
@@ -138,7 +130,7 @@ int dexdumpDriver(int argc, char** argv) {
if (gOptions.outputFileName) {
gOutFile = fopen(gOptions.outputFileName, "w");
if (!gOutFile) {
- fprintf(stderr, "Can't open %s\n", gOptions.outputFileName);
+ PLOG(ERROR) << "Can't open " << gOptions.outputFileName;
return 1;
}
}
@@ -154,5 +146,8 @@ int dexdumpDriver(int argc, char** argv) {
} // namespace art
int main(int argc, char** argv) {
+ // Output all logging to stderr.
+ android::base::SetLogger(android::base::StderrLogger);
+
return art::dexdumpDriver(argc, argv);
}
diff --git a/dexlayout/Android.bp b/dexlayout/Android.bp
index a02f75ad00..3ea7f4ba82 100644
--- a/dexlayout/Android.bp
+++ b/dexlayout/Android.bp
@@ -26,7 +26,10 @@ art_cc_defaults {
"dex_writer.cc",
],
export_include_dirs: ["."],
- shared_libs: ["libbase"],
+ shared_libs: [
+ "libdexfile",
+ "libbase",
+ ],
static_libs: ["libz"],
}
@@ -34,6 +37,12 @@ art_cc_library {
name: "libart-dexlayout",
defaults: ["libart-dexlayout-defaults"],
shared_libs: ["libart"],
+
+ pgo: {
+ instrumentation: true,
+ profile_file: "art/dex2oat.profdata",
+ benchmarks: ["dex2oat"],
+ }
}
art_cc_library {
@@ -79,6 +88,7 @@ art_cc_binary {
art_cc_test {
name: "art_dexlayout_tests",
defaults: ["art_gtest_defaults"],
+ shared_libs: ["libart-dexlayout"],
srcs: ["dexlayout_test.cc"],
}
diff --git a/dexlayout/compact_dex_writer.cc b/dexlayout/compact_dex_writer.cc
index 1c5b16d84b..ca13f7588b 100644
--- a/dexlayout/compact_dex_writer.cc
+++ b/dexlayout/compact_dex_writer.cc
@@ -16,11 +16,259 @@
#include "compact_dex_writer.h"
+#include "base/logging.h"
+#include "base/time_utils.h"
+#include "dex/compact_dex_debug_info.h"
#include "dex/compact_dex_file.h"
+#include "dexlayout.h"
namespace art {
-void CompactDexWriter::WriteHeader() {
+CompactDexWriter::CompactDexWriter(DexLayout* dex_layout)
+ : DexWriter(dex_layout, /*compute_offsets*/ true) {
+ CHECK(GetCompactDexLevel() != CompactDexLevel::kCompactDexLevelNone);
+}
+
+CompactDexLevel CompactDexWriter::GetCompactDexLevel() const {
+ return dex_layout_->GetOptions().compact_dex_level_;
+}
+
+CompactDexWriter::Container::Container(bool dedupe_code_items)
+ : code_item_dedupe_(dedupe_code_items, &data_section_),
+ data_item_dedupe_(/*dedupe*/ true, &data_section_) {}
+
+uint32_t CompactDexWriter::WriteDebugInfoOffsetTable(Stream* stream) {
+ const uint32_t start_offset = stream->Tell();
+ const dex_ir::Collections& collections = header_->GetCollections();
+ // Debug offsets for method indexes. 0 means no debug info.
+ std::vector<uint32_t> debug_info_offsets(collections.MethodIdsSize(), 0u);
+
+ static constexpr InvokeType invoke_types[] = {
+ kDirect,
+ kVirtual
+ };
+
+ for (InvokeType invoke_type : invoke_types) {
+ for (const std::unique_ptr<dex_ir::ClassDef>& class_def : collections.ClassDefs()) {
+ // Skip classes that are not defined in this dex file.
+ dex_ir::ClassData* class_data = class_def->GetClassData();
+ if (class_data == nullptr) {
+ continue;
+ }
+ for (auto& method : *(invoke_type == InvokeType::kDirect
+ ? class_data->DirectMethods()
+ : class_data->VirtualMethods())) {
+ const dex_ir::MethodId* method_id = method->GetMethodId();
+ dex_ir::CodeItem* code_item = method->GetCodeItem();
+ if (code_item != nullptr && code_item->DebugInfo() != nullptr) {
+ const uint32_t debug_info_offset = code_item->DebugInfo()->GetOffset();
+ const uint32_t method_idx = method_id->GetIndex();
+ if (debug_info_offsets[method_idx] != 0u) {
+ CHECK_EQ(debug_info_offset, debug_info_offsets[method_idx]);
+ }
+ debug_info_offsets[method_idx] = debug_info_offset;
+ }
+ }
+ }
+ }
+
+ std::vector<uint8_t> data;
+ debug_info_base_ = 0u;
+ debug_info_offsets_table_offset_ = 0u;
+ CompactDexDebugInfoOffsetTable::Build(debug_info_offsets,
+ &data,
+ &debug_info_base_,
+ &debug_info_offsets_table_offset_);
+ // Align the table and write it out.
+ stream->AlignTo(CompactDexDebugInfoOffsetTable::kAlignment);
+ debug_info_offsets_pos_ = stream->Tell();
+ stream->Write(data.data(), data.size());
+
+ // Verify that the whole table decodes as expected and measure average performance.
+ const bool kMeasureAndTestOutput = dex_layout_->GetOptions().verify_output_;
+ if (kMeasureAndTestOutput && !debug_info_offsets.empty()) {
+ uint64_t start_time = NanoTime();
+ stream->Begin();
+ CompactDexDebugInfoOffsetTable::Accessor accessor(stream->Begin() + debug_info_offsets_pos_,
+ debug_info_base_,
+ debug_info_offsets_table_offset_);
+
+ for (size_t i = 0; i < debug_info_offsets.size(); ++i) {
+ CHECK_EQ(accessor.GetDebugInfoOffset(i), debug_info_offsets[i]);
+ }
+ uint64_t end_time = NanoTime();
+ VLOG(dex) << "Average lookup time (ns) for debug info offsets: "
+ << (end_time - start_time) / debug_info_offsets.size();
+ }
+
+ return stream->Tell() - start_offset;
+}
+
+CompactDexWriter::ScopedDataSectionItem::ScopedDataSectionItem(Stream* stream,
+ dex_ir::Item* item,
+ size_t alignment,
+ Deduper* deduper)
+ : stream_(stream),
+ item_(item),
+ alignment_(alignment),
+ deduper_(deduper),
+ start_offset_(stream->Tell()) {
+ stream_->AlignTo(alignment_);
+}
+
+CompactDexWriter::ScopedDataSectionItem::~ScopedDataSectionItem() {
+ // After having written, maybe dedupe the whole code item (excluding padding).
+ const uint32_t deduped_offset = deduper_->Dedupe(start_offset_,
+ stream_->Tell(),
+ item_->GetOffset());
+ // If we deduped, only use the deduped offset if the alignment matches the required alignment.
+ // Otherwise, return without deduping.
+ if (deduped_offset != Deduper::kDidNotDedupe && IsAlignedParam(deduped_offset, alignment_)) {
+ // Update the IR offset to the offset of the deduped item.
+ item_->SetOffset(deduped_offset);
+ // Clear the written data for the item so that the stream write doesn't abort in the future.
+ stream_->Clear(start_offset_, stream_->Tell() - start_offset_);
+ // Since we deduped, restore the offset to the original position.
+ stream_->Seek(start_offset_);
+ }
+}
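The destructor above is the interesting part: the item is written first, then on scope exit the freshly written byte range is offered to the deduper, and if an identical blob already exists the stream is rolled back and the IR item is pointed at the earlier copy. A toy standalone version of that write-then-maybe-roll-back pattern (no alignment handling, hypothetical names):

    #include <cstdint>
    #include <map>
    #include <string>
    #include <vector>

    class ScopedDedupedWrite {
     public:
      ScopedDedupedWrite(std::vector<uint8_t>* out,
                         std::map<std::string, uint32_t>* seen,
                         uint32_t* final_offset)
          : out_(out), seen_(seen), final_offset_(final_offset),
            start_(static_cast<uint32_t>(out->size())) {}

      ~ScopedDedupedWrite() {
        std::string blob(out_->begin() + start_, out_->end());
        auto it = seen_->emplace(blob, start_);
        if (!it.second) {
          out_->resize(start_);               // roll back the duplicate bytes
          *final_offset_ = it.first->second;  // reuse the offset of the earlier copy
        } else {
          *final_offset_ = start_;
        }
      }

     private:
      std::vector<uint8_t>* const out_;
      std::map<std::string, uint32_t>* const seen_;
      uint32_t* const final_offset_;
      const uint32_t start_;
    };

    int main() {
      std::vector<uint8_t> out;
      std::map<std::string, uint32_t> seen;
      uint32_t off_a = 0;
      uint32_t off_b = 0;
      { ScopedDedupedWrite s(&out, &seen, &off_a); out.push_back(0xAB); }
      { ScopedDedupedWrite s(&out, &seen, &off_b); out.push_back(0xAB); }
      // Both writes end up sharing the single copy at offset 0.
      return (off_a == 0 && off_b == 0 && out.size() == 1) ? 0 : 1;
    }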
+
+size_t CompactDexWriter::ScopedDataSectionItem::Written() const {
+ return stream_->Tell() - start_offset_;
+}
+
+void CompactDexWriter::WriteCodeItem(Stream* stream,
+ dex_ir::CodeItem* code_item,
+ bool reserve_only) {
+ DCHECK(code_item != nullptr);
+ DCHECK(!reserve_only) << "Not supported because of deduping.";
+ ScopedDataSectionItem data_item(stream,
+ code_item,
+ CompactDexFile::CodeItem::kAlignment,
+ code_item_dedupe_);
+
+ CompactDexFile::CodeItem disk_code_item;
+
+ uint16_t preheader_storage[CompactDexFile::CodeItem::kMaxPreHeaderSize] = {};
+ uint16_t* preheader_end = preheader_storage + CompactDexFile::CodeItem::kMaxPreHeaderSize;
+ const uint16_t* preheader = disk_code_item.Create(
+ code_item->RegistersSize(),
+ code_item->InsSize(),
+ code_item->OutsSize(),
+ code_item->TriesSize(),
+ code_item->InsnsSize(),
+ preheader_end);
+ const size_t preheader_bytes = (preheader_end - preheader) * sizeof(preheader[0]);
+
+ static constexpr size_t kPayloadInstructionRequiredAlignment = 4;
+ const uint32_t current_code_item_start = stream->Tell() + preheader_bytes;
+ if (!IsAlignedParam(current_code_item_start, kPayloadInstructionRequiredAlignment) ||
+ kIsDebugBuild) {
+ // If the preheader is going to make the code unaligned, consider adding 2 bytes of padding
+ // before the preheader if required.
+ IterationRange<DexInstructionIterator> instructions = code_item->Instructions();
+ SafeDexInstructionIterator it(instructions.begin(), instructions.end());
+ for (; !it.IsErrorState() && it < instructions.end(); ++it) {
+ // In case the instruction goes past the end of the code item, make sure to not process it.
+ if (std::next(it).IsErrorState()) {
+ break;
+ }
+ const Instruction::Code opcode = it->Opcode();
+ // Payload instructions possibly require special alignment for their data.
+ if (opcode == Instruction::FILL_ARRAY_DATA ||
+ opcode == Instruction::PACKED_SWITCH ||
+ opcode == Instruction::SPARSE_SWITCH) {
+ stream->Skip(
+ RoundUp(current_code_item_start, kPayloadInstructionRequiredAlignment) -
+ current_code_item_start);
+ break;
+ }
+ }
+ }
+
+ // Write preheader first.
+ stream->Write(reinterpret_cast<const uint8_t*>(preheader), preheader_bytes);
+ // Registered offset is after the preheader.
+ ProcessOffset(stream, code_item);
+ // Avoid using sizeof so that we don't write the fake instruction array at the end of the code
+ // item.
+ stream->Write(&disk_code_item, OFFSETOF_MEMBER(CompactDexFile::CodeItem, insns_));
+ // Write the instructions.
+ stream->Write(code_item->Insns(), code_item->InsnsSize() * sizeof(uint16_t));
+ // Write the post instruction data.
+ WriteCodeItemPostInstructionData(stream, code_item, reserve_only);
+}
+
+void CompactDexWriter::WriteDebugInfoItem(Stream* stream, dex_ir::DebugInfoItem* debug_info) {
+ ScopedDataSectionItem data_item(stream,
+ debug_info,
+ SectionAlignment(DexFile::kDexTypeDebugInfoItem),
+ data_item_dedupe_);
+ ProcessOffset(stream, debug_info);
+ stream->Write(debug_info->GetDebugInfo(), debug_info->GetDebugInfoSize());
+}
+
+
+CompactDexWriter::Deduper::Deduper(bool enabled, DexContainer::Section* section)
+ : enabled_(enabled),
+ dedupe_map_(/*bucket_count*/ 32,
+ HashedMemoryRange::HashEqual(section),
+ HashedMemoryRange::HashEqual(section)) {}
+
+uint32_t CompactDexWriter::Deduper::Dedupe(uint32_t data_start,
+ uint32_t data_end,
+ uint32_t item_offset) {
+ if (!enabled_) {
+ return kDidNotDedupe;
+ }
+ HashedMemoryRange range {data_start, data_end - data_start};
+ auto existing = dedupe_map_.emplace(range, item_offset);
+ if (!existing.second) {
+ // Failed to insert means we deduped, return the existing item offset.
+ return existing.first->second;
+ }
+ return kDidNotDedupe;
+}
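Dedupe above relies on unordered_map::emplace: a failed insert means an identical range was seen before, and the stored offset of that earlier copy is returned. A simplified standalone version of that idiom, keyed by the bytes themselves instead of a hashed range into the output section:

    #include <cassert>
    #include <cstdint>
    #include <string>
    #include <unordered_map>

    constexpr uint32_t kDidNotDedupe = 0;

    uint32_t Dedupe(std::unordered_map<std::string, uint32_t>* map,
                    const std::string& bytes,
                    uint32_t item_offset) {
      auto existing = map->emplace(bytes, item_offset);
      if (!existing.second) {
        // Failed to insert: an identical blob was already recorded, reuse its offset.
        return existing.first->second;
      }
      return kDidNotDedupe;
    }

    int main() {
      std::unordered_map<std::string, uint32_t> map;
      assert(Dedupe(&map, "code-item-A", 0x100) == kDidNotDedupe);  // first copy, kept
      assert(Dedupe(&map, "code-item-A", 0x200) == 0x100);          // duplicate, reuse 0x100
      return 0;
    }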
+
+void CompactDexWriter::SortDebugInfosByMethodIndex() {
+ dex_ir::Collections& collections = header_->GetCollections();
+ static constexpr InvokeType invoke_types[] = {
+ kDirect,
+ kVirtual
+ };
+ std::map<const dex_ir::DebugInfoItem*, uint32_t> method_idx_map;
+ for (InvokeType invoke_type : invoke_types) {
+ for (std::unique_ptr<dex_ir::ClassDef>& class_def : collections.ClassDefs()) {
+ // Skip classes that are not defined in this dex file.
+ dex_ir::ClassData* class_data = class_def->GetClassData();
+ if (class_data == nullptr) {
+ continue;
+ }
+ for (auto& method : *(invoke_type == InvokeType::kDirect
+ ? class_data->DirectMethods()
+ : class_data->VirtualMethods())) {
+ const dex_ir::MethodId* method_id = method->GetMethodId();
+ dex_ir::CodeItem* code_item = method->GetCodeItem();
+ if (code_item != nullptr && code_item->DebugInfo() != nullptr) {
+ const dex_ir::DebugInfoItem* debug_item = code_item->DebugInfo();
+ method_idx_map.insert(std::make_pair(debug_item, method_id->GetIndex()));
+ }
+ }
+ }
+ }
+ std::sort(collections.DebugInfoItems().begin(),
+ collections.DebugInfoItems().end(),
+ [&](const std::unique_ptr<dex_ir::DebugInfoItem>& a,
+ const std::unique_ptr<dex_ir::DebugInfoItem>& b) {
+ auto it_a = method_idx_map.find(a.get());
+ auto it_b = method_idx_map.find(b.get());
+ uint32_t idx_a = it_a != method_idx_map.end() ? it_a->second : 0u;
+ uint32_t idx_b = it_b != method_idx_map.end() ? it_b->second : 0u;
+ return idx_a < idx_b;
+ });
+}
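SortDebugInfosByMethodIndex builds a debug-info-item to method-index map and then sorts the item vector with a comparator that looks indices up in that map, treating missing entries as index 0. A tiny standalone sketch of that sort-by-lookup-map idiom (hypothetical data):

    #include <algorithm>
    #include <cassert>
    #include <cstdint>
    #include <map>
    #include <string>
    #include <vector>

    int main() {
      std::vector<std::string> items = {"b", "c", "a"};
      std::map<std::string, uint32_t> index = {{"a", 1}, {"b", 2}, {"c", 3}};
      std::sort(items.begin(), items.end(),
                [&](const std::string& x, const std::string& y) {
                  auto it_x = index.find(x);
                  auto it_y = index.find(y);
                  uint32_t idx_x = it_x != index.end() ? it_x->second : 0u;
                  uint32_t idx_y = it_y != index.end() ? it_y->second : 0u;
                  return idx_x < idx_y;
                });
      assert((items == std::vector<std::string>{"a", "b", "c"}));
      return 0;
    }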
+
+void CompactDexWriter::WriteHeader(Stream* stream) {
CompactDexFile::Header header;
CompactDexFile::WriteMagic(&header.magic_[0]);
CompactDexFile::WriteCurrentVersion(&header.magic_[0]);
@@ -49,17 +297,186 @@ void CompactDexWriter::WriteHeader() {
header.class_defs_off_ = collections.ClassDefsOffset();
header.data_size_ = header_->DataSize();
header.data_off_ = header_->DataOffset();
+
+ // Compact dex specific flags.
+ header.debug_info_offsets_pos_ = debug_info_offsets_pos_;
+ header.debug_info_offsets_table_offset_ = debug_info_offsets_table_offset_;
+ header.debug_info_base_ = debug_info_base_;
header.feature_flags_ = 0u;
// In cases where apps are converted to cdex during install, maintain feature flags so that
// the verifier correctly verifies apps that aren't targetting default methods.
if (header_->SupportDefaultMethods()) {
header.feature_flags_ |= static_cast<uint32_t>(CompactDexFile::FeatureFlags::kDefaultMethods);
}
- UNUSED(Write(reinterpret_cast<uint8_t*>(&header), sizeof(header), 0u));
+ stream->Seek(0);
+ stream->Overwrite(reinterpret_cast<uint8_t*>(&header), sizeof(header));
}
size_t CompactDexWriter::GetHeaderSize() const {
return sizeof(CompactDexFile::Header);
}
+void CompactDexWriter::WriteStringData(Stream* stream, dex_ir::StringData* string_data) {
+ ScopedDataSectionItem data_item(stream,
+ string_data,
+ SectionAlignment(DexFile::kDexTypeStringDataItem),
+ data_item_dedupe_);
+ ProcessOffset(stream, string_data);
+ stream->WriteUleb128(CountModifiedUtf8Chars(string_data->Data()));
+ stream->Write(string_data->Data(), strlen(string_data->Data()));
+ // Skip null terminator (already zeroed out, no need to write).
+ stream->Skip(1);
+}
+
+void CompactDexWriter::Write(DexContainer* output) {
+ CHECK(compute_offsets_);
+ CHECK(output->IsCompactDexContainer());
+ Container* const container = down_cast<Container*>(output);
+ // For now, use the same stream for both data and metadata.
+ Stream temp_main_stream(output->GetMainSection());
+ CHECK_EQ(output->GetMainSection()->Size(), 0u);
+ Stream temp_data_stream(output->GetDataSection());
+ Stream* main_stream = &temp_main_stream;
+ Stream* data_stream = &temp_data_stream;
+
+ // We want offset 0 to be reserved for null; seek to the data section alignment or to the end
+ // of the section, whichever is larger.
+ data_stream->Seek(std::max(
+ static_cast<uint32_t>(output->GetDataSection()->Size()),
+ kDataSectionAlignment));
+ code_item_dedupe_ = &container->code_item_dedupe_;
+ data_item_dedupe_ = &container->data_item_dedupe_;
+
+ // Starting offset is right after the header.
+ main_stream->Seek(GetHeaderSize());
+
+ dex_ir::Collections& collection = header_->GetCollections();
+
+ // Based on: https://source.android.com/devices/tech/dalvik/dex-format
+ // Since the offsets may not have been calculated yet, the writing must be done in the correct order.
+ const uint32_t string_ids_offset = main_stream->Tell();
+ WriteStringIds(main_stream, /*reserve_only*/ true);
+ WriteTypeIds(main_stream);
+ const uint32_t proto_ids_offset = main_stream->Tell();
+ WriteProtoIds(main_stream, /*reserve_only*/ true);
+ WriteFieldIds(main_stream);
+ WriteMethodIds(main_stream);
+ const uint32_t class_defs_offset = main_stream->Tell();
+ WriteClassDefs(main_stream, /*reserve_only*/ true);
+ const uint32_t call_site_ids_offset = main_stream->Tell();
+ WriteCallSiteIds(main_stream, /*reserve_only*/ true);
+ WriteMethodHandles(main_stream);
+
+ if (compute_offsets_) {
+ // Data section.
+ data_stream->AlignTo(kDataSectionAlignment);
+ }
+
+ // Write code item first to minimize the space required for encoded methods.
+ // For cdex, the code items don't depend on the debug info.
+ WriteCodeItems(data_stream, /*reserve_only*/ false);
+
+ // Sort the debug infos by method index order, this reduces size by ~0.1% by reducing the size of
+ // the debug info offset table.
+ SortDebugInfosByMethodIndex();
+ WriteDebugInfoItems(data_stream);
+
+ WriteEncodedArrays(data_stream);
+ WriteAnnotations(data_stream);
+ WriteAnnotationSets(data_stream);
+ WriteAnnotationSetRefs(data_stream);
+ WriteAnnotationsDirectories(data_stream);
+ WriteTypeLists(data_stream);
+ WriteClassDatas(data_stream);
+ WriteStringDatas(data_stream);
+
+ // Write delayed id sections that depend on data sections.
+ {
+ Stream::ScopedSeek seek(main_stream, string_ids_offset);
+ WriteStringIds(main_stream, /*reserve_only*/ false);
+ }
+ {
+ Stream::ScopedSeek seek(main_stream, proto_ids_offset);
+ WriteProtoIds(main_stream, /*reserve_only*/ false);
+ }
+ {
+ Stream::ScopedSeek seek(main_stream, class_defs_offset);
+ WriteClassDefs(main_stream, /*reserve_only*/ false);
+ }
+ {
+ Stream::ScopedSeek seek(main_stream, call_site_ids_offset);
+ WriteCallSiteIds(main_stream, /*reserve_only*/ false);
+ }
+
+ // Write the map list.
+ if (compute_offsets_) {
+ data_stream->AlignTo(SectionAlignment(DexFile::kDexTypeMapList));
+ collection.SetMapListOffset(data_stream->Tell());
+ } else {
+ data_stream->Seek(collection.MapListOffset());
+ }
+
+ // Map items are included in the data section.
+ GenerateAndWriteMapItems(data_stream);
+
+ // Write link data if it exists.
+ const std::vector<uint8_t>& link_data = collection.LinkData();
+ if (link_data.size() > 0) {
+ CHECK_EQ(header_->LinkSize(), static_cast<uint32_t>(link_data.size()));
+ if (compute_offsets_) {
+ header_->SetLinkOffset(data_stream->Tell());
+ } else {
+ data_stream->Seek(header_->LinkOffset());
+ }
+ data_stream->Write(&link_data[0], link_data.size());
+ }
+
+ // Write debug info offset table last to make dex file verifier happy.
+ WriteDebugInfoOffsetTable(data_stream);
+
+ data_stream->AlignTo(kDataSectionAlignment);
+ if (compute_offsets_) {
+ header_->SetDataSize(data_stream->Tell());
+ if (header_->DataSize() != 0) {
+ // Offset must be zero when the size is zero.
+ main_stream->AlignTo(kDataSectionAlignment);
+ // For now, default to saying the data is right after the main stream.
+ header_->SetDataOffset(main_stream->Tell());
+ } else {
+ header_->SetDataOffset(0u);
+ }
+ }
+
+ // Write header last.
+ if (compute_offsets_) {
+ header_->SetFileSize(main_stream->Tell());
+ }
+ WriteHeader(main_stream);
+
+ // Trim sections to make sure they are sized properly.
+ output->GetMainSection()->Resize(header_->FileSize());
+ output->GetDataSection()->Resize(data_stream->Tell());
+
+ if (dex_layout_->GetOptions().update_checksum_) {
+ // Compute the cdex section (also covers the used part of the data section).
+ header_->SetChecksum(CompactDexFile::CalculateChecksum(output->GetMainSection()->Begin(),
+ output->GetMainSection()->Size(),
+ output->GetDataSection()->Begin(),
+ output->GetDataSection()->Size()));
+ // Rewrite the header with the calculated checksum.
+ WriteHeader(main_stream);
+ }
+
+ // Clear the dedupe to prevent interdex code item deduping. This does not currently work well with
+ // dex2oat's class unloading. The issue is that verification encounters quickened opcodes after
+ // the first dex gets unloaded.
+ code_item_dedupe_->Clear();
+}
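Write above finishes by rewriting the header twice: once when the final file size is known, and again after the checksum over the finished sections has been computed. A toy standalone version of that write-everything-then-patch-the-header pattern (toy header and toy checksum, not the cdex format):

    #include <cstdint>
    #include <cstring>
    #include <numeric>
    #include <vector>

    struct ToyHeader {
      uint32_t checksum;
      uint32_t file_size;
    };

    int main() {
      std::vector<uint8_t> out(sizeof(ToyHeader), 0u);  // placeholder header at offset 0
      out.push_back(0x12);                              // payload written after the header
      out.push_back(0x34);

      ToyHeader header = {};
      header.file_size = static_cast<uint32_t>(out.size());
      header.checksum = std::accumulate(out.begin() + sizeof(ToyHeader), out.end(), 0u);
      std::memcpy(out.data(), &header, sizeof(header));  // overwrite the header in place
      return header.checksum == 0x46 ? 0 : 1;
    }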
+
+std::unique_ptr<DexContainer> CompactDexWriter::CreateDexContainer() const {
+ return std::unique_ptr<DexContainer>(
+ new CompactDexWriter::Container(dex_layout_->GetOptions().dedupe_code_items_));
+}
+
} // namespace art
diff --git a/dexlayout/compact_dex_writer.h b/dexlayout/compact_dex_writer.h
index d13333bb18..ea9f7d13db 100644
--- a/dexlayout/compact_dex_writer.h
+++ b/dexlayout/compact_dex_writer.h
@@ -19,27 +19,157 @@
#ifndef ART_DEXLAYOUT_COMPACT_DEX_WRITER_H_
#define ART_DEXLAYOUT_COMPACT_DEX_WRITER_H_
+#include <memory> // For unique_ptr
+#include <unordered_map>
+
#include "dex_writer.h"
+#include "utils.h"
namespace art {
+// Compact dex writer for a single dex.
class CompactDexWriter : public DexWriter {
public:
- CompactDexWriter(dex_ir::Header* header,
- MemMap* mem_map,
- DexLayout* dex_layout,
- CompactDexLevel compact_dex_level)
- : DexWriter(header, mem_map, dex_layout, /*compute_offsets*/ true),
- compact_dex_level_(compact_dex_level) {}
+ explicit CompactDexWriter(DexLayout* dex_layout);
+
+ protected:
+ class Deduper {
+ public:
+ static const uint32_t kDidNotDedupe = 0;
+
+ // If not enabled, Dedupe will always return kDidNotDedupe.
+ explicit Deduper(bool enabled, DexContainer::Section* section);
+
+ // Deduplicate a blob of data that has been written to mem_map.
+ // Returns the offset of the deduplicated data, or kDidNotDedupe if deduplication did not occur.
+ uint32_t Dedupe(uint32_t data_start, uint32_t data_end, uint32_t item_offset);
+
+ // Clear dedupe state to prevent deduplication against existing items in the future.
+ void Clear() {
+ dedupe_map_.clear();
+ }
+
+ private:
+ class HashedMemoryRange {
+ public:
+ uint32_t offset_;
+ uint32_t length_;
+
+ class HashEqual {
+ public:
+ explicit HashEqual(DexContainer::Section* section) : section_(section) {}
+
+ // Equal function.
+ bool operator()(const HashedMemoryRange& a, const HashedMemoryRange& b) const {
+ if (a.length_ != b.length_) {
+ return false;
+ }
+ const uint8_t* data = Data();
+ DCHECK_LE(a.offset_ + a.length_, section_->Size());
+ DCHECK_LE(b.offset_ + b.length_, section_->Size());
+ return std::equal(data + a.offset_, data + a.offset_ + a.length_, data + b.offset_);
+ }
+
+ // Hash function.
+ size_t operator()(const HashedMemoryRange& range) const {
+ DCHECK_LE(range.offset_ + range.length_, section_->Size());
+ return HashBytes(Data() + range.offset_, range.length_);
+ }
+
+ ALWAYS_INLINE uint8_t* Data() const {
+ return section_->Begin();
+ }
+
+ private:
+ DexContainer::Section* const section_;
+ };
+ };
+
+ const bool enabled_;
+
+ // Dedupe map.
+ std::unordered_map<HashedMemoryRange,
+ uint32_t,
+ HashedMemoryRange::HashEqual,
+ HashedMemoryRange::HashEqual> dedupe_map_;
+ };
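The dedupe map above passes the same HashEqual functor as both the Hash and the KeyEqual template arguments, which is a convenient way to share the backing-section pointer between hashing and comparison. A standalone sketch of that pattern with a trivial key type (hypothetical, not the HashedMemoryRange code):

    #include <cassert>
    #include <cstddef>
    #include <unordered_map>

    struct Key {
      int a;
      int b;
    };

    struct HashEqual {
      size_t operator()(const Key& k) const {  // hash
        return static_cast<size_t>(k.a) * 31u + static_cast<size_t>(k.b);
      }
      bool operator()(const Key& x, const Key& y) const {  // equality
        return x.a == y.a && x.b == y.b;
      }
    };

    int main() {
      std::unordered_map<Key, int, HashEqual, HashEqual> map(/* bucket_count */ 32,
                                                             HashEqual(), HashEqual());
      map[Key{1, 2}] = 7;
      assert((map.find(Key{1, 2}) != map.end()));
      assert((map[Key{1, 2}] == 7));
      return 0;
    }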
+
+ // Handles alignment and deduping of a data section item.
+ class ScopedDataSectionItem {
+ public:
+ ScopedDataSectionItem(Stream* stream, dex_ir::Item* item, size_t alignment, Deduper* deduper);
+ ~ScopedDataSectionItem();
+ size_t Written() const;
+
+ private:
+ Stream* const stream_;
+ dex_ir::Item* const item_;
+ const size_t alignment_;
+ Deduper* deduper_;
+ const uint32_t start_offset_;
+ };
+
+ public:
+ class Container : public DexContainer {
+ public:
+ Section* GetMainSection() OVERRIDE {
+ return &main_section_;
+ }
+
+ Section* GetDataSection() OVERRIDE {
+ return &data_section_;
+ }
+
+ bool IsCompactDexContainer() const OVERRIDE {
+ return true;
+ }
+
+ private:
+ explicit Container(bool dedupe_code_items);
+
+ VectorSection main_section_;
+ VectorSection data_section_;
+ Deduper code_item_dedupe_;
+ Deduper data_item_dedupe_;
+
+ friend class CompactDexWriter;
+ };
protected:
- void WriteHeader() OVERRIDE;
+ void Write(DexContainer* output) OVERRIDE;
+
+ std::unique_ptr<DexContainer> CreateDexContainer() const OVERRIDE;
+
+ void WriteHeader(Stream* stream) OVERRIDE;
size_t GetHeaderSize() const OVERRIDE;
- const CompactDexLevel compact_dex_level_;
+ uint32_t WriteDebugInfoOffsetTable(Stream* stream);
+
+ void WriteCodeItem(Stream* stream, dex_ir::CodeItem* code_item, bool reserve_only) OVERRIDE;
+
+ void WriteStringData(Stream* stream, dex_ir::StringData* string_data) OVERRIDE;
+
+ void WriteDebugInfoItem(Stream* stream, dex_ir::DebugInfoItem* debug_info) OVERRIDE;
+
+ void SortDebugInfosByMethodIndex();
+
+ CompactDexLevel GetCompactDexLevel() const;
private:
+ // Position in the compact dex file where the debug info table data starts.
+ uint32_t debug_info_offsets_pos_ = 0u;
+
+ // Offset into the debug info table data where the lookup table is.
+ uint32_t debug_info_offsets_table_offset_ = 0u;
+
+ // Base offset of where debug info starts in the dex file.
+ uint32_t debug_info_base_ = 0u;
+
+ // State for where we are deduping.
+ Deduper* code_item_dedupe_ = nullptr;
+ Deduper* data_item_dedupe_ = nullptr;
+
DISALLOW_COPY_AND_ASSIGN(CompactDexWriter);
};
diff --git a/dexlayout/dex_container.h b/dexlayout/dex_container.h
new file mode 100644
index 0000000000..2b9a5f9959
--- /dev/null
+++ b/dexlayout/dex_container.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Header file of the container holding the sections produced by dexlayout.
+ */
+
+#ifndef ART_DEXLAYOUT_DEX_CONTAINER_H_
+#define ART_DEXLAYOUT_DEX_CONTAINER_H_
+
+#include <vector>
+
+namespace art {
+
+// Dex container holds the artifacts produced by dexlayout and contains up to two sections: a main
+// section and a data section.
+// This container may also hold metadata used for multi dex deduplication in the future.
+class DexContainer {
+ public:
+ virtual ~DexContainer() {}
+
+ class Section {
+ public:
+ virtual ~Section() {}
+
+ // Returns the start of the memory region.
+ virtual uint8_t* Begin() = 0;
+
+ // Size in bytes.
+ virtual size_t Size() const = 0;
+
+ // Resize the backing storage.
+ virtual void Resize(size_t size) = 0;
+
+ // Clear the container.
+ virtual void Clear() = 0;
+
+ // Returns the end of the memory region.
+ uint8_t* End() {
+ return Begin() + Size();
+ }
+ };
+
+ // Vector backed section.
+ class VectorSection : public Section {
+ public:
+ virtual ~VectorSection() {}
+
+ uint8_t* Begin() OVERRIDE {
+ return &data_[0];
+ }
+
+ size_t Size() const OVERRIDE {
+ return data_.size();
+ }
+
+ void Resize(size_t size) OVERRIDE {
+ data_.resize(size, 0u);
+ }
+
+ void Clear() OVERRIDE {
+ data_.clear();
+ }
+
+ private:
+ std::vector<uint8_t> data_;
+ };
+
+ virtual Section* GetMainSection() = 0;
+ virtual Section* GetDataSection() = 0;
+ virtual bool IsCompactDexContainer() const = 0;
+};
+
+} // namespace art
+
+#endif // ART_DEXLAYOUT_DEX_CONTAINER_H_
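For reference, the VectorSection defined above is just a growable byte buffer behind the Section interface. A minimal standalone rewrite of the same idea (using std::vector::data() instead of &data_[0] so an empty section is well defined):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    class VectorSection {
     public:
      uint8_t* Begin() { return data_.data(); }
      size_t Size() const { return data_.size(); }
      void Resize(size_t size) { data_.resize(size, 0u); }
      void Clear() { data_.clear(); }
      uint8_t* End() { return Begin() + Size(); }

     private:
      std::vector<uint8_t> data_;
    };

    int main() {
      VectorSection section;
      section.Resize(4);
      std::memcpy(section.Begin(), "dex", 4);  // includes the trailing NUL
      assert(section.Size() == 4);
      section.Clear();
      assert(section.Size() == 0);
      return 0;
    }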
diff --git a/dexlayout/dex_ir.cc b/dexlayout/dex_ir.cc
index 2191ea601f..1525d537b7 100644
--- a/dexlayout/dex_ir.cc
+++ b/dexlayout/dex_ir.cc
@@ -280,7 +280,7 @@ void Collections::ReadEncodedValue(const DexFile& dex_file,
}
case DexFile::kDexAnnotationArray: {
EncodedValueVector* values = new EncodedValueVector();
- const uint32_t offset = *data - dex_file.Begin();
+ const uint32_t offset = *data - dex_file.DataBegin();
const uint32_t size = DecodeUnsignedLeb128(data);
// Decode all elements.
for (uint32_t i = 0; i < size; i++) {
@@ -440,7 +440,7 @@ void Collections::AddAnnotationsFromMapListSection(const DexFile& dex_file,
AnnotationItem* Collections::CreateAnnotationItem(const DexFile& dex_file,
const DexFile::AnnotationItem* annotation) {
const uint8_t* const start_data = reinterpret_cast<const uint8_t*>(annotation);
- const uint32_t offset = start_data - dex_file.Begin();
+ const uint32_t offset = start_data - dex_file.DataBegin();
AnnotationItem* annotation_item = annotation_items_map_.GetExistingObject(offset);
if (annotation_item == nullptr) {
uint8_t visibility = annotation->visibility_;
@@ -565,16 +565,23 @@ ParameterAnnotation* Collections::GenerateParameterAnnotation(
return new ParameterAnnotation(method_id, set_ref_list);
}
-CodeItem* Collections::CreateCodeItem(const DexFile& dex_file,
- const DexFile::CodeItem& disk_code_item, uint32_t offset) {
- CodeItemDebugInfoAccessor accessor(dex_file, &disk_code_item);
- const uint16_t registers_size = accessor.RegistersSize();
- const uint16_t ins_size = accessor.InsSize();
- const uint16_t outs_size = accessor.OutsSize();
- const uint32_t tries_size = accessor.TriesSize();
-
- // TODO: Calculate the size of the debug info.
+CodeItem* Collections::DedupeOrCreateCodeItem(const DexFile& dex_file,
+ const DexFile::CodeItem* disk_code_item,
+ uint32_t offset,
+ uint32_t dex_method_index) {
+ if (disk_code_item == nullptr) {
+ return nullptr;
+ }
+ CodeItemDebugInfoAccessor accessor(dex_file, disk_code_item, dex_method_index);
const uint32_t debug_info_offset = accessor.DebugInfoOffset();
+
+ // Create the offsets pair and dedupe based on it.
+ std::pair<uint32_t, uint32_t> offsets_pair(offset, debug_info_offset);
+ auto existing = code_items_map_.find(offsets_pair);
+ if (existing != code_items_map_.end()) {
+ return existing->second;
+ }
+
const uint8_t* debug_info_stream = dex_file.GetDebugInfoStream(debug_info_offset);
DebugInfoItem* debug_info = nullptr;
if (debug_info_stream != nullptr) {
@@ -594,7 +601,7 @@ CodeItem* Collections::CreateCodeItem(const DexFile& dex_file,
TryItemVector* tries = nullptr;
CatchHandlerVector* handler_list = nullptr;
- if (tries_size > 0) {
+ if (accessor.TriesSize() > 0) {
tries = new TryItemVector();
handler_list = new CatchHandlerVector();
for (const DexFile::TryItem& disk_try_item : accessor.TryItems()) {
@@ -669,11 +676,25 @@ CodeItem* Collections::CreateCodeItem(const DexFile& dex_file,
}
}
- uint32_t size = dex_file.GetCodeItemSize(disk_code_item);
- CodeItem* code_item = new CodeItem(
- registers_size, ins_size, outs_size, debug_info, insns_size, insns, tries, handler_list);
+ uint32_t size = dex_file.GetCodeItemSize(*disk_code_item);
+ CodeItem* code_item = new CodeItem(accessor.RegistersSize(),
+ accessor.InsSize(),
+ accessor.OutsSize(),
+ debug_info,
+ insns_size,
+ insns,
+ tries,
+ handler_list);
code_item->SetSize(size);
- AddItem(code_items_map_, code_items_, code_item, offset);
+
+ // Add the code item to the map.
+ DCHECK(!code_item->OffsetAssigned());
+ if (eagerly_assign_offsets_) {
+ code_item->SetOffset(offset);
+ }
+ code_items_map_.emplace(offsets_pair, code_item);
+ code_items_.AddItem(code_item);
+
// Add "fixup" references to types, strings, methods, and fields.
// This is temporary, as we will probably want more detailed parsing of the
// instructions here.
@@ -701,14 +722,12 @@ MethodItem* Collections::GenerateMethodItem(const DexFile& dex_file, ClassDataIt
MethodId* method_id = GetMethodId(cdii.GetMemberIndex());
uint32_t access_flags = cdii.GetRawMemberAccessFlags();
const DexFile::CodeItem* disk_code_item = cdii.GetMethodCodeItem();
- CodeItem* code_item = code_items_map_.GetExistingObject(cdii.GetMethodCodeItemOffset());
- DebugInfoItem* debug_info = nullptr;
- if (disk_code_item != nullptr) {
- if (code_item == nullptr) {
- code_item = CreateCodeItem(dex_file, *disk_code_item, cdii.GetMethodCodeItemOffset());
- }
- debug_info = code_item->DebugInfo();
- }
+ // Temporary hack to prevent incorrectly deduping code items if they have the same offset since
+ // they may have different debug info streams.
+ CodeItem* code_item = DedupeOrCreateCodeItem(dex_file,
+ disk_code_item,
+ cdii.GetMethodCodeItemOffset(),
+ cdii.GetMemberIndex());
return new MethodItem(access_flags, method_id, code_item);
}
@@ -753,8 +772,7 @@ ClassData* Collections::CreateClassData(
void Collections::CreateCallSitesAndMethodHandles(const DexFile& dex_file) {
// Iterate through the map list and set the offset of the CallSiteIds and MethodHandleItems.
- const DexFile::MapList* map =
- reinterpret_cast<const DexFile::MapList*>(dex_file.Begin() + MapListOffset());
+ const DexFile::MapList* map = dex_file.GetMapList();
for (uint32_t i = 0; i < map->size_; ++i) {
const DexFile::MapItem* item = map->list_ + i;
switch (item->type_) {
@@ -780,7 +798,7 @@ void Collections::CreateCallSitesAndMethodHandles(const DexFile& dex_file) {
void Collections::CreateCallSiteId(const DexFile& dex_file, uint32_t i) {
const DexFile::CallSiteIdItem& disk_call_site_id = dex_file.GetCallSiteId(i);
- const uint8_t* disk_call_item_ptr = dex_file.Begin() + disk_call_site_id.data_off_;
+ const uint8_t* disk_call_item_ptr = dex_file.DataBegin() + disk_call_site_id.data_off_;
EncodedArrayItem* call_site_item =
CreateEncodedArrayItem(dex_file, disk_call_item_ptr, disk_call_site_id.data_off_);
@@ -814,16 +832,16 @@ void Collections::CreateMethodHandleItem(const DexFile& dex_file, uint32_t i) {
}
void Collections::SortVectorsByMapOrder() {
- string_datas_map_.SortVectorByMapOrder(string_datas_);
- type_lists_map_.SortVectorByMapOrder(type_lists_);
- encoded_array_items_map_.SortVectorByMapOrder(encoded_array_items_);
- annotation_items_map_.SortVectorByMapOrder(annotation_items_);
- annotation_set_items_map_.SortVectorByMapOrder(annotation_set_items_);
- annotation_set_ref_lists_map_.SortVectorByMapOrder(annotation_set_ref_lists_);
- annotations_directory_items_map_.SortVectorByMapOrder(annotations_directory_items_);
- debug_info_items_map_.SortVectorByMapOrder(debug_info_items_);
- code_items_map_.SortVectorByMapOrder(code_items_);
- class_datas_map_.SortVectorByMapOrder(class_datas_);
+ string_datas_.SortByMapOrder(string_datas_map_.Collection());
+ type_lists_.SortByMapOrder(type_lists_map_.Collection());
+ encoded_array_items_.SortByMapOrder(encoded_array_items_map_.Collection());
+ annotation_items_.SortByMapOrder(annotation_items_map_.Collection());
+ annotation_set_items_.SortByMapOrder(annotation_set_items_map_.Collection());
+ annotation_set_ref_lists_.SortByMapOrder(annotation_set_ref_lists_map_.Collection());
+ annotations_directory_items_.SortByMapOrder(annotations_directory_items_map_.Collection());
+ debug_info_items_.SortByMapOrder(debug_info_items_map_.Collection());
+ code_items_.SortByMapOrder(code_items_map_);
+ class_datas_.SortByMapOrder(class_datas_map_.Collection());
}
static uint32_t HeaderOffset(const dex_ir::Collections& collections ATTRIBUTE_UNUSED) {
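The dedupe change above is easiest to see in isolation: code items are now cached under a (code item offset, debug info offset) pair, so two methods that share a code_item offset but reference different debug info streams no longer collapse into a single IR node. A minimal sketch of that lookup, using simplified placeholder types rather than the real dexlayout classes:

// Sketch only: pair-keyed dedupe lookup; CodeItem and the parsing step are placeholders.
#include <cstdint>
#include <map>
#include <utility>

struct CodeItem {};

std::map<std::pair<uint32_t, uint32_t>, CodeItem*> code_items_map;

CodeItem* DedupeOrCreate(uint32_t code_off, uint32_t debug_info_off) {
  const auto key = std::make_pair(code_off, debug_info_off);
  auto it = code_items_map.find(key);
  if (it != code_items_map.end()) {
    return it->second;  // Same code offset and same debug info: safe to reuse.
  }
  CodeItem* item = new CodeItem();  // The real code parses the disk code item here.
  code_items_map.emplace(key, item);
  return item;
}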
diff --git a/dexlayout/dex_ir.h b/dexlayout/dex_ir.h
index 6797fa5dd6..d28b824c7b 100644
--- a/dexlayout/dex_ir.h
+++ b/dexlayout/dex_ir.h
@@ -27,8 +27,8 @@
#include "base/stl_util.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_file_types.h"
+#include "dex/utf.h"
#include "leb128.h"
-#include "utf.h"
namespace art {
namespace dex_ir {
@@ -133,6 +133,21 @@ template<class T> class CollectionVector : public CollectionBase<T> {
uint32_t Size() const { return collection_.size(); }
Vector& Collection() { return collection_; }
+ const Vector& Collection() const { return collection_; }
+
+ // Sort the vector by copying pointers over.
+ template <typename MapType>
+ void SortByMapOrder(const MapType& map) {
+ auto it = map.begin();
+ CHECK_EQ(map.size(), Size());
+ for (size_t i = 0; i < Size(); ++i) {
+ // There are times when the array will temporarily contain the same pointer twice; doing the
+ // release here ensures that there are no double-free errors.
+ Collection()[i].release();
+ Collection()[i].reset(it->second);
+ ++it;
+ }
+ }
protected:
Vector collection_;
@@ -171,22 +186,10 @@ template<class T> class CollectionMap : public CollectionBase<T> {
return it != collection_.end() ? it->second : nullptr;
}
- uint32_t Size() const { return collection_.size(); }
+ // Lower case for template interop with std::map.
+ uint32_t size() const { return collection_.size(); }
std::map<uint32_t, T*>& Collection() { return collection_; }
- // Sort the vector by copying pointers over.
- void SortVectorByMapOrder(CollectionVector<T>& vector) {
- auto it = collection_.begin();
- CHECK_EQ(vector.Size(), Size());
- for (size_t i = 0; i < Size(); ++i) {
- // There are times when the array will temporarily contain the same pointer twice, doing the
- // release here sure there is no double free errors.
- vector.Collection()[i].release();
- vector.Collection()[i].reset(it->second);
- ++it;
- }
- }
-
private:
std::map<uint32_t, T*> collection_;
@@ -230,6 +233,8 @@ class Collections {
CollectionVector<CodeItem>::Vector& CodeItems() { return code_items_.Collection(); }
CollectionVector<ClassData>::Vector& ClassDatas() { return class_datas_.Collection(); }
+ const CollectionVector<ClassDef>::Vector& ClassDefs() const { return class_defs_.Collection(); }
+
void CreateStringId(const DexFile& dex_file, uint32_t i);
void CreateTypeId(const DexFile& dex_file, uint32_t i);
void CreateProtoId(const DexFile& dex_file, uint32_t i);
@@ -251,8 +256,10 @@ class Collections {
const DexFile::AnnotationSetItem* disk_annotations_item, uint32_t offset);
AnnotationsDirectoryItem* CreateAnnotationsDirectoryItem(const DexFile& dex_file,
const DexFile::AnnotationsDirectoryItem* disk_annotations_item, uint32_t offset);
- CodeItem* CreateCodeItem(
- const DexFile& dex_file, const DexFile::CodeItem& disk_code_item, uint32_t offset);
+ CodeItem* DedupeOrCreateCodeItem(const DexFile& dex_file,
+ const DexFile::CodeItem* disk_code_item,
+ uint32_t offset,
+ uint32_t dex_method_index);
ClassData* CreateClassData(const DexFile& dex_file, const uint8_t* encoded_data, uint32_t offset);
void AddAnnotationsFromMapListSection(const DexFile& dex_file,
uint32_t start_offset,
@@ -455,7 +462,10 @@ class Collections {
CollectionMap<AnnotationSetRefList> annotation_set_ref_lists_map_;
CollectionMap<AnnotationsDirectoryItem> annotations_directory_items_map_;
CollectionMap<DebugInfoItem> debug_info_items_map_;
- CollectionMap<CodeItem> code_items_map_;
+ // Code item maps need to check both the code item offset and the debug info offset, so do
+ // not use CollectionMap.
+ // First offset is the code item offset, second is the debug info offset.
+ std::map<std::pair<uint32_t, uint32_t>, CodeItem*> code_items_map_;
CollectionMap<ClassData> class_datas_map_;
uint32_t map_list_offset_ = 0;
@@ -476,11 +486,11 @@ class Item {
virtual ~Item() { }
// Return the assigned offset.
- uint32_t GetOffset() const {
+ uint32_t GetOffset() const WARN_UNUSED {
CHECK(OffsetAssigned());
return offset_;
}
- uint32_t GetSize() const { return size_; }
+ uint32_t GetSize() const WARN_UNUSED { return size_; }
void SetOffset(uint32_t offset) { offset_ = offset; }
void SetSize(uint32_t size) { size_ = size; }
bool OffsetAssigned() const {
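SortByMapOrder, moved from CollectionMap onto CollectionVector above, reorders the owning vector so it matches the map's (file-order) iteration. The release()-before-reset() dance is what keeps the unique_ptrs from double-freeing while the pointers are shuffled. A stripped-down version of the same idea, using plain std types instead of the dexlayout collections:

// Sketch only: reorder a vector of owning pointers to follow a map's iteration order.
#include <cassert>
#include <cstdint>
#include <map>
#include <memory>
#include <vector>

template <typename T>
void SortByMapOrder(std::vector<std::unique_ptr<T>>& vec, const std::map<uint32_t, T*>& map) {
  assert(map.size() == vec.size());
  auto it = map.begin();
  for (size_t i = 0; i < vec.size(); ++i, ++it) {
    // After earlier iterations, a pointer may be owned by two slots at once (its new slot
    // and its not-yet-processed old slot). release() the old slot's pointer before reset()
    // so the overwrite never deletes an object that is still in use.
    vec[i].release();
    vec[i].reset(it->second);
  }
}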
diff --git a/dexlayout/dex_ir_builder.cc b/dexlayout/dex_ir_builder.cc
index 231826b7a8..4f9bcdd742 100644
--- a/dexlayout/dex_ir_builder.cc
+++ b/dexlayout/dex_ir_builder.cc
@@ -20,13 +20,18 @@
#include <vector>
#include "dex_ir_builder.h"
+#include "dexlayout.h"
namespace art {
namespace dex_ir {
-static void CheckAndSetRemainingOffsets(const DexFile& dex_file, Collections* collections);
+static void CheckAndSetRemainingOffsets(const DexFile& dex_file,
+ Collections* collections,
+ const Options& options);
-Header* DexIrBuilder(const DexFile& dex_file, bool eagerly_assign_offsets) {
+Header* DexIrBuilder(const DexFile& dex_file,
+ bool eagerly_assign_offsets,
+ const Options& options) {
const DexFile::Header& disk_header = dex_file.GetHeader();
Header* header = new Header(disk_header.magic_,
disk_header.checksum_,
@@ -70,30 +75,40 @@ Header* DexIrBuilder(const DexFile& dex_file, bool eagerly_assign_offsets) {
// ClassDef table.
collections.SetClassDefsOffset(disk_header.class_defs_off_);
for (uint32_t i = 0; i < dex_file.NumClassDefs(); ++i) {
+ if (!options.class_filter_.empty()) {
+ // If the filter is enabled (not empty), filter out classes that don't have a matching
+ // descriptor.
+ const DexFile::ClassDef& class_def = dex_file.GetClassDef(i);
+ const char* descriptor = dex_file.GetClassDescriptor(class_def);
+ if (options.class_filter_.find(descriptor) == options.class_filter_.end()) {
+ continue;
+ }
+ }
collections.CreateClassDef(dex_file, i);
}
// MapItem.
collections.SetMapListOffset(disk_header.map_off_);
// CallSiteIds and MethodHandleItems.
collections.CreateCallSitesAndMethodHandles(dex_file);
- CheckAndSetRemainingOffsets(dex_file, &collections);
+ CheckAndSetRemainingOffsets(dex_file, &collections, options);
// Sort the vectors by the map order (same order as the file).
collections.SortVectorsByMapOrder();
// Load the link data if it exists.
collections.SetLinkData(std::vector<uint8_t>(
- dex_file.Begin() + dex_file.GetHeader().link_off_,
- dex_file.Begin() + dex_file.GetHeader().link_off_ + dex_file.GetHeader().link_size_));
+ dex_file.DataBegin() + dex_file.GetHeader().link_off_,
+ dex_file.DataBegin() + dex_file.GetHeader().link_off_ + dex_file.GetHeader().link_size_));
return header;
}
-static void CheckAndSetRemainingOffsets(const DexFile& dex_file, Collections* collections) {
+static void CheckAndSetRemainingOffsets(const DexFile& dex_file,
+ Collections* collections,
+ const Options& options) {
const DexFile::Header& disk_header = dex_file.GetHeader();
// Read MapItems and validate/set remaining offsets.
- const DexFile::MapList* map =
- reinterpret_cast<const DexFile::MapList*>(dex_file.Begin() + disk_header.map_off_);
+ const DexFile::MapList* map = dex_file.GetMapList();
const uint32_t count = map->size_;
for (uint32_t i = 0; i < count; ++i) {
const DexFile::MapItem* item = map->list_ + i;
@@ -123,7 +138,10 @@ static void CheckAndSetRemainingOffsets(const DexFile& dex_file, Collections* co
CHECK_EQ(item->offset_, collections->MethodIdsOffset());
break;
case DexFile::kDexTypeClassDefItem:
- CHECK_EQ(item->size_, collections->ClassDefsSize());
+ if (options.class_filter_.empty()) {
+ // The filter may have removed some classes; this will get fixed up during writing.
+ CHECK_EQ(item->size_, collections->ClassDefsSize());
+ }
CHECK_EQ(item->offset_, collections->ClassDefsOffset());
break;
case DexFile::kDexTypeCallSiteIdItem:
diff --git a/dexlayout/dex_ir_builder.h b/dexlayout/dex_ir_builder.h
index 4d4b4e8699..9f5377fe56 100644
--- a/dexlayout/dex_ir_builder.h
+++ b/dexlayout/dex_ir_builder.h
@@ -22,11 +22,16 @@
#include "dex_ir.h"
namespace art {
+
+class Options;
+
namespace dex_ir {
-// Eagerly assign offsets assigns offsets based on the original offsets in the input dex file. If
-// this not done, dex_ir::Item::GetOffset will abort when reading uninitialized offsets.
-dex_ir::Header* DexIrBuilder(const DexFile& dex_file, bool eagerly_assign_offsets);
+// Eagerly assign offsets based on the original offsets in the input dex file. If this is not done,
+// dex_ir::Item::GetOffset will abort when reading uninitialized offsets.
+dex_ir::Header* DexIrBuilder(const DexFile& dex_file,
+ bool eagerly_assign_offsets,
+ const Options& options);
} // namespace dex_ir
} // namespace art
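The new Options parameter threaded through DexIrBuilder and CheckAndSetRemainingOffsets exists mainly for the class filter: when class_filter_ is non-empty, class defs whose descriptors are not listed are never added to the IR, and the map-list size check is relaxed because the count no longer matches the input. A small sketch of the descriptor check, assuming the filter is a set of descriptor strings such as "Lcom/example/Foo;" (the real Options member may use a different container):

// Sketch only: keep a class def when the filter is empty or lists its descriptor.
#include <set>
#include <string>

bool KeepClass(const std::set<std::string>& class_filter, const char* descriptor) {
  // An empty filter means "keep everything"; otherwise keep only the listed classes.
  return class_filter.empty() || class_filter.find(descriptor) != class_filter.end();
}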
diff --git a/dexlayout/dex_visualize.cc b/dexlayout/dex_visualize.cc
index e4ed69b8d2..516a3382fd 100644
--- a/dexlayout/dex_visualize.cc
+++ b/dexlayout/dex_visualize.cc
@@ -29,6 +29,8 @@
#include <memory>
#include <vector>
+#include <android-base/logging.h>
+
#include "dex_ir.h"
#include "dexlayout.h"
#include "jit/profile_compilation_info.h"
@@ -246,7 +248,7 @@ void VisualizeDexLayout(dex_ir::Header* header,
ProfileCompilationInfo* profile_info) {
std::unique_ptr<Dumper> dumper(new Dumper(header));
if (!dumper->OpenAndPrintHeader(dex_file_index)) {
- fprintf(stderr, "Could not open output file.\n");
+ LOG(ERROR) << "Could not open output file.";
return;
}
diff --git a/dexlayout/dex_writer.cc b/dexlayout/dex_writer.cc
index 489a6b15ba..808bfad029 100644
--- a/dexlayout/dex_writer.cc
+++ b/dexlayout/dex_writer.cc
@@ -25,31 +25,14 @@
#include "dex/dex_file_layout.h"
#include "dex/dex_file_types.h"
#include "dex/standard_dex_file.h"
+#include "dex/utf.h"
#include "dexlayout.h"
-#include "utf.h"
namespace art {
-static constexpr uint32_t kDataSectionAlignment = sizeof(uint32_t) * 2;
-static constexpr uint32_t kDexSectionWordAlignment = 4;
+constexpr uint32_t DexWriter::kDataSectionAlignment;
-static constexpr uint32_t SectionAlignment(DexFile::MapItemType type) {
- switch (type) {
- case DexFile::kDexTypeClassDataItem:
- case DexFile::kDexTypeStringDataItem:
- case DexFile::kDexTypeDebugInfoItem:
- case DexFile::kDexTypeAnnotationItem:
- case DexFile::kDexTypeEncodedArrayItem:
- return alignof(uint8_t);
-
- default:
- // All other sections are kDexAlignedSection.
- return kDexSectionWordAlignment;
- }
-}
-
-
-size_t EncodeIntValue(int32_t value, uint8_t* buffer) {
+static size_t EncodeIntValue(int32_t value, uint8_t* buffer) {
size_t length = 0;
if (value >= 0) {
while (value > 0x7f) {
@@ -66,7 +49,7 @@ size_t EncodeIntValue(int32_t value, uint8_t* buffer) {
return length;
}
-size_t EncodeUIntValue(uint32_t value, uint8_t* buffer) {
+static size_t EncodeUIntValue(uint32_t value, uint8_t* buffer) {
size_t length = 0;
do {
buffer[length++] = static_cast<uint8_t>(value);
@@ -75,7 +58,7 @@ size_t EncodeUIntValue(uint32_t value, uint8_t* buffer) {
return length;
}
-size_t EncodeLongValue(int64_t value, uint8_t* buffer) {
+static size_t EncodeLongValue(int64_t value, uint8_t* buffer) {
size_t length = 0;
if (value >= 0) {
while (value > 0x7f) {
@@ -97,7 +80,7 @@ union FloatUnion {
uint32_t i_;
};
-size_t EncodeFloatValue(float value, uint8_t* buffer) {
+static size_t EncodeFloatValue(float value, uint8_t* buffer) {
FloatUnion float_union;
float_union.f_ = value;
uint32_t int_value = float_union.i_;
@@ -114,7 +97,7 @@ union DoubleUnion {
uint64_t l_;
};
-size_t EncodeDoubleValue(double value, uint8_t* buffer) {
+static size_t EncodeDoubleValue(double value, uint8_t* buffer) {
DoubleUnion double_union;
double_union.d_ = value;
uint64_t long_value = double_union.l_;
@@ -126,26 +109,12 @@ size_t EncodeDoubleValue(double value, uint8_t* buffer) {
return 7 - index;
}
-size_t DexWriter::Write(const void* buffer, size_t length, size_t offset) {
- DCHECK_LE(offset + length, mem_map_->Size());
- memcpy(mem_map_->Begin() + offset, buffer, length);
- return length;
-}
-
-size_t DexWriter::WriteSleb128(uint32_t value, size_t offset) {
- uint8_t buffer[8];
- EncodeSignedLeb128(buffer, value);
- return Write(buffer, SignedLeb128Size(value), offset);
-}
+DexWriter::DexWriter(DexLayout* dex_layout, bool compute_offsets)
+ : header_(dex_layout->GetHeader()),
+ dex_layout_(dex_layout),
+ compute_offsets_(compute_offsets) {}
-size_t DexWriter::WriteUleb128(uint32_t value, size_t offset) {
- uint8_t buffer[8];
- EncodeUnsignedLeb128(buffer, value);
- return Write(buffer, UnsignedLeb128Size(value), offset);
-}
-
-size_t DexWriter::WriteEncodedValue(dex_ir::EncodedValue* encoded_value, size_t offset) {
- size_t original_offset = offset;
+void DexWriter::WriteEncodedValue(Stream* stream, dex_ir::EncodedValue* encoded_value) {
size_t start = 0;
size_t length;
uint8_t buffer[8];
@@ -194,284 +163,271 @@ size_t DexWriter::WriteEncodedValue(dex_ir::EncodedValue* encoded_value, size_t
length = EncodeUIntValue(encoded_value->GetMethodId()->GetIndex(), buffer);
break;
case DexFile::kDexAnnotationArray:
- offset += WriteEncodedValueHeader(type, 0, offset);
- offset += WriteEncodedArray(encoded_value->GetEncodedArray()->GetEncodedValues(), offset);
- return offset - original_offset;
+ WriteEncodedValueHeader(stream, type, 0);
+ WriteEncodedArray(stream, encoded_value->GetEncodedArray()->GetEncodedValues());
+ return;
case DexFile::kDexAnnotationAnnotation:
- offset += WriteEncodedValueHeader(type, 0, offset);
- offset += WriteEncodedAnnotation(encoded_value->GetEncodedAnnotation(), offset);
- return offset - original_offset;
+ WriteEncodedValueHeader(stream, type, 0);
+ WriteEncodedAnnotation(stream, encoded_value->GetEncodedAnnotation());
+ return;
case DexFile::kDexAnnotationNull:
- return WriteEncodedValueHeader(type, 0, offset);
+ WriteEncodedValueHeader(stream, type, 0);
+ return;
case DexFile::kDexAnnotationBoolean:
- return WriteEncodedValueHeader(type, encoded_value->GetBoolean() ? 1 : 0, offset);
+ WriteEncodedValueHeader(stream, type, encoded_value->GetBoolean() ? 1 : 0);
+ return;
default:
- return 0;
+ return;
}
- offset += WriteEncodedValueHeader(type, length - 1, offset);
- offset += Write(buffer + start, length, offset);
- return offset - original_offset;
+ WriteEncodedValueHeader(stream, type, length - 1);
+ stream->Write(buffer + start, length);
}
-size_t DexWriter::WriteEncodedValueHeader(int8_t value_type, size_t value_arg, size_t offset) {
+void DexWriter::WriteEncodedValueHeader(Stream* stream, int8_t value_type, size_t value_arg) {
uint8_t buffer[1] = { static_cast<uint8_t>((value_arg << 5) | value_type) };
- return Write(buffer, sizeof(uint8_t), offset);
+ stream->Write(buffer, sizeof(uint8_t));
}
-size_t DexWriter::WriteEncodedArray(dex_ir::EncodedValueVector* values, size_t offset) {
- size_t original_offset = offset;
- offset += WriteUleb128(values->size(), offset);
+void DexWriter::WriteEncodedArray(Stream* stream, dex_ir::EncodedValueVector* values) {
+ stream->WriteUleb128(values->size());
for (std::unique_ptr<dex_ir::EncodedValue>& value : *values) {
- offset += WriteEncodedValue(value.get(), offset);
+ WriteEncodedValue(stream, value.get());
}
- return offset - original_offset;
}
-size_t DexWriter::WriteEncodedAnnotation(dex_ir::EncodedAnnotation* annotation, size_t offset) {
- size_t original_offset = offset;
- offset += WriteUleb128(annotation->GetType()->GetIndex(), offset);
- offset += WriteUleb128(annotation->GetAnnotationElements()->size(), offset);
+void DexWriter::WriteEncodedAnnotation(Stream* stream, dex_ir::EncodedAnnotation* annotation) {
+ stream->WriteUleb128(annotation->GetType()->GetIndex());
+ stream->WriteUleb128(annotation->GetAnnotationElements()->size());
for (std::unique_ptr<dex_ir::AnnotationElement>& annotation_element :
*annotation->GetAnnotationElements()) {
- offset += WriteUleb128(annotation_element->GetName()->GetIndex(), offset);
- offset += WriteEncodedValue(annotation_element->GetValue(), offset);
+ stream->WriteUleb128(annotation_element->GetName()->GetIndex());
+ WriteEncodedValue(stream, annotation_element->GetValue());
}
- return offset - original_offset;
}
-size_t DexWriter::WriteEncodedFields(dex_ir::FieldItemVector* fields, size_t offset) {
- size_t original_offset = offset;
+void DexWriter::WriteEncodedFields(Stream* stream, dex_ir::FieldItemVector* fields) {
uint32_t prev_index = 0;
for (std::unique_ptr<dex_ir::FieldItem>& field : *fields) {
uint32_t index = field->GetFieldId()->GetIndex();
- offset += WriteUleb128(index - prev_index, offset);
- offset += WriteUleb128(field->GetAccessFlags(), offset);
+ stream->WriteUleb128(index - prev_index);
+ stream->WriteUleb128(field->GetAccessFlags());
prev_index = index;
}
- return offset - original_offset;
}
-size_t DexWriter::WriteEncodedMethods(dex_ir::MethodItemVector* methods, size_t offset) {
- size_t original_offset = offset;
+void DexWriter::WriteEncodedMethods(Stream* stream, dex_ir::MethodItemVector* methods) {
uint32_t prev_index = 0;
for (std::unique_ptr<dex_ir::MethodItem>& method : *methods) {
uint32_t index = method->GetMethodId()->GetIndex();
uint32_t code_off = method->GetCodeItem() == nullptr ? 0 : method->GetCodeItem()->GetOffset();
- offset += WriteUleb128(index - prev_index, offset);
- offset += WriteUleb128(method->GetAccessFlags(), offset);
- offset += WriteUleb128(code_off, offset);
+ stream->WriteUleb128(index - prev_index);
+ stream->WriteUleb128(method->GetAccessFlags());
+ stream->WriteUleb128(code_off);
prev_index = index;
}
- return offset - original_offset;
}
// TODO: Refactor this to remove duplicated boilerplate. One way to do this is adding a
// function that takes a CollectionVector<T> and uses overloading.
-uint32_t DexWriter::WriteStringIds(uint32_t offset, bool reserve_only) {
- const uint32_t start = offset;
+void DexWriter::WriteStringIds(Stream* stream, bool reserve_only) {
+ const uint32_t start = stream->Tell();
for (std::unique_ptr<dex_ir::StringId>& string_id : header_->GetCollections().StringIds()) {
- offset = RoundUp(offset, SectionAlignment(DexFile::kDexTypeStringIdItem));
+ stream->AlignTo(SectionAlignment(DexFile::kDexTypeStringIdItem));
if (reserve_only) {
- offset += string_id->GetSize();
+ stream->Skip(string_id->GetSize());
} else {
uint32_t string_data_off = string_id->DataItem()->GetOffset();
- offset += Write(&string_data_off, string_id->GetSize(), offset);
+ stream->Write(&string_data_off, string_id->GetSize());
}
}
- if (compute_offsets_ && start != offset) {
+ if (compute_offsets_ && start != stream->Tell()) {
header_->GetCollections().SetStringIdsOffset(start);
}
- return offset - start;
}
-uint32_t DexWriter::WriteStringDatas(uint32_t offset) {
- const uint32_t start = offset;
+void DexWriter::WriteStringData(Stream* stream, dex_ir::StringData* string_data) {
+ ProcessOffset(stream, string_data);
+ stream->AlignTo(SectionAlignment(DexFile::kDexTypeStringDataItem));
+ stream->WriteUleb128(CountModifiedUtf8Chars(string_data->Data()));
+ stream->Write(string_data->Data(), strlen(string_data->Data()));
+ // Skip null terminator (already zeroed out, no need to write).
+ stream->Skip(1);
+}
+
+void DexWriter::WriteStringDatas(Stream* stream) {
+ const uint32_t start = stream->Tell();
for (std::unique_ptr<dex_ir::StringData>& string_data : header_->GetCollections().StringDatas()) {
- ProcessOffset(&offset, string_data.get());
- offset = RoundUp(offset, SectionAlignment(DexFile::kDexTypeStringDataItem));
- offset += WriteUleb128(CountModifiedUtf8Chars(string_data->Data()), offset);
- // Skip null terminator (already zeroed out, no need to write).
- offset += Write(string_data->Data(), strlen(string_data->Data()), offset) + 1u;
+ WriteStringData(stream, string_data.get());
}
- if (compute_offsets_ && start != offset) {
+ if (compute_offsets_ && start != stream->Tell()) {
header_->GetCollections().SetStringDatasOffset(start);
}
- return offset - start;
}
-uint32_t DexWriter::WriteTypeIds(uint32_t offset) {
+void DexWriter::WriteTypeIds(Stream* stream) {
uint32_t descriptor_idx[1];
- const uint32_t start = offset;
+ const uint32_t start = stream->Tell();
for (std::unique_ptr<dex_ir::TypeId>& type_id : header_->GetCollections().TypeIds()) {
- offset = RoundUp(offset, SectionAlignment(DexFile::kDexTypeTypeIdItem));
- ProcessOffset(&offset, type_id.get());
+ stream->AlignTo(SectionAlignment(DexFile::kDexTypeTypeIdItem));
+ ProcessOffset(stream, type_id.get());
descriptor_idx[0] = type_id->GetStringId()->GetIndex();
- offset += Write(descriptor_idx, type_id->GetSize(), offset);
+ stream->Write(descriptor_idx, type_id->GetSize());
}
- if (compute_offsets_ && start != offset) {
+ if (compute_offsets_ && start != stream->Tell()) {
header_->GetCollections().SetTypeIdsOffset(start);
}
- return offset - start;
}
-uint32_t DexWriter::WriteTypeLists(uint32_t offset) {
+void DexWriter::WriteTypeLists(Stream* stream) {
uint32_t size[1];
uint16_t list[1];
- const uint32_t start = offset;
+ const uint32_t start = stream->Tell();
for (std::unique_ptr<dex_ir::TypeList>& type_list : header_->GetCollections().TypeLists()) {
- offset = RoundUp(offset, SectionAlignment(DexFile::kDexTypeTypeList));
+ stream->AlignTo(SectionAlignment(DexFile::kDexTypeTypeList));
size[0] = type_list->GetTypeList()->size();
- ProcessOffset(&offset, type_list.get());
- offset += Write(size, sizeof(uint32_t), offset);
+ ProcessOffset(stream, type_list.get());
+ stream->Write(size, sizeof(uint32_t));
for (const dex_ir::TypeId* type_id : *type_list->GetTypeList()) {
list[0] = type_id->GetIndex();
- offset += Write(list, sizeof(uint16_t), offset);
+ stream->Write(list, sizeof(uint16_t));
}
}
- if (compute_offsets_ && start != offset) {
+ if (compute_offsets_ && start != stream->Tell()) {
header_->GetCollections().SetTypeListsOffset(start);
}
- return offset - start;
}
-uint32_t DexWriter::WriteProtoIds(uint32_t offset, bool reserve_only) {
+void DexWriter::WriteProtoIds(Stream* stream, bool reserve_only) {
uint32_t buffer[3];
- const uint32_t start = offset;
+ const uint32_t start = stream->Tell();
for (std::unique_ptr<dex_ir::ProtoId>& proto_id : header_->GetCollections().ProtoIds()) {
- offset = RoundUp(offset, SectionAlignment(DexFile::kDexTypeProtoIdItem));
- ProcessOffset(&offset, proto_id.get());
+ stream->AlignTo(SectionAlignment(DexFile::kDexTypeProtoIdItem));
+ ProcessOffset(stream, proto_id.get());
if (reserve_only) {
- offset += proto_id->GetSize();
+ stream->Skip(proto_id->GetSize());
} else {
buffer[0] = proto_id->Shorty()->GetIndex();
buffer[1] = proto_id->ReturnType()->GetIndex();
buffer[2] = proto_id->Parameters() == nullptr ? 0 : proto_id->Parameters()->GetOffset();
- offset += Write(buffer, proto_id->GetSize(), offset);
+ stream->Write(buffer, proto_id->GetSize());
}
}
- if (compute_offsets_ && start != offset) {
+ if (compute_offsets_ && start != stream->Tell()) {
header_->GetCollections().SetProtoIdsOffset(start);
}
- return offset - start;
}
-uint32_t DexWriter::WriteFieldIds(uint32_t offset) {
+void DexWriter::WriteFieldIds(Stream* stream) {
uint16_t buffer[4];
- const uint32_t start = offset;
+ const uint32_t start = stream->Tell();
for (std::unique_ptr<dex_ir::FieldId>& field_id : header_->GetCollections().FieldIds()) {
- offset = RoundUp(offset, SectionAlignment(DexFile::kDexTypeFieldIdItem));
- ProcessOffset(&offset, field_id.get());
+ stream->AlignTo(SectionAlignment(DexFile::kDexTypeFieldIdItem));
+ ProcessOffset(stream, field_id.get());
buffer[0] = field_id->Class()->GetIndex();
buffer[1] = field_id->Type()->GetIndex();
buffer[2] = field_id->Name()->GetIndex();
buffer[3] = field_id->Name()->GetIndex() >> 16;
- offset += Write(buffer, field_id->GetSize(), offset);
+ stream->Write(buffer, field_id->GetSize());
}
- if (compute_offsets_ && start != offset) {
+ if (compute_offsets_ && start != stream->Tell()) {
header_->GetCollections().SetFieldIdsOffset(start);
}
- return offset - start;
}
-uint32_t DexWriter::WriteMethodIds(uint32_t offset) {
+void DexWriter::WriteMethodIds(Stream* stream) {
uint16_t buffer[4];
- const uint32_t start = offset;
+ const uint32_t start = stream->Tell();
for (std::unique_ptr<dex_ir::MethodId>& method_id : header_->GetCollections().MethodIds()) {
- offset = RoundUp(offset, SectionAlignment(DexFile::kDexTypeMethodIdItem));
- ProcessOffset(&offset, method_id.get());
+ stream->AlignTo(SectionAlignment(DexFile::kDexTypeMethodIdItem));
+ ProcessOffset(stream, method_id.get());
buffer[0] = method_id->Class()->GetIndex();
buffer[1] = method_id->Proto()->GetIndex();
buffer[2] = method_id->Name()->GetIndex();
buffer[3] = method_id->Name()->GetIndex() >> 16;
- offset += Write(buffer, method_id->GetSize(), offset);
+ stream->Write(buffer, method_id->GetSize());
}
- if (compute_offsets_ && start != offset) {
+ if (compute_offsets_ && start != stream->Tell()) {
header_->GetCollections().SetMethodIdsOffset(start);
}
- return offset - start;
}
-uint32_t DexWriter::WriteEncodedArrays(uint32_t offset) {
- const uint32_t start = offset;
+void DexWriter::WriteEncodedArrays(Stream* stream) {
+ const uint32_t start = stream->Tell();
for (std::unique_ptr<dex_ir::EncodedArrayItem>& encoded_array :
header_->GetCollections().EncodedArrayItems()) {
- offset = RoundUp(offset, SectionAlignment(DexFile::kDexTypeEncodedArrayItem));
- ProcessOffset(&offset, encoded_array.get());
- offset += WriteEncodedArray(encoded_array->GetEncodedValues(), offset);
+ stream->AlignTo(SectionAlignment(DexFile::kDexTypeEncodedArrayItem));
+ ProcessOffset(stream, encoded_array.get());
+ WriteEncodedArray(stream, encoded_array->GetEncodedValues());
}
- if (compute_offsets_ && start != offset) {
+ if (compute_offsets_ && start != stream->Tell()) {
header_->GetCollections().SetEncodedArrayItemsOffset(start);
}
- return offset - start;
}
-uint32_t DexWriter::WriteAnnotations(uint32_t offset) {
+void DexWriter::WriteAnnotations(Stream* stream) {
uint8_t visibility[1];
- const uint32_t start = offset;
+ const uint32_t start = stream->Tell();
for (std::unique_ptr<dex_ir::AnnotationItem>& annotation :
header_->GetCollections().AnnotationItems()) {
- offset = RoundUp(offset, SectionAlignment(DexFile::kDexTypeAnnotationItem));
+ stream->AlignTo(SectionAlignment(DexFile::kDexTypeAnnotationItem));
visibility[0] = annotation->GetVisibility();
- ProcessOffset(&offset, annotation.get());
- offset += Write(visibility, sizeof(uint8_t), offset);
- offset += WriteEncodedAnnotation(annotation->GetAnnotation(), offset);
+ ProcessOffset(stream, annotation.get());
+ stream->Write(visibility, sizeof(uint8_t));
+ WriteEncodedAnnotation(stream, annotation->GetAnnotation());
}
- if (compute_offsets_ && start != offset) {
+ if (compute_offsets_ && start != stream->Tell()) {
header_->GetCollections().SetAnnotationItemsOffset(start);
}
- return offset - start;
}
-uint32_t DexWriter::WriteAnnotationSets(uint32_t offset) {
+void DexWriter::WriteAnnotationSets(Stream* stream) {
uint32_t size[1];
uint32_t annotation_off[1];
- const uint32_t start = offset;
+ const uint32_t start = stream->Tell();
for (std::unique_ptr<dex_ir::AnnotationSetItem>& annotation_set :
header_->GetCollections().AnnotationSetItems()) {
- offset = RoundUp(offset, SectionAlignment(DexFile::kDexTypeAnnotationSetItem));
+ stream->AlignTo(SectionAlignment(DexFile::kDexTypeAnnotationSetItem));
size[0] = annotation_set->GetItems()->size();
- ProcessOffset(&offset, annotation_set.get());
- offset += Write(size, sizeof(uint32_t), offset);
+ ProcessOffset(stream, annotation_set.get());
+ stream->Write(size, sizeof(uint32_t));
for (dex_ir::AnnotationItem* annotation : *annotation_set->GetItems()) {
annotation_off[0] = annotation->GetOffset();
- offset += Write(annotation_off, sizeof(uint32_t), offset);
+ stream->Write(annotation_off, sizeof(uint32_t));
}
}
- if (compute_offsets_ && start != offset) {
+ if (compute_offsets_ && start != stream->Tell()) {
header_->GetCollections().SetAnnotationSetItemsOffset(start);
}
- return offset - start;
}
-uint32_t DexWriter::WriteAnnotationSetRefs(uint32_t offset) {
+void DexWriter::WriteAnnotationSetRefs(Stream* stream) {
uint32_t size[1];
uint32_t annotations_off[1];
- const uint32_t start = offset;
+ const uint32_t start = stream->Tell();
for (std::unique_ptr<dex_ir::AnnotationSetRefList>& annotation_set_ref :
header_->GetCollections().AnnotationSetRefLists()) {
- offset = RoundUp(offset, SectionAlignment(DexFile::kDexTypeAnnotationSetRefList));
+ stream->AlignTo(SectionAlignment(DexFile::kDexTypeAnnotationSetRefList));
size[0] = annotation_set_ref->GetItems()->size();
- ProcessOffset(&offset, annotation_set_ref.get());
- offset += Write(size, sizeof(uint32_t), offset);
+ ProcessOffset(stream, annotation_set_ref.get());
+ stream->Write(size, sizeof(uint32_t));
for (dex_ir::AnnotationSetItem* annotation_set : *annotation_set_ref->GetItems()) {
annotations_off[0] = annotation_set == nullptr ? 0 : annotation_set->GetOffset();
- offset += Write(annotations_off, sizeof(uint32_t), offset);
+ stream->Write(annotations_off, sizeof(uint32_t));
}
}
- if (compute_offsets_ && start != offset) {
+ if (compute_offsets_ && start != stream->Tell()) {
header_->GetCollections().SetAnnotationSetRefListsOffset(start);
}
- return offset - start;
}
-uint32_t DexWriter::WriteAnnotationsDirectories(uint32_t offset) {
+void DexWriter::WriteAnnotationsDirectories(Stream* stream) {
uint32_t directory_buffer[4];
uint32_t annotation_buffer[2];
- const uint32_t start = offset;
+ const uint32_t start = stream->Tell();
for (std::unique_ptr<dex_ir::AnnotationsDirectoryItem>& annotations_directory :
header_->GetCollections().AnnotationsDirectoryItems()) {
- offset = RoundUp(offset, SectionAlignment(DexFile::kDexTypeAnnotationsDirectoryItem));
- ProcessOffset(&offset, annotations_directory.get());
+ stream->AlignTo(SectionAlignment(DexFile::kDexTypeAnnotationsDirectoryItem));
+ ProcessOffset(stream, annotations_directory.get());
directory_buffer[0] = annotations_directory->GetClassAnnotation() == nullptr ? 0 :
annotations_directory->GetClassAnnotation()->GetOffset();
directory_buffer[1] = annotations_directory->GetFieldAnnotations() == nullptr ? 0 :
@@ -480,13 +436,13 @@ uint32_t DexWriter::WriteAnnotationsDirectories(uint32_t offset) {
annotations_directory->GetMethodAnnotations()->size();
directory_buffer[3] = annotations_directory->GetParameterAnnotations() == nullptr ? 0 :
annotations_directory->GetParameterAnnotations()->size();
- offset += Write(directory_buffer, 4 * sizeof(uint32_t), offset);
+ stream->Write(directory_buffer, 4 * sizeof(uint32_t));
if (annotations_directory->GetFieldAnnotations() != nullptr) {
for (std::unique_ptr<dex_ir::FieldAnnotation>& field :
*annotations_directory->GetFieldAnnotations()) {
annotation_buffer[0] = field->GetFieldId()->GetIndex();
annotation_buffer[1] = field->GetAnnotationSetItem()->GetOffset();
- offset += Write(annotation_buffer, 2 * sizeof(uint32_t), offset);
+ stream->Write(annotation_buffer, 2 * sizeof(uint32_t));
}
}
if (annotations_directory->GetMethodAnnotations() != nullptr) {
@@ -494,7 +450,7 @@ uint32_t DexWriter::WriteAnnotationsDirectories(uint32_t offset) {
*annotations_directory->GetMethodAnnotations()) {
annotation_buffer[0] = method->GetMethodId()->GetIndex();
annotation_buffer[1] = method->GetAnnotationSetItem()->GetOffset();
- offset += Write(annotation_buffer, 2 * sizeof(uint32_t), offset);
+ stream->Write(annotation_buffer, 2 * sizeof(uint32_t));
}
}
if (annotations_directory->GetParameterAnnotations() != nullptr) {
@@ -502,108 +458,132 @@ uint32_t DexWriter::WriteAnnotationsDirectories(uint32_t offset) {
*annotations_directory->GetParameterAnnotations()) {
annotation_buffer[0] = parameter->GetMethodId()->GetIndex();
annotation_buffer[1] = parameter->GetAnnotations()->GetOffset();
- offset += Write(annotation_buffer, 2 * sizeof(uint32_t), offset);
+ stream->Write(annotation_buffer, 2 * sizeof(uint32_t));
}
}
}
- if (compute_offsets_ && start != offset) {
+ if (compute_offsets_ && start != stream->Tell()) {
header_->GetCollections().SetAnnotationsDirectoryItemsOffset(start);
}
- return offset - start;
}
-uint32_t DexWriter::WriteDebugInfoItems(uint32_t offset) {
- const uint32_t start = offset;
+void DexWriter::WriteDebugInfoItem(Stream* stream, dex_ir::DebugInfoItem* debug_info) {
+ stream->AlignTo(SectionAlignment(DexFile::kDexTypeDebugInfoItem));
+ ProcessOffset(stream, debug_info);
+ stream->Write(debug_info->GetDebugInfo(), debug_info->GetDebugInfoSize());
+}
+
+void DexWriter::WriteDebugInfoItems(Stream* stream) {
+ const uint32_t start = stream->Tell();
for (std::unique_ptr<dex_ir::DebugInfoItem>& debug_info :
header_->GetCollections().DebugInfoItems()) {
- offset = RoundUp(offset, SectionAlignment(DexFile::kDexTypeDebugInfoItem));
- ProcessOffset(&offset, debug_info.get());
- offset += Write(debug_info->GetDebugInfo(), debug_info->GetDebugInfoSize(), offset);
+ WriteDebugInfoItem(stream, debug_info.get());
}
- if (compute_offsets_ && start != offset) {
+ if (compute_offsets_ && start != stream->Tell()) {
header_->GetCollections().SetDebugInfoItemsOffset(start);
}
- return offset - start;
}
-uint32_t DexWriter::WriteCodeItems(uint32_t offset, bool reserve_only) {
+void DexWriter::WriteCodeItemPostInstructionData(Stream* stream,
+ dex_ir::CodeItem* code_item,
+ bool reserve_only) {
+ if (code_item->TriesSize() != 0) {
+ stream->AlignTo(DexFile::TryItem::kAlignment);
+ // Write try items.
+ for (std::unique_ptr<const dex_ir::TryItem>& try_item : *code_item->Tries()) {
+ DexFile::TryItem disk_try_item;
+ if (!reserve_only) {
+ disk_try_item.start_addr_ = try_item->StartAddr();
+ disk_try_item.insn_count_ = try_item->InsnCount();
+ disk_try_item.handler_off_ = try_item->GetHandlers()->GetListOffset();
+ }
+ stream->Write(&disk_try_item, sizeof(disk_try_item));
+ }
+ // Leave offset pointing to the end of the try items.
+ const size_t offset = stream->Tell();
+ size_t max_offset = offset + stream->WriteUleb128(code_item->Handlers()->size());
+ for (std::unique_ptr<const dex_ir::CatchHandler>& handlers : *code_item->Handlers()) {
+ stream->Seek(offset + handlers->GetListOffset());
+ uint32_t size = handlers->HasCatchAll() ? (handlers->GetHandlers()->size() - 1) * -1 :
+ handlers->GetHandlers()->size();
+ stream->WriteSleb128(size);
+ for (std::unique_ptr<const dex_ir::TypeAddrPair>& handler : *handlers->GetHandlers()) {
+ if (handler->GetTypeId() != nullptr) {
+ stream->WriteUleb128(handler->GetTypeId()->GetIndex());
+ }
+ stream->WriteUleb128(handler->GetAddress());
+ }
+ // TODO: Clean this up to write the handlers in address order.
+ max_offset = std::max(max_offset, stream->Tell());
+ }
+ stream->Seek(max_offset);
+ }
+}
+
+void DexWriter::WriteCodeItem(Stream* stream,
+ dex_ir::CodeItem* code_item,
+ bool reserve_only) {
+ DCHECK(code_item != nullptr);
+ const uint32_t start_offset = stream->Tell();
+ stream->AlignTo(SectionAlignment(DexFile::kDexTypeCodeItem));
+ ProcessOffset(stream, code_item);
+
+ StandardDexFile::CodeItem disk_code_item;
+ if (!reserve_only) {
+ disk_code_item.registers_size_ = code_item->RegistersSize();
+ disk_code_item.ins_size_ = code_item->InsSize();
+ disk_code_item.outs_size_ = code_item->OutsSize();
+ disk_code_item.tries_size_ = code_item->TriesSize();
+ disk_code_item.debug_info_off_ = code_item->DebugInfo() == nullptr
+ ? 0
+ : code_item->DebugInfo()->GetOffset();
+ disk_code_item.insns_size_in_code_units_ = code_item->InsnsSize();
+ }
+ // Avoid using sizeof so that we don't write the fake instruction array at the end of the code
+ // item.
+ stream->Write(&disk_code_item, OFFSETOF_MEMBER(StandardDexFile::CodeItem, insns_));
+ // Write the instructions.
+ stream->Write(code_item->Insns(), code_item->InsnsSize() * sizeof(uint16_t));
+ // Write the post instruction data.
+ WriteCodeItemPostInstructionData(stream, code_item, reserve_only);
+ if (reserve_only) {
+ stream->Clear(start_offset, stream->Tell() - start_offset);
+ }
+}
+
+void DexWriter::WriteCodeItems(Stream* stream, bool reserve_only) {
DexLayoutSection* code_section = nullptr;
if (!reserve_only && dex_layout_ != nullptr) {
code_section = &dex_layout_->GetSections().sections_[static_cast<size_t>(
DexLayoutSections::SectionType::kSectionTypeCode)];
}
- uint16_t uint16_buffer[4] = {};
- uint32_t uint32_buffer[2] = {};
- uint32_t start = offset;
+ const uint32_t start = stream->Tell();
for (auto& code_item : header_->GetCollections().CodeItems()) {
- offset = RoundUp(offset, SectionAlignment(DexFile::kDexTypeCodeItem));
- ProcessOffset(&offset, code_item.get());
- if (!reserve_only) {
- uint16_buffer[0] = code_item->RegistersSize();
- uint16_buffer[1] = code_item->InsSize();
- uint16_buffer[2] = code_item->OutsSize();
- uint16_buffer[3] = code_item->TriesSize();
- uint32_buffer[0] = code_item->DebugInfo() == nullptr ? 0 :
- code_item->DebugInfo()->GetOffset();
- uint32_buffer[1] = code_item->InsnsSize();
- // Only add the section hotness info once.
- if (code_section != nullptr) {
- auto it = dex_layout_->LayoutHotnessInfo().code_item_layout_.find(code_item.get());
- if (it != dex_layout_->LayoutHotnessInfo().code_item_layout_.end()) {
- code_section->parts_[static_cast<size_t>(it->second)].CombineSection(
- code_item->GetOffset(), code_item->GetOffset() + code_item->GetSize());
- }
- }
- }
- offset += Write(uint16_buffer, 4 * sizeof(uint16_t), offset);
- offset += Write(uint32_buffer, 2 * sizeof(uint32_t), offset);
- offset += Write(code_item->Insns(), code_item->InsnsSize() * sizeof(uint16_t), offset);
- if (code_item->TriesSize() != 0) {
- if (code_item->InsnsSize() % 2 != 0) {
- uint16_t padding[1] = { 0 };
- offset += Write(padding, sizeof(uint16_t), offset);
- }
- uint32_t start_addr[1];
- uint16_t insn_count_and_handler_off[2];
- for (std::unique_ptr<const dex_ir::TryItem>& try_item : *code_item->Tries()) {
- start_addr[0] = try_item->StartAddr();
- insn_count_and_handler_off[0] = try_item->InsnCount();
- insn_count_and_handler_off[1] = try_item->GetHandlers()->GetListOffset();
- offset += Write(start_addr, sizeof(uint32_t), offset);
- offset += Write(insn_count_and_handler_off, 2 * sizeof(uint16_t), offset);
- }
- // Leave offset pointing to the end of the try items.
- UNUSED(WriteUleb128(code_item->Handlers()->size(), offset));
- for (std::unique_ptr<const dex_ir::CatchHandler>& handlers : *code_item->Handlers()) {
- size_t list_offset = offset + handlers->GetListOffset();
- uint32_t size = handlers->HasCatchAll() ? (handlers->GetHandlers()->size() - 1) * -1 :
- handlers->GetHandlers()->size();
- list_offset += WriteSleb128(size, list_offset);
- for (std::unique_ptr<const dex_ir::TypeAddrPair>& handler : *handlers->GetHandlers()) {
- if (handler->GetTypeId() != nullptr) {
- list_offset += WriteUleb128(handler->GetTypeId()->GetIndex(), list_offset);
- }
- list_offset += WriteUleb128(handler->GetAddress(), list_offset);
- }
+ uint32_t start_offset = stream->Tell();
+ WriteCodeItem(stream, code_item.get(), reserve_only);
+ // Only add the section hotness info once.
+ if (!reserve_only && code_section != nullptr) {
+ auto it = dex_layout_->LayoutHotnessInfo().code_item_layout_.find(code_item.get());
+ if (it != dex_layout_->LayoutHotnessInfo().code_item_layout_.end()) {
+ code_section->parts_[static_cast<size_t>(it->second)].CombineSection(
+ start_offset,
+ stream->Tell());
}
}
- // TODO: Clean this up to properly calculate the size instead of assuming it doesn't change.
- offset = code_item->GetOffset() + code_item->GetSize();
}
- if (compute_offsets_ && start != offset) {
+ if (compute_offsets_ && start != stream->Tell()) {
header_->GetCollections().SetCodeItemsOffset(start);
}
- return offset - start;
}
-uint32_t DexWriter::WriteClassDefs(uint32_t offset, bool reserve_only) {
- const uint32_t start = offset;
+void DexWriter::WriteClassDefs(Stream* stream, bool reserve_only) {
+ const uint32_t start = stream->Tell();
uint32_t class_def_buffer[8];
for (std::unique_ptr<dex_ir::ClassDef>& class_def : header_->GetCollections().ClassDefs()) {
- offset = RoundUp(offset, SectionAlignment(DexFile::kDexTypeClassDefItem));
+ stream->AlignTo(SectionAlignment(DexFile::kDexTypeClassDefItem));
if (reserve_only) {
- offset += class_def->GetSize();
+ stream->Skip(class_def->GetSize());
} else {
class_def_buffer[0] = class_def->ClassType()->GetIndex();
class_def_buffer[1] = class_def->GetAccessFlags();
@@ -618,94 +598,86 @@ uint32_t DexWriter::WriteClassDefs(uint32_t offset, bool reserve_only) {
class_def->GetClassData()->GetOffset();
class_def_buffer[7] = class_def->StaticValues() == nullptr ? 0 :
class_def->StaticValues()->GetOffset();
- offset += Write(class_def_buffer, class_def->GetSize(), offset);
+ stream->Write(class_def_buffer, class_def->GetSize());
}
}
- if (compute_offsets_ && start != offset) {
+ if (compute_offsets_ && start != stream->Tell()) {
header_->GetCollections().SetClassDefsOffset(start);
}
- return offset - start;
}
-uint32_t DexWriter::WriteClassDatas(uint32_t offset) {
- const uint32_t start = offset;
+void DexWriter::WriteClassDatas(Stream* stream) {
+ const uint32_t start = stream->Tell();
for (const std::unique_ptr<dex_ir::ClassData>& class_data :
header_->GetCollections().ClassDatas()) {
- offset = RoundUp(offset, SectionAlignment(DexFile::kDexTypeClassDataItem));
- ProcessOffset(&offset, class_data.get());
- offset += WriteUleb128(class_data->StaticFields()->size(), offset);
- offset += WriteUleb128(class_data->InstanceFields()->size(), offset);
- offset += WriteUleb128(class_data->DirectMethods()->size(), offset);
- offset += WriteUleb128(class_data->VirtualMethods()->size(), offset);
- offset += WriteEncodedFields(class_data->StaticFields(), offset);
- offset += WriteEncodedFields(class_data->InstanceFields(), offset);
- offset += WriteEncodedMethods(class_data->DirectMethods(), offset);
- offset += WriteEncodedMethods(class_data->VirtualMethods(), offset);
- }
- if (compute_offsets_ && start != offset) {
+ stream->AlignTo(SectionAlignment(DexFile::kDexTypeClassDataItem));
+ ProcessOffset(stream, class_data.get());
+ stream->WriteUleb128(class_data->StaticFields()->size());
+ stream->WriteUleb128(class_data->InstanceFields()->size());
+ stream->WriteUleb128(class_data->DirectMethods()->size());
+ stream->WriteUleb128(class_data->VirtualMethods()->size());
+ WriteEncodedFields(stream, class_data->StaticFields());
+ WriteEncodedFields(stream, class_data->InstanceFields());
+ WriteEncodedMethods(stream, class_data->DirectMethods());
+ WriteEncodedMethods(stream, class_data->VirtualMethods());
+ }
+ if (compute_offsets_ && start != stream->Tell()) {
header_->GetCollections().SetClassDatasOffset(start);
}
- return offset - start;
}
-uint32_t DexWriter::WriteCallSiteIds(uint32_t offset, bool reserve_only) {
- const uint32_t start = offset;
+void DexWriter::WriteCallSiteIds(Stream* stream, bool reserve_only) {
+ const uint32_t start = stream->Tell();
uint32_t call_site_off[1];
for (std::unique_ptr<dex_ir::CallSiteId>& call_site_id :
header_->GetCollections().CallSiteIds()) {
- offset = RoundUp(offset, SectionAlignment(DexFile::kDexTypeCallSiteIdItem));
+ stream->AlignTo(SectionAlignment(DexFile::kDexTypeCallSiteIdItem));
if (reserve_only) {
- offset += call_site_id->GetSize();
+ stream->Skip(call_site_id->GetSize());
} else {
call_site_off[0] = call_site_id->CallSiteItem()->GetOffset();
- offset += Write(call_site_off, call_site_id->GetSize(), offset);
+ stream->Write(call_site_off, call_site_id->GetSize());
}
}
- if (compute_offsets_ && start != offset) {
+ if (compute_offsets_ && start != stream->Tell()) {
header_->GetCollections().SetCallSiteIdsOffset(start);
}
- return offset - start;
}
-uint32_t DexWriter::WriteMethodHandles(uint32_t offset) {
- const uint32_t start = offset;
+void DexWriter::WriteMethodHandles(Stream* stream) {
+ const uint32_t start = stream->Tell();
uint16_t method_handle_buff[4];
for (std::unique_ptr<dex_ir::MethodHandleItem>& method_handle :
header_->GetCollections().MethodHandleItems()) {
- offset = RoundUp(offset, SectionAlignment(DexFile::kDexTypeMethodHandleItem));
+ stream->AlignTo(SectionAlignment(DexFile::kDexTypeMethodHandleItem));
method_handle_buff[0] = static_cast<uint16_t>(method_handle->GetMethodHandleType());
method_handle_buff[1] = 0; // unused.
method_handle_buff[2] = method_handle->GetFieldOrMethodId()->GetIndex();
method_handle_buff[3] = 0; // unused.
- offset += Write(method_handle_buff, method_handle->GetSize(), offset);
+ stream->Write(method_handle_buff, method_handle->GetSize());
}
- if (compute_offsets_ && start != offset) {
+ if (compute_offsets_ && start != stream->Tell()) {
header_->GetCollections().SetMethodHandleItemsOffset(start);
}
- return offset - start;
}
-uint32_t DexWriter::WriteMapItems(uint32_t offset, MapItemQueue* queue) {
+void DexWriter::WriteMapItems(Stream* stream, MapItemQueue* queue) {
// All the sections should already have been added.
- uint16_t uint16_buffer[2];
- uint32_t uint32_buffer[2];
- uint16_buffer[1] = 0;
- uint32_buffer[0] = queue->size();
- const uint32_t start = offset;
- offset += Write(uint32_buffer, sizeof(uint32_t), offset);
+ const uint32_t map_list_size = queue->size();
+ stream->Write(&map_list_size, sizeof(map_list_size));
while (!queue->empty()) {
- const MapItem& map_item = queue->top();
- uint16_buffer[0] = map_item.type_;
- uint32_buffer[0] = map_item.size_;
- uint32_buffer[1] = map_item.offset_;
- offset += Write(uint16_buffer, 2 * sizeof(uint16_t), offset);
- offset += Write(uint32_buffer, 2 * sizeof(uint32_t), offset);
+ const MapItem& item = queue->top();
+ DexFile::MapItem map_item;
+ map_item.type_ = item.type_;
+ map_item.size_ = item.size_;
+ map_item.offset_ = item.offset_;
+ map_item.unused_ = 0u;
+ stream->Write(&map_item, sizeof(map_item));
queue->pop();
}
- return offset - start;
}
-uint32_t DexWriter::GenerateAndWriteMapItems(uint32_t offset) {
+void DexWriter::GenerateAndWriteMapItems(Stream* stream) {
dex_ir::Collections& collection = header_->GetCollections();
MapItemQueue queue;
@@ -767,12 +739,10 @@ uint32_t DexWriter::GenerateAndWriteMapItems(uint32_t offset) {
queue.AddIfNotEmpty(MapItem(DexFile::kDexTypeAnnotationsDirectoryItem,
collection.AnnotationsDirectoryItemsSize(),
collection.AnnotationsDirectoryItemsOffset()));
-
- // Write the map items.
- return WriteMapItems(offset, &queue);
+ WriteMapItems(stream, &queue);
}
-void DexWriter::WriteHeader() {
+void DexWriter::WriteHeader(Stream* stream) {
StandardDexFile::Header header;
if (CompactDexFile::IsMagicValid(header_->Magic())) {
StandardDexFile::WriteMagic(header.magic_);
@@ -810,78 +780,97 @@ void DexWriter::WriteHeader() {
CHECK_EQ(sizeof(header), GetHeaderSize());
static_assert(sizeof(header) == 0x70, "Size doesn't match dex spec");
- UNUSED(Write(reinterpret_cast<uint8_t*>(&header), sizeof(header), 0u));
+ stream->Seek(0);
+ stream->Overwrite(reinterpret_cast<uint8_t*>(&header), sizeof(header));
}
size_t DexWriter::GetHeaderSize() const {
return sizeof(StandardDexFile::Header);
}
-void DexWriter::WriteMemMap() {
+void DexWriter::Write(DexContainer* output) {
+ Stream stream_storage(output->GetMainSection());
+ Stream* stream = &stream_storage;
+
// Starting offset is right after the header.
- uint32_t offset = GetHeaderSize();
+ stream->Seek(GetHeaderSize());
dex_ir::Collections& collection = header_->GetCollections();
// Based on: https://source.android.com/devices/tech/dalvik/dex-format
// Since the offsets may not be calculated already, the writing must be done in the correct order.
- const uint32_t string_ids_offset = offset;
- offset += WriteStringIds(offset, /*reserve_only*/ true);
- offset += WriteTypeIds(offset);
- const uint32_t proto_ids_offset = offset;
- offset += WriteProtoIds(offset, /*reserve_only*/ true);
- offset += WriteFieldIds(offset);
- offset += WriteMethodIds(offset);
- const uint32_t class_defs_offset = offset;
- offset += WriteClassDefs(offset, /*reserve_only*/ true);
- const uint32_t call_site_ids_offset = offset;
- offset += WriteCallSiteIds(offset, /*reserve_only*/ true);
- offset += WriteMethodHandles(offset);
+ const uint32_t string_ids_offset = stream->Tell();
+ WriteStringIds(stream, /*reserve_only*/ true);
+ WriteTypeIds(stream);
+ const uint32_t proto_ids_offset = stream->Tell();
+ WriteProtoIds(stream, /*reserve_only*/ true);
+ WriteFieldIds(stream);
+ WriteMethodIds(stream);
+ const uint32_t class_defs_offset = stream->Tell();
+ WriteClassDefs(stream, /*reserve_only*/ true);
+ const uint32_t call_site_ids_offset = stream->Tell();
+ WriteCallSiteIds(stream, /*reserve_only*/ true);
+ WriteMethodHandles(stream);
uint32_t data_offset_ = 0u;
if (compute_offsets_) {
// Data section.
- offset = RoundUp(offset, kDataSectionAlignment);
- data_offset_ = offset;
+ stream->AlignTo(kDataSectionAlignment);
+ data_offset_ = stream->Tell();
}
// Write code item first to minimize the space required for encoded methods.
// Reserve code item space since we need the debug offsets to actually write them.
- const uint32_t code_items_offset = offset;
- offset += WriteCodeItems(offset, /*reserve_only*/ true);
+ const uint32_t code_items_offset = stream->Tell();
+ WriteCodeItems(stream, /*reserve_only*/ true);
// Write debug info section.
- offset += WriteDebugInfoItems(offset);
- // Actually write code items since debug info offsets are calculated now.
- WriteCodeItems(code_items_offset, /*reserve_only*/ false);
-
- offset += WriteEncodedArrays(offset);
- offset += WriteAnnotations(offset);
- offset += WriteAnnotationSets(offset);
- offset += WriteAnnotationSetRefs(offset);
- offset += WriteAnnotationsDirectories(offset);
- offset += WriteTypeLists(offset);
- offset += WriteClassDatas(offset);
- offset += WriteStringDatas(offset);
+ WriteDebugInfoItems(stream);
+ {
+ // Actually write code items since debug info offsets are calculated now.
+ Stream::ScopedSeek seek(stream, code_items_offset);
+ WriteCodeItems(stream, /*reserve_only*/ false);
+ }
+
+ WriteEncodedArrays(stream);
+ WriteAnnotations(stream);
+ WriteAnnotationSets(stream);
+ WriteAnnotationSetRefs(stream);
+ WriteAnnotationsDirectories(stream);
+ WriteTypeLists(stream);
+ WriteClassDatas(stream);
+ WriteStringDatas(stream);
// Write delayed id sections that depend on data sections.
- WriteStringIds(string_ids_offset, /*reserve_only*/ false);
- WriteProtoIds(proto_ids_offset, /*reserve_only*/ false);
- WriteClassDefs(class_defs_offset, /*reserve_only*/ false);
- WriteCallSiteIds(call_site_ids_offset, /*reserve_only*/ false);
+ {
+ Stream::ScopedSeek seek(stream, string_ids_offset);
+ WriteStringIds(stream, /*reserve_only*/ false);
+ }
+ {
+ Stream::ScopedSeek seek(stream, proto_ids_offset);
+ WriteProtoIds(stream, /*reserve_only*/ false);
+ }
+ {
+ Stream::ScopedSeek seek(stream, class_defs_offset);
+ WriteClassDefs(stream, /*reserve_only*/ false);
+ }
+ {
+ Stream::ScopedSeek seek(stream, call_site_ids_offset);
+ WriteCallSiteIds(stream, /*reserve_only*/ false);
+ }
// Write the map list.
if (compute_offsets_) {
- offset = RoundUp(offset, SectionAlignment(DexFile::kDexTypeMapList));
- collection.SetMapListOffset(offset);
+ stream->AlignTo(SectionAlignment(DexFile::kDexTypeMapList));
+ collection.SetMapListOffset(stream->Tell());
} else {
- offset = collection.MapListOffset();
+ stream->Seek(collection.MapListOffset());
}
- offset += GenerateAndWriteMapItems(offset);
- offset = RoundUp(offset, kDataSectionAlignment);
+ GenerateAndWriteMapItems(stream);
+ stream->AlignTo(kDataSectionAlignment);
// Map items are included in the data section.
if (compute_offsets_) {
- header_->SetDataSize(offset - data_offset_);
+ header_->SetDataSize(stream->Tell() - data_offset_);
if (header_->DataSize() != 0) {
// Offset must be zero when the size is zero.
header_->SetDataOffset(data_offset_);
@@ -895,37 +884,45 @@ void DexWriter::WriteMemMap() {
if (link_data.size() > 0) {
CHECK_EQ(header_->LinkSize(), static_cast<uint32_t>(link_data.size()));
if (compute_offsets_) {
- header_->SetLinkOffset(offset);
+ header_->SetLinkOffset(stream->Tell());
+ } else {
+ stream->Seek(header_->LinkOffset());
}
- offset += Write(&link_data[0], link_data.size(), header_->LinkOffset());
+ stream->Write(&link_data[0], link_data.size());
}
// Write header last.
if (compute_offsets_) {
- header_->SetFileSize(offset);
+ header_->SetFileSize(stream->Tell());
}
- WriteHeader();
+ WriteHeader(stream);
if (dex_layout_->GetOptions().update_checksum_) {
- header_->SetChecksum(DexFile::CalculateChecksum(mem_map_->Begin(), offset));
+ header_->SetChecksum(DexFile::CalculateChecksum(stream->Begin(), header_->FileSize()));
// Rewrite the header with the calculated checksum.
- WriteHeader();
+ WriteHeader(stream);
}
+
+ // Trim the main section so that it is sized exactly as large as the dex file.
+ output->GetMainSection()->Resize(header_->FileSize());
}
-void DexWriter::Output(dex_ir::Header* header,
- MemMap* mem_map,
- DexLayout* dex_layout,
- bool compute_offsets,
- CompactDexLevel compact_dex_level) {
+void DexWriter::Output(DexLayout* dex_layout,
+ std::unique_ptr<DexContainer>* container,
+ bool compute_offsets) {
CHECK(dex_layout != nullptr);
std::unique_ptr<DexWriter> writer;
- if (compact_dex_level != CompactDexLevel::kCompactDexLevelNone) {
- writer.reset(new CompactDexWriter(header, mem_map, dex_layout, compact_dex_level));
+ if (dex_layout->GetOptions().compact_dex_level_ != CompactDexLevel::kCompactDexLevelNone) {
+ CHECK(compute_offsets) << "Compact dex requires computing offsets";
+ writer.reset(new CompactDexWriter(dex_layout));
} else {
- writer.reset(new DexWriter(header, mem_map, dex_layout, compute_offsets));
+ writer.reset(new DexWriter(dex_layout, compute_offsets));
}
- writer->WriteMemMap();
+ DCHECK(container != nullptr);
+ if (*container == nullptr) {
+ *container = writer->CreateDexContainer();
+ }
+ writer->Write(container->get());
}
void MapItemQueue::AddIfNotEmpty(const MapItem& item) {
@@ -934,4 +931,17 @@ void MapItemQueue::AddIfNotEmpty(const MapItem& item) {
}
}
+void DexWriter::ProcessOffset(Stream* stream, dex_ir::Item* item) {
+ if (compute_offsets_) {
+ item->SetOffset(stream->Tell());
+ } else {
+ // Not computing offsets, just use the one in the item.
+ stream->Seek(item->GetOffset());
+ }
+}
+
+std::unique_ptr<DexContainer> DexWriter::CreateDexContainer() const {
+ return std::unique_ptr<DexContainer>(new DexWriter::Container);
+}
+
} // namespace art
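Most of the dex_writer.cc churn is mechanical (offset arithmetic replaced by Stream calls), but the ordering trick for the id sections is worth restating: sections whose entries embed offsets into the data section are first written with reserve_only so they merely occupy space, the data sections are then written (assigning real offsets), and finally a ScopedSeek rewinds the stream so the reserved holes can be backfilled. A compressed sketch of that pattern, using the Stream and ScopedSeek added in dex_writer.h below; the two lambdas only stand in for the real Write*Ids and data-section writers:

// Sketch only: reserve-then-backfill with the new stream API.
#include <cstdint>

#include "dex_writer.h"  // For art::DexWriter::Stream.

void ReserveThenBackfill(art::DexWriter::Stream* stream) {
  auto write_ids = [&](bool reserve_only) {
    if (reserve_only) {
      stream->Skip(sizeof(uint32_t));       // Leave a zeroed hole for one id entry.
    } else {
      const uint32_t data_off = 0x1234u;    // The real writer uses item->GetOffset().
      stream->Write(&data_off, sizeof(data_off));
    }
  };
  auto write_data = [&]() {
    const uint8_t byte = 0u;
    stream->Write(&byte, sizeof(byte));     // Data items get their offsets assigned here.
  };

  const uint32_t ids_offset = stream->Tell();
  write_ids(/*reserve_only=*/ true);        // Pass 1: reserve space only.
  write_data();                             // Offsets of the referenced data are now known.
  {
    art::DexWriter::Stream::ScopedSeek seek(stream, ids_offset);
    write_ids(/*reserve_only=*/ false);     // Pass 2: backfill the ids with real offsets.
  }                                         // ScopedSeek restores the end-of-data position.
}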
diff --git a/dexlayout/dex_writer.h b/dexlayout/dex_writer.h
index 92a002edc7..5df11116ee 100644
--- a/dexlayout/dex_writer.h
+++ b/dexlayout/dex_writer.h
@@ -20,9 +20,12 @@
#define ART_DEXLAYOUT_DEX_WRITER_H_
#include <functional>
+#include <memory> // For unique_ptr
#include "base/unix_file/fd_file.h"
#include "dex/compact_dex_level.h"
+#include "dex_container.h"
+#include "dex/dex_file.h"
#include "dex_ir.h"
#include "mem_map.h"
#include "os.h"
@@ -38,7 +41,7 @@ struct MapItem {
// Not using DexFile::MapItemType since compact dex and standard dex file may have different
// sections.
MapItem() = default;
- MapItem(uint32_t type, uint32_t size, uint32_t offset)
+ MapItem(uint32_t type, uint32_t size, size_t offset)
: type_(type), size_(size), offset_(offset) { }
// Sort by decreasing order since the priority_queue puts largest elements first.
@@ -59,78 +62,216 @@ class MapItemQueue : public
class DexWriter {
public:
- DexWriter(dex_ir::Header* header,
- MemMap* mem_map,
- DexLayout* dex_layout,
- bool compute_offsets)
- : header_(header),
- mem_map_(mem_map),
- dex_layout_(dex_layout),
- compute_offsets_(compute_offsets) {}
-
- static void Output(dex_ir::Header* header,
- MemMap* mem_map,
- DexLayout* dex_layout,
- bool compute_offsets,
- CompactDexLevel compact_dex_level);
+ static constexpr uint32_t kDataSectionAlignment = sizeof(uint32_t) * 2;
+ static constexpr uint32_t kDexSectionWordAlignment = 4;
+
+ // Stream that writes into a dex container section. Do not keep two streams pointing at the same
+ // backing storage, since resizing the section may invalidate that storage.
+ // Random access stream (consider refactoring).
+ class Stream {
+ public:
+ explicit Stream(DexContainer::Section* section) : section_(section) {
+ SyncWithSection();
+ }
+
+ const uint8_t* Begin() const {
+ return data_;
+ }
+
+ // Functions are not virtual (yet) for speed.
+ size_t Tell() const {
+ return position_;
+ }
+
+ void Seek(size_t position) {
+ position_ = position;
+ EnsureStorage(0u);
+ }
+
+ // Does not allow overwriting already-written (non-zero) data, to help catch bugs.
+ ALWAYS_INLINE size_t Write(const void* buffer, size_t length) {
+ EnsureStorage(length);
+ for (size_t i = 0; i < length; ++i) {
+ DCHECK_EQ(data_[position_ + i], 0u);
+ }
+ memcpy(&data_[position_], buffer, length);
+ position_ += length;
+ return length;
+ }
+
+ ALWAYS_INLINE size_t Overwrite(const void* buffer, size_t length) {
+ EnsureStorage(length);
+ memcpy(&data_[position_], buffer, length);
+ position_ += length;
+ return length;
+ }
+
+ ALWAYS_INLINE size_t Clear(size_t position, size_t length) {
+ EnsureStorage(length);
+ memset(&data_[position], 0, length);
+ return length;
+ }
+
+ ALWAYS_INLINE size_t WriteSleb128(int32_t value) {
+ EnsureStorage(8);
+ uint8_t* ptr = &data_[position_];
+ const size_t len = EncodeSignedLeb128(ptr, value) - ptr;
+ position_ += len;
+ return len;
+ }
+
+ ALWAYS_INLINE size_t WriteUleb128(uint32_t value) {
+ EnsureStorage(8);
+ uint8_t* ptr = &data_[position_];
+ const size_t len = EncodeUnsignedLeb128(ptr, value) - ptr;
+ position_ += len;
+ return len;
+ }
+
+ ALWAYS_INLINE void AlignTo(const size_t alignment) {
+ position_ = RoundUp(position_, alignment);
+ EnsureStorage(0u);
+ }
+
+ ALWAYS_INLINE void Skip(const size_t count) {
+ position_ += count;
+ EnsureStorage(0u);
+ }
+
+ class ScopedSeek {
+ public:
+ ScopedSeek(Stream* stream, uint32_t offset) : stream_(stream), offset_(stream->Tell()) {
+ stream->Seek(offset);
+ }
+
+ ~ScopedSeek() {
+ stream_->Seek(offset_);
+ }
+
+ private:
+ Stream* const stream_;
+ const uint32_t offset_;
+ };
+
+ private:
+ ALWAYS_INLINE void EnsureStorage(size_t length) {
+ size_t end = position_ + length;
+ while (UNLIKELY(end > data_size_)) {
+ section_->Resize(data_size_ * 3 / 2 + 1);
+ SyncWithSection();
+ }
+ }
+
+ void SyncWithSection() {
+ data_ = section_->Begin();
+ data_size_ = section_->Size();
+ }
+
+ // Current position of the stream.
+ size_t position_ = 0u;
+ DexContainer::Section* const section_ = nullptr;
+ // Cached Begin() from the container to provide faster accesses.
+ uint8_t* data_ = nullptr;
+ // Cached Size from the container to provide faster accesses.
+ size_t data_size_ = 0u;
+ };
+
+ static inline constexpr uint32_t SectionAlignment(DexFile::MapItemType type) {
+ switch (type) {
+ case DexFile::kDexTypeClassDataItem:
+ case DexFile::kDexTypeStringDataItem:
+ case DexFile::kDexTypeDebugInfoItem:
+ case DexFile::kDexTypeAnnotationItem:
+ case DexFile::kDexTypeEncodedArrayItem:
+ return alignof(uint8_t);
+
+ default:
+ // All other sections are kDexAlignedSection.
+ return DexWriter::kDexSectionWordAlignment;
+ }
+ }
+
+ class Container : public DexContainer {
+ public:
+ Section* GetMainSection() OVERRIDE {
+ return &main_section_;
+ }
+
+ Section* GetDataSection() OVERRIDE {
+ return &data_section_;
+ }
+
+ bool IsCompactDexContainer() const OVERRIDE {
+ return false;
+ }
+
+ private:
+ VectorSection main_section_;
+ VectorSection data_section_;
+
+ friend class CompactDexWriter;
+ };
+
+ DexWriter(DexLayout* dex_layout, bool compute_offsets);
+
+ static void Output(DexLayout* dex_layout,
+ std::unique_ptr<DexContainer>* container,
+ bool compute_offsets);
virtual ~DexWriter() {}
protected:
- void WriteMemMap();
-
- size_t Write(const void* buffer, size_t length, size_t offset) WARN_UNUSED;
- size_t WriteSleb128(uint32_t value, size_t offset) WARN_UNUSED;
- size_t WriteUleb128(uint32_t value, size_t offset) WARN_UNUSED;
- size_t WriteEncodedValue(dex_ir::EncodedValue* encoded_value, size_t offset) WARN_UNUSED;
- size_t WriteEncodedValueHeader(int8_t value_type, size_t value_arg, size_t offset) WARN_UNUSED;
- size_t WriteEncodedArray(dex_ir::EncodedValueVector* values, size_t offset) WARN_UNUSED;
- size_t WriteEncodedAnnotation(dex_ir::EncodedAnnotation* annotation, size_t offset) WARN_UNUSED;
- size_t WriteEncodedFields(dex_ir::FieldItemVector* fields, size_t offset) WARN_UNUSED;
- size_t WriteEncodedMethods(dex_ir::MethodItemVector* methods, size_t offset) WARN_UNUSED;
+ virtual void Write(DexContainer* output);
+ virtual std::unique_ptr<DexContainer> CreateDexContainer() const;
+
+ void WriteEncodedValue(Stream* stream, dex_ir::EncodedValue* encoded_value);
+ void WriteEncodedValueHeader(Stream* stream, int8_t value_type, size_t value_arg);
+ void WriteEncodedArray(Stream* stream, dex_ir::EncodedValueVector* values);
+ void WriteEncodedAnnotation(Stream* stream, dex_ir::EncodedAnnotation* annotation);
+ void WriteEncodedFields(Stream* stream, dex_ir::FieldItemVector* fields);
+ void WriteEncodedMethods(Stream* stream, dex_ir::MethodItemVector* methods);
// Header and id section
- virtual void WriteHeader();
+ virtual void WriteHeader(Stream* stream);
virtual size_t GetHeaderSize() const;
// reserve_only means don't write, only reserve space. This is required since the string data
// offsets must be assigned.
- uint32_t WriteStringIds(uint32_t offset, bool reserve_only);
- uint32_t WriteTypeIds(uint32_t offset);
- uint32_t WriteProtoIds(uint32_t offset, bool reserve_only);
- uint32_t WriteFieldIds(uint32_t offset);
- uint32_t WriteMethodIds(uint32_t offset);
- uint32_t WriteClassDefs(uint32_t offset, bool reserve_only);
- uint32_t WriteCallSiteIds(uint32_t offset, bool reserve_only);
-
- uint32_t WriteEncodedArrays(uint32_t offset);
- uint32_t WriteAnnotations(uint32_t offset);
- uint32_t WriteAnnotationSets(uint32_t offset);
- uint32_t WriteAnnotationSetRefs(uint32_t offset);
- uint32_t WriteAnnotationsDirectories(uint32_t offset);
+ void WriteStringIds(Stream* stream, bool reserve_only);
+ void WriteTypeIds(Stream* stream);
+ void WriteProtoIds(Stream* stream, bool reserve_only);
+ void WriteFieldIds(Stream* stream);
+ void WriteMethodIds(Stream* stream);
+ void WriteClassDefs(Stream* stream, bool reserve_only);
+ void WriteCallSiteIds(Stream* stream, bool reserve_only);
+
+ void WriteEncodedArrays(Stream* stream);
+ void WriteAnnotations(Stream* stream);
+ void WriteAnnotationSets(Stream* stream);
+ void WriteAnnotationSetRefs(Stream* stream);
+ void WriteAnnotationsDirectories(Stream* stream);
// Data section.
- uint32_t WriteDebugInfoItems(uint32_t offset);
- uint32_t WriteCodeItems(uint32_t offset, bool reserve_only);
- uint32_t WriteTypeLists(uint32_t offset);
- uint32_t WriteStringDatas(uint32_t offset);
- uint32_t WriteClassDatas(uint32_t offset);
- uint32_t WriteMethodHandles(uint32_t offset);
- uint32_t WriteMapItems(uint32_t offset, MapItemQueue* queue);
- uint32_t GenerateAndWriteMapItems(uint32_t offset);
+ void WriteDebugInfoItems(Stream* stream);
+ void WriteCodeItems(Stream* stream, bool reserve_only);
+ void WriteTypeLists(Stream* stream);
+ void WriteStringDatas(Stream* stream);
+ void WriteClassDatas(Stream* stream);
+ void WriteMethodHandles(Stream* stream);
+ void WriteMapItems(Stream* stream, MapItemQueue* queue);
+ void GenerateAndWriteMapItems(Stream* stream);
+
+ virtual void WriteCodeItemPostInstructionData(Stream* stream,
+ dex_ir::CodeItem* item,
+ bool reserve_only);
+ virtual void WriteCodeItem(Stream* stream, dex_ir::CodeItem* item, bool reserve_only);
+ virtual void WriteDebugInfoItem(Stream* stream, dex_ir::DebugInfoItem* debug_info);
+ virtual void WriteStringData(Stream* stream, dex_ir::StringData* string_data);
// Process an offset, if compute_offset is set, write into the dex ir item, otherwise read the
// existing offset and use that for writing.
- void ProcessOffset(uint32_t* const offset, dex_ir::Item* item) {
- if (compute_offsets_) {
- item->SetOffset(*offset);
- } else {
- // Not computing offsets, just use the one in the item.
- *offset = item->GetOffset();
- }
- }
+ void ProcessOffset(Stream* stream, dex_ir::Item* item);
dex_ir::Header* const header_;
- MemMap* const mem_map_;
DexLayout* const dex_layout_;
bool compute_offsets_;
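The Stream above replaces the old fixed-size MemMap output: instead of pre-sizing the mapping, EnsureStorage() grows the backing section geometrically whenever a write or seek would run past the end, and SyncWithSection() re-caches the possibly relocated buffer. A standalone model of that growth policy, for illustration only (GrowTo is a hypothetical helper, not part of the change):

  // Returns the section size after growing it enough to hold bytes up to 'end'.
  size_t GrowTo(size_t current_size, size_t end) {
    while (end > current_size) {
      current_size = current_size * 3 / 2 + 1;  // Same factor EnsureStorage() applies before resyncing.
    }
    return current_size;
  }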
diff --git a/dexlayout/dexdiag.cc b/dexlayout/dexdiag.cc
index 99b1f38f73..c0d6f02c00 100644
--- a/dexlayout/dexdiag.cc
+++ b/dexlayout/dexdiag.cc
@@ -29,6 +29,7 @@
#include "base/logging.h" // For InitLogging.
#include "base/stringpiece.h"
+#include "dexlayout.h"
#include "dex/dex_file.h"
#include "dex_ir.h"
#include "dex_ir_builder.h"
@@ -290,8 +291,10 @@ static void ProcessOneDexMapping(uint64_t* pagemap,
// Build a list of the dex file section types, sorted from highest offset to lowest.
std::vector<dex_ir::DexFileSection> sections;
{
+ Options options;
std::unique_ptr<dex_ir::Header> header(dex_ir::DexIrBuilder(*dex_file,
- /*eagerly_assign_offsets*/ true));
+ /*eagerly_assign_offsets*/ true,
+ options));
sections = dex_ir::GetSortedDexFileSections(header.get(),
dex_ir::SortDirection::kSortDescending);
}
diff --git a/dexlayout/dexlayout.cc b/dexlayout/dexlayout.cc
index 000d1356b9..91d35ff6d7 100644
--- a/dexlayout/dexlayout.cc
+++ b/dexlayout/dexlayout.cc
@@ -34,6 +34,7 @@
#include "android-base/stringprintf.h"
#include "base/logging.h" // For VLOG_IS_ON.
+#include "dex/art_dex_file_loader.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_file_layout.h"
#include "dex/dex_file_loader.h"
@@ -1051,7 +1052,7 @@ void DexLayout::DumpBytecodes(uint32_t idx, const dex_ir::CodeItem* code, uint32
for (const DexInstructionPcPair& inst : code->Instructions()) {
const uint32_t insn_width = inst->SizeInCodeUnits();
if (insn_width == 0) {
- fprintf(stderr, "GLITCH: zero-width instruction at idx=0x%04x\n", inst.DexPc());
+ LOG(WARNING) << "GLITCH: zero-width instruction at idx=0x" << std::hex << inst.DexPc();
break;
}
DumpInstruction(code, code_offset, inst.DexPc(), insn_width, &inst.Inst());
@@ -1219,7 +1220,7 @@ void DexLayout::DumpMethod(uint32_t idx, uint32_t flags, const dex_ir::CodeItem*
fprintf(out_file_, "<method name=\"%s\"\n", name);
const char* return_type = strrchr(type_descriptor, ')');
if (return_type == nullptr) {
- fprintf(stderr, "bad method type descriptor '%s'\n", type_descriptor);
+ LOG(ERROR) << "bad method type descriptor '" << type_descriptor << "'";
goto bail;
}
std::string dot(DescriptorToDotWrapper(return_type + 1));
@@ -1238,7 +1239,7 @@ void DexLayout::DumpMethod(uint32_t idx, uint32_t flags, const dex_ir::CodeItem*
// Parameters.
if (type_descriptor[0] != '(') {
- fprintf(stderr, "ERROR: bad descriptor '%s'\n", type_descriptor);
+ LOG(ERROR) << "ERROR: bad descriptor '" << type_descriptor << "'";
goto bail;
}
char* tmp_buf = reinterpret_cast<char*>(malloc(strlen(type_descriptor) + 1));
@@ -1257,7 +1258,7 @@ void DexLayout::DumpMethod(uint32_t idx, uint32_t flags, const dex_ir::CodeItem*
} else {
// Primitive char, copy it.
if (strchr("ZBCSIFJD", *base) == nullptr) {
- fprintf(stderr, "ERROR: bad method signature '%s'\n", base);
+ LOG(ERROR) << "ERROR: bad method signature '" << base << "'";
break; // while
}
*cp++ = *base++;
@@ -1367,7 +1368,7 @@ void DexLayout::DumpClass(int idx, char** last_package) {
if (!(class_descriptor[0] == 'L' &&
class_descriptor[strlen(class_descriptor)-1] == ';')) {
// Arrays and primitives should not be defined explicitly. Keep going?
- fprintf(stderr, "Malformed class name '%s'\n", class_descriptor);
+ LOG(ERROR) << "Malformed class name '" << class_descriptor << "'";
} else if (options_.output_format_ == kOutputXml) {
char* mangle = strdup(class_descriptor + 1);
mangle[strlen(mangle)-1] = '\0';
@@ -1812,16 +1813,14 @@ void DexLayout::LayoutOutputFile(const DexFile* dex_file) {
LayoutCodeItems(dex_file);
}
-void DexLayout::OutputDexFile(const DexFile* dex_file, bool compute_offsets) {
- const std::string& dex_file_location = dex_file->GetLocation();
+void DexLayout::OutputDexFile(const DexFile* input_dex_file,
+ bool compute_offsets,
+ std::unique_ptr<DexContainer>* dex_container) {
+ const std::string& dex_file_location = input_dex_file->GetLocation();
std::string error_msg;
std::unique_ptr<File> new_file;
- // Since we allow dex growth, we need to size the map larger than the original input to be safe.
- // Reserve an extra 10% to add some buffer room. Note that this is probably more than
- // necessary.
- constexpr size_t kReserveFraction = 10;
- const size_t max_size = header_->FileSize() + header_->FileSize() / kReserveFraction;
- if (!options_.output_to_memmap_) {
+ // If options_.output_dex_directory_ is non-null, we are outputting to a file.
+ if (options_.output_dex_directory_ != nullptr) {
std::string output_location(options_.output_dex_directory_);
size_t last_slash = dex_file_location.rfind('/');
std::string dex_file_directory = dex_file_location.substr(0, last_slash + 1);
@@ -1837,31 +1836,21 @@ void DexLayout::OutputDexFile(const DexFile* dex_file, bool compute_offsets) {
LOG(ERROR) << "Could not create dex writer output file: " << output_location;
return;
}
- if (ftruncate(new_file->Fd(), max_size) != 0) {
- LOG(ERROR) << "Could not grow dex writer output file: " << output_location;;
+ }
+ DexWriter::Output(this, dex_container, compute_offsets);
+ if (new_file != nullptr) {
+ DexContainer* const container = dex_container->get();
+ DexContainer::Section* const main_section = container->GetMainSection();
+ if (!new_file->WriteFully(main_section->Begin(), main_section->Size())) {
+ LOG(ERROR) << "Failed to write main section for dex file " << dex_file_location;
new_file->Erase();
return;
}
- mem_map_.reset(MemMap::MapFile(max_size, PROT_READ | PROT_WRITE, MAP_SHARED,
- new_file->Fd(), 0, /*low_4gb*/ false, output_location.c_str(), &error_msg));
- } else {
- mem_map_.reset(MemMap::MapAnonymous("layout dex", nullptr, max_size,
- PROT_READ | PROT_WRITE, /* low_4gb */ false, /* reuse */ false, &error_msg));
- }
- if (mem_map_ == nullptr) {
- LOG(ERROR) << "Could not create mem map for dex writer output: " << error_msg;
- if (new_file != nullptr) {
+ DexContainer::Section* const data_section = container->GetDataSection();
+ if (!new_file->WriteFully(data_section->Begin(), data_section->Size())) {
+ LOG(ERROR) << "Failed to write data section for dex file " << dex_file_location;
new_file->Erase();
- }
- return;
- }
- DexWriter::Output(header_, mem_map_.get(), this, compute_offsets, options_.compact_dex_level_);
- if (new_file != nullptr) {
- // Since we make the memmap larger than needed, shrink the file back down to not leave extra
- // padding.
- int res = new_file->SetLength(header_->FileSize());
- if (res != 0) {
- LOG(ERROR) << "Truncating file resulted in " << res;
+ return;
}
UNUSED(new_file->FlushCloseOrErase());
}
@@ -1872,8 +1861,11 @@ void DexLayout::OutputDexFile(const DexFile* dex_file, bool compute_offsets) {
*/
void DexLayout::ProcessDexFile(const char* file_name,
const DexFile* dex_file,
- size_t dex_file_index) {
- const bool output = options_.output_dex_directory_ != nullptr || options_.output_to_memmap_;
+ size_t dex_file_index,
+ std::unique_ptr<DexContainer>* dex_container) {
+ const bool has_output_container = dex_container != nullptr;
+ const bool output = options_.output_dex_directory_ != nullptr || has_output_container;
+
// Try to avoid eagerly assigning offsets to find bugs since GetOffset will abort if the offset
// is unassigned.
bool eagerly_assign_offsets = false;
@@ -1881,7 +1873,9 @@ void DexLayout::ProcessDexFile(const char* file_name,
// These options required the offsets for dumping purposes.
eagerly_assign_offsets = true;
}
- std::unique_ptr<dex_ir::Header> header(dex_ir::DexIrBuilder(*dex_file, eagerly_assign_offsets));
+ std::unique_ptr<dex_ir::Header> header(dex_ir::DexIrBuilder(*dex_file,
+ eagerly_assign_offsets,
+ GetOptions()));
SetHeader(header.get());
if (options_.verbose_) {
@@ -1912,25 +1906,41 @@ void DexLayout::ProcessDexFile(const char* file_name,
if (do_layout) {
LayoutOutputFile(dex_file);
}
+ // The output needs a dex container; use a temporary one if the caller did not supply one.
+ std::unique_ptr<DexContainer> temp_container;
+ if (dex_container == nullptr) {
+ dex_container = &temp_container;
+ }
// If we didn't set the offsets eagerly, we definitely need to compute them here.
- OutputDexFile(dex_file, do_layout || !eagerly_assign_offsets);
+ OutputDexFile(dex_file, do_layout || !eagerly_assign_offsets, dex_container);
// Clear header before verifying to reduce peak RAM usage.
const size_t file_size = header_->FileSize();
header.reset();
// Verify the output dex file's structure, only enabled by default for debug builds.
- if (options_.verify_output_) {
+ if (options_.verify_output_ && has_output_container) {
std::string error_msg;
std::string location = "memory mapped file for " + std::string(file_name);
- std::unique_ptr<const DexFile> output_dex_file(DexFileLoader::Open(mem_map_->Begin(),
- file_size,
- location,
- /* checksum */ 0,
- /*oat_dex_file*/ nullptr,
- /*verify*/ true,
- /*verify_checksum*/ false,
- &error_msg));
+ // Dex file verifier cannot handle compact dex.
+ bool verify = options_.compact_dex_level_ == CompactDexLevel::kCompactDexLevelNone;
+ const ArtDexFileLoader dex_file_loader;
+ DexContainer::Section* const main_section = (*dex_container)->GetMainSection();
+ DexContainer::Section* const data_section = (*dex_container)->GetDataSection();
+ DCHECK_EQ(file_size, main_section->Size())
+ << main_section->Size() << " " << data_section->Size();
+ std::unique_ptr<const DexFile> output_dex_file(
+ dex_file_loader.OpenWithDataSection(
+ main_section->Begin(),
+ main_section->Size(),
+ data_section->Begin(),
+ data_section->Size(),
+ location,
+ /* checksum */ 0,
+ /*oat_dex_file*/ nullptr,
+ verify,
+ /*verify_checksum*/ false,
+ &error_msg));
CHECK(output_dex_file != nullptr) << "Failed to re-open output file:" << error_msg;
// Do IR-level comparison between input and output. This check ignores potential differences
@@ -1940,10 +1950,12 @@ void DexLayout::ProcessDexFile(const char* file_name,
// Regenerate output IR to catch any bugs that might happen during writing.
std::unique_ptr<dex_ir::Header> output_header(
dex_ir::DexIrBuilder(*output_dex_file,
- /*eagerly_assign_offsets*/ true));
+ /*eagerly_assign_offsets*/ true,
+ GetOptions()));
std::unique_ptr<dex_ir::Header> orig_header(
dex_ir::DexIrBuilder(*dex_file,
- /*eagerly_assign_offsets*/ true));
+ /*eagerly_assign_offsets*/ true,
+ GetOptions()));
CHECK(VerifyOutputDexFile(output_header.get(), orig_header.get(), &error_msg)) << error_msg;
}
}
@@ -1961,13 +1973,13 @@ int DexLayout::ProcessFile(const char* file_name) {
// all of which are Zip archives with "classes.dex" inside.
const bool verify_checksum = !options_.ignore_bad_checksum_;
std::string error_msg;
+ const ArtDexFileLoader dex_file_loader;
std::vector<std::unique_ptr<const DexFile>> dex_files;
- if (!DexFileLoader::Open(
+ if (!dex_file_loader.Open(
file_name, file_name, /* verify */ true, verify_checksum, &error_msg, &dex_files)) {
// Display returned error message to user. Note that this error behavior
// differs from the error messages shown by the original Dalvik dexdump.
- fputs(error_msg.c_str(), stderr);
- fputc('\n', stderr);
+ LOG(ERROR) << error_msg;
return -1;
}
@@ -1977,7 +1989,8 @@ int DexLayout::ProcessFile(const char* file_name) {
fprintf(out_file_, "Checksum verified\n");
} else {
for (size_t i = 0; i < dex_files.size(); i++) {
- ProcessDexFile(file_name, dex_files[i].get(), i);
+ // Pass in a null container to avoid output by default.
+ ProcessDexFile(file_name, dex_files[i].get(), i, /*dex_container*/ nullptr);
}
}
return 0;
diff --git a/dexlayout/dexlayout.h b/dexlayout/dexlayout.h
index 25afb773bd..5635271dc1 100644
--- a/dexlayout/dexlayout.h
+++ b/dexlayout/dexlayout.h
@@ -28,6 +28,7 @@
#include <unordered_map>
#include "dex/compact_dex_level.h"
+#include "dex_container.h"
#include "dex/dex_file_layout.h"
#include "dex_ir.h"
#include "mem_map.h"
@@ -55,7 +56,7 @@ class Options {
bool disassemble_ = false;
bool exports_only_ = false;
bool ignore_bad_checksum_ = false;
- bool output_to_memmap_ = false;
+ bool output_to_container_ = false;
bool show_annotations_ = false;
bool show_file_headers_ = false;
bool show_section_headers_ = false;
@@ -65,10 +66,14 @@ class Options {
bool visualize_pattern_ = false;
bool update_checksum_ = false;
CompactDexLevel compact_dex_level_ = CompactDexLevel::kCompactDexLevelNone;
+ bool dedupe_code_items_ = true;
OutputFormat output_format_ = kOutputPlain;
const char* output_dex_directory_ = nullptr;
const char* output_file_name_ = nullptr;
const char* profile_file_name_ = nullptr;
+ // Filter that removes classes that don't have a matching descriptor (during IR creation).
+ // This speeds up cases when the output only requires a single class.
+ std::set<std::string> class_filter_;
};
// Hotness info
@@ -80,6 +85,18 @@ class DexLayoutHotnessInfo {
class DexLayout {
public:
+ class VectorOutputContainer {
+ public:
+ // Begin is not necessarily aligned (for now).
+ uint8_t* Begin() {
+ return &data_[0];
+ }
+
+ private:
+ std::vector<uint8_t> data_;
+ };
+
+
// Setting this to false disables class def layout entirely, which is stronger than strictly
// necessary to ensure the partial order w.r.t. class derivation. TODO: Re-enable (b/68317550).
static constexpr bool kChangeClassDefOrder = false;
@@ -87,18 +104,21 @@ class DexLayout {
DexLayout(Options& options,
ProfileCompilationInfo* info,
FILE* out_file,
- dex_ir::Header*
- header = nullptr)
- : options_(options), info_(info), out_file_(out_file), header_(header) { }
+ dex_ir::Header* header)
+ : options_(options),
+ info_(info),
+ out_file_(out_file),
+ header_(header) { }
int ProcessFile(const char* file_name);
- void ProcessDexFile(const char* file_name, const DexFile* dex_file, size_t dex_file_index);
+ void ProcessDexFile(const char* file_name,
+ const DexFile* dex_file,
+ size_t dex_file_index,
+ std::unique_ptr<DexContainer>* dex_container);
dex_ir::Header* GetHeader() const { return header_; }
void SetHeader(dex_ir::Header* header) { header_ = header; }
- MemMap* GetAndReleaseMemMap() { return mem_map_.release(); }
-
DexLayoutSections& GetSections() {
return dex_sections_;
}
@@ -148,7 +168,9 @@ class DexLayout {
// Creates a new layout for the dex file based on profile info.
// Currently reorders ClassDefs, ClassDataItems, and CodeItems.
void LayoutOutputFile(const DexFile* dex_file);
- void OutputDexFile(const DexFile* dex_file, bool compute_offsets);
+ void OutputDexFile(const DexFile* input_dex_file,
+ bool compute_offsets,
+ std::unique_ptr<DexContainer>* dex_container);
void DumpCFG(const DexFile* dex_file, int idx);
void DumpCFG(const DexFile* dex_file, uint32_t dex_method_idx, const DexFile::CodeItem* code);
@@ -157,7 +179,6 @@ class DexLayout {
ProfileCompilationInfo* info_;
FILE* out_file_;
dex_ir::Header* header_;
- std::unique_ptr<MemMap> mem_map_;
DexLayoutSections dex_sections_;
// Layout hotness information is only calculated when dexlayout is enabled.
DexLayoutHotnessInfo layout_hotness_info_;
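With the MemMap accessor gone from DexLayout, library callers now receive the result through the DexContainer out-parameter. A rough sketch of the flow, mirroring the ClassFilter test added further below; the dex_file is assumed to come from an ArtDexFileLoader as in that test:

  Options options;
  DexLayout layout(options, /*info*/ nullptr, /*out_file*/ nullptr, /*header*/ nullptr);
  std::unique_ptr<DexContainer> out;
  layout.ProcessDexFile(dex_file->GetLocation().c_str(), dex_file.get(), /*dex_file_index*/ 0, &out);
  // out->GetMainSection() and out->GetDataSection() can then be handed to OpenWithDataSection().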
diff --git a/dexlayout/dexlayout_main.cc b/dexlayout/dexlayout_main.cc
index 5bb7196531..f30cfee4ec 100644
--- a/dexlayout/dexlayout_main.cc
+++ b/dexlayout/dexlayout_main.cc
@@ -44,24 +44,26 @@ static const char* kProgramName = "dexlayout";
* Shows usage.
*/
static void Usage(void) {
- fprintf(stderr, "Copyright (C) 2016 The Android Open Source Project\n\n");
- fprintf(stderr, "%s: [-a] [-c] [-d] [-e] [-f] [-h] [-i] [-l layout] [-o outfile] [-p profile]"
- " [-s] [-t] [-v] [-w directory] dexfile...\n\n", kProgramName);
- fprintf(stderr, " -a : display annotations\n");
- fprintf(stderr, " -b : build dex_ir\n");
- fprintf(stderr, " -c : verify checksum and exit\n");
- fprintf(stderr, " -d : disassemble code sections\n");
- fprintf(stderr, " -e : display exported items only\n");
- fprintf(stderr, " -f : display summary information from file header\n");
- fprintf(stderr, " -h : display file header details\n");
- fprintf(stderr, " -i : ignore checksum failures\n");
- fprintf(stderr, " -l : output layout, either 'plain' or 'xml'\n");
- fprintf(stderr, " -o : output file name (defaults to stdout)\n");
- fprintf(stderr, " -p : profile file name (defaults to no profile)\n");
- fprintf(stderr, " -s : visualize reference pattern\n");
- fprintf(stderr, " -t : display file section sizes\n");
- fprintf(stderr, " -v : verify output file is canonical to input (IR level comparison)\n");
- fprintf(stderr, " -w : output dex directory \n");
+ LOG(ERROR) << "Copyright (C) 2016 The Android Open Source Project\n";
+ LOG(ERROR) << kProgramName
+ << ": [-a] [-c] [-d] [-e] [-f] [-h] [-i] [-l layout] [-o outfile] [-p profile]"
+ " [-s] [-t] [-v] [-w directory] dexfile...\n";
+ LOG(ERROR) << " -a : display annotations";
+ LOG(ERROR) << " -b : build dex_ir";
+ LOG(ERROR) << " -c : verify checksum and exit";
+ LOG(ERROR) << " -d : disassemble code sections";
+ LOG(ERROR) << " -e : display exported items only";
+ LOG(ERROR) << " -f : display summary information from file header";
+ LOG(ERROR) << " -h : display file header details";
+ LOG(ERROR) << " -i : ignore checksum failures";
+ LOG(ERROR) << " -l : output layout, either 'plain' or 'xml'";
+ LOG(ERROR) << " -o : output file name (defaults to stdout)";
+ LOG(ERROR) << " -p : profile file name (defaults to no profile)";
+ LOG(ERROR) << " -s : visualize reference pattern";
+ LOG(ERROR) << " -t : display file section sizes";
+ LOG(ERROR) << " -v : verify output file is canonical to input (IR level comparison)";
+ LOG(ERROR) << " -w : output dex directory";
+ LOG(ERROR) << " -x : compact dex generation level, either 'none' or 'fast'";
}
/*
@@ -79,7 +81,7 @@ int DexlayoutDriver(int argc, char** argv) {
// Parse all arguments.
while (1) {
- const int ic = getopt(argc, argv, "abcdefghil:mo:p:stvw:");
+ const int ic = getopt(argc, argv, "abcdefghil:o:p:stvw:x:");
if (ic < 0) {
break; // done
}
@@ -118,9 +120,6 @@ int DexlayoutDriver(int argc, char** argv) {
want_usage = true;
}
break;
- case 'm': // output dex files to a memmap
- options.output_to_memmap_ = true;
- break;
case 'o': // output file
options.output_file_name_ = optarg;
break;
@@ -141,6 +140,15 @@ int DexlayoutDriver(int argc, char** argv) {
case 'w': // output dex files directory
options.output_dex_directory_ = optarg;
break;
+ case 'x': // compact dex level
+ if (strcmp(optarg, "none") == 0) {
+ options.compact_dex_level_ = CompactDexLevel::kCompactDexLevelNone;
+ } else if (strcmp(optarg, "fast") == 0) {
+ options.compact_dex_level_ = CompactDexLevel::kCompactDexLevelFast;
+ } else {
+ want_usage = true;
+ }
+ break;
default:
want_usage = true;
break;
@@ -149,11 +157,11 @@ int DexlayoutDriver(int argc, char** argv) {
// Detect early problems.
if (optind == argc) {
- fprintf(stderr, "%s: no file specified\n", kProgramName);
+ LOG(ERROR) << "no file specified";
want_usage = true;
}
if (options.checksum_only_ && options.ignore_bad_checksum_) {
- fprintf(stderr, "Can't specify both -c and -i\n");
+ LOG(ERROR) << "Can't specify both -c and -i";
want_usage = true;
}
if (want_usage) {
@@ -166,7 +174,7 @@ int DexlayoutDriver(int argc, char** argv) {
if (options.output_file_name_) {
out_file = fopen(options.output_file_name_, "w");
if (!out_file) {
- fprintf(stderr, "Can't open %s\n", options.output_file_name_);
+ PLOG(ERROR) << "Can't open " << options.output_file_name_;
return 1;
}
}
@@ -176,18 +184,18 @@ int DexlayoutDriver(int argc, char** argv) {
if (options.profile_file_name_) {
int profile_fd = open(options.profile_file_name_, O_RDONLY);
if (profile_fd < 0) {
- fprintf(stderr, "Can't open %s\n", options.profile_file_name_);
+ PLOG(ERROR) << "Can't open " << options.profile_file_name_;
return 1;
}
profile_info.reset(new ProfileCompilationInfo());
if (!profile_info->Load(profile_fd)) {
- fprintf(stderr, "Can't read profile info from %s\n", options.profile_file_name_);
+ LOG(ERROR) << "Can't read profile info from " << options.profile_file_name_;
return 1;
}
}
// Create DexLayout instance.
- DexLayout dex_layout(options, profile_info.get(), out_file);
+ DexLayout dex_layout(options, profile_info.get(), out_file, /*header*/ nullptr);
// Process all files supplied on command line.
int result = 0;
@@ -206,5 +214,8 @@ int DexlayoutDriver(int argc, char** argv) {
} // namespace art
int main(int argc, char** argv) {
+ // Output all logging to stderr.
+ android::base::SetLogger(android::base::StderrLogger);
+
return art::DexlayoutDriver(argc, argv);
}
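On the command line, the -m (output to memmap) flag is gone and the new -x flag selects the compact dex level. An example invocation, expressed the way the test harness below assembles its arguments (all paths are placeholders):

  // Hypothetical argument list; "fast" may also be "none".
  std::vector<std::string> dexlayout_args =
      { "-x", "fast", "-w", "/tmp/out/", "-o", "/tmp/out/dexlayout.txt", "input.dex" };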
diff --git a/dexlayout/dexlayout_test.cc b/dexlayout/dexlayout_test.cc
index 5da3b1d366..bebdc202e7 100644
--- a/dexlayout/dexlayout_test.cc
+++ b/dexlayout/dexlayout_test.cc
@@ -23,9 +23,11 @@
#include "base/unix_file/fd_file.h"
#include "common_runtime_test.h"
+#include "dex/art_dex_file_loader.h"
#include "dex/code_item_accessors-inl.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_file_loader.h"
+#include "dexlayout.h"
#include "exec_utils.h"
#include "jit/profile_compilation_info.h"
#include "utils.h"
@@ -219,6 +221,12 @@ static const char kDuplicateCodeItemInputDex[] =
"AHAAAAACAAAAAwAAAIwAAAADAAAAAQAAAJgAAAAFAAAABAAAAKQAAAAGAAAAAQAAAMQAAAABIAAA"
"AwAAAOQAAAACIAAABwAAACQBAAADIAAAAwAAAFYBAAAAIAAAAQAAAGUBAAAAEAAAAQAAAHgBAAA=";
+// Returns the default compact dex option for dexlayout based on kDefaultCompactDexLevel.
+static std::vector<std::string> DefaultCompactDexOption() {
+ return (kDefaultCompactDexLevel == CompactDexLevel::kCompactDexLevelFast) ?
+ std::vector<std::string>{"-x", "fast"} : std::vector<std::string>{"-x", "none"};
+}
+
static void WriteBase64ToFile(const char* base64, File* file) {
// Decode base64.
CHECK(base64 != nullptr);
@@ -287,7 +295,7 @@ class DexLayoutTest : public CommonRuntimeTest {
for (const std::string &dex_file : GetLibCoreDexFileNames()) {
std::vector<std::string> dexlayout_args =
{ "-w", tmp_dir, "-o", tmp_name, dex_file };
- if (!DexLayoutExec(dexlayout_args, error_msg)) {
+ if (!DexLayoutExec(dexlayout_args, error_msg, /*pass_default_cdex_option*/ false)) {
return false;
}
size_t dex_file_last_slash = dex_file.rfind('/');
@@ -302,12 +310,10 @@ class DexLayoutTest : public CommonRuntimeTest {
if (!::art::Exec(diff_exec_argv, error_msg)) {
return false;
}
- std::vector<std::string> rm_zip_exec_argv = { "/bin/rm", tmp_dir + "classes.dex" };
- if (!::art::Exec(rm_zip_exec_argv, error_msg)) {
+ if (!UnlinkFile(tmp_dir + "classes.dex")) {
return false;
}
- std::vector<std::string> rm_out_exec_argv = { "/bin/rm", tmp_dir + dex_file_name };
- if (!::art::Exec(rm_out_exec_argv, error_msg)) {
+ if (!UnlinkFile(tmp_dir + dex_file_name)) {
return false;
}
}
@@ -318,12 +324,13 @@ class DexLayoutTest : public CommonRuntimeTest {
bool MutateDexFile(File* output_dex, const std::string& input_jar, const Mutator& mutator) {
std::vector<std::unique_ptr<const DexFile>> dex_files;
std::string error_msg;
- CHECK(DexFileLoader::Open(input_jar.c_str(),
- input_jar.c_str(),
- /*verify*/ true,
- /*verify_checksum*/ true,
- &error_msg,
- &dex_files)) << error_msg;
+ const ArtDexFileLoader dex_file_loader;
+ CHECK(dex_file_loader.Open(input_jar.c_str(),
+ input_jar.c_str(),
+ /*verify*/ true,
+ /*verify_checksum*/ true,
+ &error_msg,
+ &dex_files)) << error_msg;
EXPECT_EQ(dex_files.size(), 1u) << "Only one input dex is supported";
for (const std::unique_ptr<const DexFile>& dex : dex_files) {
CHECK(dex->EnableWrite()) << "Failed to enable write";
@@ -344,12 +351,13 @@ class DexLayoutTest : public CommonRuntimeTest {
const std::string& dex_location) {
std::vector<std::unique_ptr<const DexFile>> dex_files;
std::string error_msg;
- bool result = DexFileLoader::Open(input_dex.c_str(),
- input_dex,
- /*verify*/ true,
- /*verify_checksum*/ false,
- &error_msg,
- &dex_files);
+ const ArtDexFileLoader dex_file_loader;
+ bool result = dex_file_loader.Open(input_dex.c_str(),
+ input_dex,
+ /*verify*/ true,
+ /*verify_checksum*/ false,
+ &error_msg,
+ &dex_files);
ASSERT_TRUE(result) << error_msg;
ASSERT_GE(dex_files.size(), 1u);
@@ -422,10 +430,7 @@ class DexLayoutTest : public CommonRuntimeTest {
}
// -v makes sure that the layout did not corrupt the dex file.
-
- std::vector<std::string> rm_exec_argv =
- { "/bin/rm", dex_file, profile_file, output_dex };
- if (!::art::Exec(rm_exec_argv, error_msg)) {
+ if (!UnlinkFile(dex_file) || !UnlinkFile(profile_file) || !UnlinkFile(output_dex)) {
return false;
}
return true;
@@ -463,7 +468,7 @@ class DexLayoutTest : public CommonRuntimeTest {
// -v makes sure that the layout did not corrupt the dex file.
std::vector<std::string> dexlayout_args =
{ "-i", "-v", "-w", tmp_dir, "-o", tmp_name, "-p", profile_file, dex_file };
- if (!DexLayoutExec(dexlayout_args, error_msg)) {
+ if (!DexLayoutExec(dexlayout_args, error_msg, /*pass_default_cdex_option*/ false)) {
return false;
}
@@ -475,7 +480,7 @@ class DexLayoutTest : public CommonRuntimeTest {
// -i since the checksum won't match from the first layout.
std::vector<std::string> second_dexlayout_args =
{ "-i", "-v", "-w", tmp_dir, "-o", tmp_name, "-p", profile_file, output_dex };
- if (!DexLayoutExec(second_dexlayout_args, error_msg)) {
+ if (!DexLayoutExec(second_dexlayout_args, error_msg, /*pass_default_cdex_option*/ false)) {
return false;
}
@@ -486,10 +491,11 @@ class DexLayoutTest : public CommonRuntimeTest {
diff_result = false;
}
- std::vector<std::string> rm_exec_argv =
- { "/bin/rm", dex_file, profile_file, output_dex, second_output_dex };
- if (!::art::Exec(rm_exec_argv, error_msg)) {
- return false;
+ std::vector<std::string> test_files = { dex_file, profile_file, output_dex, second_output_dex };
+ for (auto test_file : test_files) {
+ if (!UnlinkFile(test_file)) {
+ return false;
+ }
}
return diff_result;
@@ -508,7 +514,7 @@ class DexLayoutTest : public CommonRuntimeTest {
std::string output_dex = tmp_dir + "classes.dex.new";
std::vector<std::string> dexlayout_args = { "-w", tmp_dir, "-o", "/dev/null", input_dex };
- if (!DexLayoutExec(dexlayout_args, error_msg)) {
+ if (!DexLayoutExec(dexlayout_args, error_msg, /*pass_default_cdex_option*/ false)) {
return false;
}
@@ -518,9 +524,11 @@ class DexLayoutTest : public CommonRuntimeTest {
return false;
}
- std::vector<std::string> rm_exec_argv = { "/bin/rm", input_dex, output_dex };
- if (!::art::Exec(rm_exec_argv, error_msg)) {
- return false;
+ std::vector<std::string> dex_files = { input_dex, output_dex };
+ for (auto dex_file : dex_files) {
+ if (!UnlinkFile(dex_file)) {
+ return false;
+ }
}
return true;
}
@@ -546,17 +554,27 @@ class DexLayoutTest : public CommonRuntimeTest {
return true;
}
- bool DexLayoutExec(const std::vector<std::string>& dexlayout_args, std::string* error_msg) {
+ bool DexLayoutExec(const std::vector<std::string>& dexlayout_args,
+ std::string* error_msg,
+ bool pass_default_cdex_option = true) {
std::vector<std::string> argv;
std::string dexlayout = GetDexLayoutPath();
CHECK(OS::FileExists(dexlayout.c_str())) << dexlayout << " should be a valid file path";
argv.push_back(dexlayout);
+ if (pass_default_cdex_option) {
+ std::vector<std::string> cdex_level = DefaultCompactDexOption();
+ argv.insert(argv.end(), cdex_level.begin(), cdex_level.end());
+ }
argv.insert(argv.end(), dexlayout_args.begin(), dexlayout_args.end());
return ::art::Exec(argv, error_msg);
}
+
+ bool UnlinkFile(const std::string& file_path) {
+ return unix_file::FdFile(file_path, 0, false).Unlink();
+ }
};
@@ -726,11 +744,29 @@ TEST_F(DexLayoutTest, CodeItemOverrun) {
CHECK(mutated_successfully)
<< "Failed to find candidate code item with only one code unit in last instruction.";
});
- std::vector<std::string> dexlayout_args = { "-i", "-o", "/dev/null", temp_dex.GetFilename() };
+
+ std::string error_msg;
+
+ ScratchFile tmp_file;
+ const std::string& tmp_name = tmp_file.GetFilename();
+ size_t tmp_last_slash = tmp_name.rfind('/');
+ std::string tmp_dir = tmp_name.substr(0, tmp_last_slash + 1);
+ ScratchFile profile_file;
+
+ std::vector<std::string> dexlayout_args =
+ { "-i",
+ "-v",
+ "-w", tmp_dir,
+ "-o", tmp_name,
+ "-p", profile_file.GetFilename(),
+ temp_dex.GetFilename()
+ };
+ // -v makes sure that the layout did not corrupt the dex file.
ASSERT_TRUE(DexLayoutExec(&temp_dex,
/*dex_filename*/ nullptr,
- nullptr /* profile_file */,
+ &profile_file,
dexlayout_args));
+ ASSERT_TRUE(UnlinkFile(temp_dex.GetFilename() + ".new"));
}
// Test that link data is written out (or at least the header is updated).
@@ -768,11 +804,61 @@ TEST_F(DexLayoutTest, LinkData) {
/*dex_filename*/ nullptr,
&profile_file,
dexlayout_args));
+ ASSERT_TRUE(UnlinkFile(temp_dex.GetFilename() + ".new"));
+}
- std::string output_dex = temp_dex.GetFilename() + ".new";
- std::vector<std::string> rm_exec_argv =
- { "/bin/rm", output_dex };
- ASSERT_TRUE(::art::Exec(rm_exec_argv, &error_msg));
+TEST_F(DexLayoutTest, ClassFilter) {
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
+ std::string error_msg;
+ const ArtDexFileLoader dex_file_loader;
+ const std::string input_jar = GetTestDexFileName("ManyMethods");
+ CHECK(dex_file_loader.Open(input_jar.c_str(),
+ input_jar.c_str(),
+ /*verify*/ true,
+ /*verify_checksum*/ true,
+ &error_msg,
+ &dex_files)) << error_msg;
+ ASSERT_EQ(dex_files.size(), 1u);
+ for (const std::unique_ptr<const DexFile>& dex_file : dex_files) {
+ EXPECT_GT(dex_file->NumClassDefs(), 1u);
+ for (uint32_t i = 0; i < dex_file->NumClassDefs(); ++i) {
+ const DexFile::ClassDef& class_def = dex_file->GetClassDef(i);
+ LOG(INFO) << dex_file->GetClassDescriptor(class_def);
+ }
+ Options options;
+ // Filter out all the classes other than the one below based on class descriptor.
+ options.class_filter_.insert("LManyMethods$Strings;");
+ DexLayout dexlayout(options,
+ /*info*/ nullptr,
+ /*out_file*/ nullptr,
+ /*header*/ nullptr);
+ std::unique_ptr<DexContainer> out;
+ dexlayout.ProcessDexFile(dex_file->GetLocation().c_str(),
+ dex_file.get(),
+ /*dex_file_index*/ 0,
+ &out);
+ std::unique_ptr<const DexFile> output_dex_file(
+ dex_file_loader.OpenWithDataSection(
+ out->GetMainSection()->Begin(),
+ out->GetMainSection()->Size(),
+ out->GetDataSection()->Begin(),
+ out->GetDataSection()->Size(),
+ dex_file->GetLocation().c_str(),
+ /* checksum */ 0,
+ /*oat_dex_file*/ nullptr,
+ /* verify */ true,
+ /*verify_checksum*/ false,
+ &error_msg));
+ ASSERT_TRUE(output_dex_file != nullptr);
+
+ ASSERT_EQ(output_dex_file->NumClassDefs(), options.class_filter_.size());
+ for (uint32_t i = 0; i < output_dex_file->NumClassDefs(); ++i) {
+ // Check that every class in the output dex file is in the filter.
+ const DexFile::ClassDef& class_def = output_dex_file->GetClassDef(i);
+ ASSERT_TRUE(options.class_filter_.find(output_dex_file->GetClassDescriptor(class_def)) !=
+ options.class_filter_.end());
+ }
+ }
}
} // namespace art
diff --git a/dexlist/Android.bp b/dexlist/Android.bp
index 8ecff4210e..2703732db6 100644
--- a/dexlist/Android.bp
+++ b/dexlist/Android.bp
@@ -17,7 +17,11 @@ art_cc_binary {
host_supported: true,
srcs: ["dexlist.cc"],
cflags: ["-Wall", "-Werror"],
- shared_libs: ["libart", "libbase"],
+ shared_libs: ["libdexfile", "libbase"],
+ // TODO: fix b/72216369 and remove the need for this.
+ include_dirs: [
+ "art/runtime" // dex utils.
+ ],
}
art_cc_test {
diff --git a/dexlist/dexlist.cc b/dexlist/dexlist.cc
index 556938b563..31a146d90e 100644
--- a/dexlist/dexlist.cc
+++ b/dexlist/dexlist.cc
@@ -23,15 +23,20 @@
* List all methods in all concrete classes in one or more DEX files.
*/
+#include <fcntl.h>
+#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
-#include "base/logging.h" // For InitLogging.
-#include "dex/code_item_accessors-no_art-inl.h"
+#include <android-base/logging.h>
+
+#include "dex/code_item_accessors-inl.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_file_loader.h"
-#include "mem_map.h"
-#include "runtime.h"
namespace art {
@@ -100,7 +105,7 @@ static void dumpMethod(const DexFile* pDexFile,
if (pCode == nullptr || codeOffset == 0) {
return;
}
- CodeItemDebugInfoAccessor accessor(*pDexFile, pCode, pDexFile->GetDebugInfoOffset(pCode));
+ CodeItemDebugInfoAccessor accessor(*pDexFile, pCode, idx);
// Method information.
const DexFile::MethodId& pMethodId = pDexFile->GetMethodId(idx);
@@ -165,6 +170,34 @@ void dumpClass(const DexFile* pDexFile, u4 idx) {
}
}
+static bool openAndMapFile(const char* fileName,
+ const uint8_t** base,
+ size_t* size,
+ std::string* error_msg) {
+ int fd = open(fileName, O_RDONLY);
+ if (fd < 0) {
+ *error_msg = "open failed";
+ return false;
+ }
+ struct stat st;
+ if (fstat(fd, &st) < 0) {
+ *error_msg = "stat failed";
+ return false;
+ }
+ *size = st.st_size;
+ if (*size == 0) {
+ *error_msg = "size == 0";
+ return false;
+ }
+ void* addr = mmap(nullptr /*addr*/, *size, PROT_READ, MAP_PRIVATE, fd, 0 /*offset*/);
+ if (addr == MAP_FAILED) {
+ *error_msg = "mmap failed";
+ return false;
+ }
+ *base = reinterpret_cast<const uint8_t*>(addr);
+ return true;
+}
+
/*
* Processes a single file (either direct .dex or indirect .zip/.jar/.apk).
*/
@@ -172,12 +205,18 @@ static int processFile(const char* fileName) {
// If the file is not a .dex file, the function tries .zip/.jar/.apk files,
// all of which are Zip archives with "classes.dex" inside.
static constexpr bool kVerifyChecksum = true;
+ const uint8_t* base = nullptr;
+ size_t size = 0;
std::string error_msg;
+ if (!openAndMapFile(fileName, &base, &size, &error_msg)) {
+ LOG(ERROR) << error_msg;
+ return -1;
+ }
std::vector<std::unique_ptr<const DexFile>> dex_files;
- if (!DexFileLoader::Open(
- fileName, fileName, /* verify */ true, kVerifyChecksum, &error_msg, &dex_files)) {
- fputs(error_msg.c_str(), stderr);
- fputc('\n', stderr);
+ const DexFileLoader dex_file_loader;
+ if (!dex_file_loader.OpenAll(
+ base, size, fileName, /*verify*/ true, kVerifyChecksum, &error_msg, &dex_files)) {
+ LOG(ERROR) << error_msg;
return -1;
}
@@ -198,19 +237,15 @@ static int processFile(const char* fileName) {
* Shows usage.
*/
static void usage(void) {
- fprintf(stderr, "Copyright (C) 2007 The Android Open Source Project\n\n");
- fprintf(stderr, "%s: [-m p.c.m] [-o outfile] dexfile...\n", gProgName);
- fprintf(stderr, "\n");
+ LOG(ERROR) << "Copyright (C) 2007 The Android Open Source Project\n";
+ LOG(ERROR) << gProgName << ": [-m p.c.m] [-o outfile] dexfile...";
+ LOG(ERROR) << "";
}
/*
* Main driver of the dexlist utility.
*/
int dexlistDriver(int argc, char** argv) {
- // Art specific set up.
- InitLogging(argv, Runtime::Abort);
- MemMap::Init();
-
// Reset options.
bool wantUsage = false;
memset(&gOptions, 0, sizeof(gOptions));
@@ -233,7 +268,7 @@ int dexlistDriver(int argc, char** argv) {
gOptions.argCopy = strdup(optarg);
char* meth = strrchr(gOptions.argCopy, '.');
if (meth == nullptr) {
- fprintf(stderr, "Expected: package.Class.method\n");
+ LOG(ERROR) << "Expected: package.Class.method";
wantUsage = true;
} else {
*meth = '\0';
@@ -250,7 +285,7 @@ int dexlistDriver(int argc, char** argv) {
// Detect early problems.
if (optind == argc) {
- fprintf(stderr, "%s: no file specified\n", gProgName);
+ LOG(ERROR) << "No file specified";
wantUsage = true;
}
if (wantUsage) {
@@ -263,7 +298,7 @@ int dexlistDriver(int argc, char** argv) {
if (gOptions.outputFileName) {
gOutFile = fopen(gOptions.outputFileName, "w");
if (!gOutFile) {
- fprintf(stderr, "Can't open %s\n", gOptions.outputFileName);
+ PLOG(ERROR) << "Can't open " << gOptions.outputFileName;
free(gOptions.argCopy);
return 1;
}
@@ -283,6 +318,9 @@ int dexlistDriver(int argc, char** argv) {
} // namespace art
int main(int argc, char** argv) {
+ // Output all logging to stderr.
+ android::base::SetLogger(android::base::StderrLogger);
+
return art::dexlistDriver(argc, argv);
}
diff --git a/dexoptanalyzer/dexoptanalyzer.cc b/dexoptanalyzer/dexoptanalyzer.cc
index bedc4576d5..6d4b3e3e52 100644
--- a/dexoptanalyzer/dexoptanalyzer.cc
+++ b/dexoptanalyzer/dexoptanalyzer.cc
@@ -260,6 +260,7 @@ class DexoptAnalyzer FINAL {
oat_file_assistant = std::make_unique<OatFileAssistant>(dex_file_.c_str(),
isa_,
false /*load_executable*/,
+ false /*only_load_system_executable*/,
vdex_fd_,
oat_fd_,
zip_fd_);
diff --git a/dt_fd_forward/dt_fd_forward.cc b/dt_fd_forward/dt_fd_forward.cc
index cf3088dc60..116cdf84ed 100644
--- a/dt_fd_forward/dt_fd_forward.cc
+++ b/dt_fd_forward/dt_fd_forward.cc
@@ -162,7 +162,7 @@ IOResult FdForwardTransport::ReadFullyWithoutChecks(void* data, size_t ndata) {
IOResult FdForwardTransport::ReadUpToMax(void* data, size_t ndata, /*out*/size_t* read_amount) {
CHECK_GE(read_fd_.get(), 0);
int avail;
- int res = ioctl(read_fd_, FIONREAD, &avail);
+ int res = TEMP_FAILURE_RETRY(ioctl(read_fd_, FIONREAD, &avail));
if (res < 0) {
DT_IO_ERROR("Failed ioctl(read_fd_, FIONREAD, &avail)");
return IOResult::kError;
@@ -172,7 +172,7 @@ IOResult FdForwardTransport::ReadUpToMax(void* data, size_t ndata, /*out*/size_t
if (*read_amount == 0) {
// Check if the read would cause an EOF.
struct pollfd pollfd = { read_fd_, POLLRDHUP, 0 };
- res = poll(&pollfd, /*nfds*/1, /*timeout*/0);
+ res = TEMP_FAILURE_RETRY(poll(&pollfd, /*nfds*/1, /*timeout*/0));
if (res < 0 || (pollfd.revents & POLLERR) == POLLERR) {
DT_IO_ERROR("Failed poll on read fd.");
return IOResult::kError;
@@ -214,13 +214,13 @@ IOResult FdForwardTransport::ReadFully(void* data, size_t ndata) {
// No more data. Sleep without locks until more is available. We don't actually check for any
// errors since possible ones are (1) the read_fd_ is closed or wakeup happens which are both
// fine since the wakeup_fd_ or the poll failing will wake us up.
- int poll_res = poll(pollfds, 2, -1);
+ int poll_res = TEMP_FAILURE_RETRY(poll(pollfds, 2, -1));
if (poll_res < 0) {
DT_IO_ERROR("Failed to poll!");
}
// Clear the wakeup_fd regardless.
uint64_t val;
- int unused = read(wakeup_fd_, &val, sizeof(val));
+ int unused = TEMP_FAILURE_RETRY(read(wakeup_fd_, &val, sizeof(val)));
DCHECK(unused == sizeof(val) || errno == EAGAIN);
if (poll_res < 0) {
return IOResult::kError;
@@ -276,17 +276,16 @@ static void SendAcceptMessage(int fd) {
TEMP_FAILURE_RETRY(send(fd, kAcceptMessage, sizeof(kAcceptMessage), MSG_EOR));
}
-IOResult FdForwardTransport::ReceiveFdsFromSocket() {
+IOResult FdForwardTransport::ReceiveFdsFromSocket(bool* do_handshake) {
union {
cmsghdr cm;
uint8_t buffer[CMSG_SPACE(sizeof(FdSet))];
} msg_union;
- // We don't actually care about the data. Only FDs. We need to have an iovec anyway to tell if we
- // got the values or not though.
- char dummy = '\0';
+ // This lets us know if we need to do a handshake or not.
+ char message[128];
iovec iov;
- iov.iov_base = &dummy;
- iov.iov_len = sizeof(dummy);
+ iov.iov_base = message;
+ iov.iov_len = sizeof(message);
msghdr msg;
memset(&msg, 0, sizeof(msg));
@@ -307,8 +306,22 @@ IOResult FdForwardTransport::ReceiveFdsFromSocket() {
return IOResult::kError;
}
FdSet out_fds = FdSet::ReadData(CMSG_DATA(cmsg));
- if (out_fds.read_fd_ < 0 || out_fds.write_fd_ < 0 || out_fds.write_lock_fd_ < 0) {
+ bool failed = false;
+ if (out_fds.read_fd_ < 0 ||
+ out_fds.write_fd_ < 0 ||
+ out_fds.write_lock_fd_ < 0) {
DT_IO_ERROR("Received fds were invalid!");
+ failed = true;
+ } else if (strcmp(kPerformHandshakeMessage, message) == 0) {
+ *do_handshake = true;
+ } else if (strcmp(kSkipHandshakeMessage, message) == 0) {
+ *do_handshake = false;
+ } else {
+ DT_IO_ERROR("Unknown message sent with fds.");
+ failed = true;
+ }
+
+ if (failed) {
if (out_fds.read_fd_ >= 0) {
close(out_fds.read_fd_);
}
@@ -346,8 +359,9 @@ jdwpTransportError FdForwardTransport::Accept() {
state_cv_.wait(lk);
}
+ bool do_handshake = false;
DCHECK_NE(listen_fd_.get(), -1);
- if (ReceiveFdsFromSocket() != IOResult::kOk) {
+ if (ReceiveFdsFromSocket(&do_handshake) != IOResult::kOk) {
CHECK(ChangeState(TransportState::kOpening, TransportState::kListening));
return ERR(IO_ERROR);
}
@@ -355,24 +369,27 @@ jdwpTransportError FdForwardTransport::Accept() {
current_seq_num_++;
// Moved to the opening state.
- char handshake_recv[sizeof(kJdwpHandshake)];
- memset(handshake_recv, 0, sizeof(handshake_recv));
- IOResult res = ReadFullyWithoutChecks(handshake_recv, sizeof(handshake_recv));
- if (res != IOResult::kOk ||
- strncmp(handshake_recv, kJdwpHandshake, sizeof(kJdwpHandshake)) != 0) {
- DT_IO_ERROR("Failed to read handshake");
- CHECK(ChangeState(TransportState::kOpening, TransportState::kListening));
- CloseFdsLocked();
- // Retry.
- continue;
- }
- res = WriteFullyWithoutChecks(kJdwpHandshake, sizeof(kJdwpHandshake));
- if (res != IOResult::kOk) {
- DT_IO_ERROR("Failed to write handshake");
- CHECK(ChangeState(TransportState::kOpening, TransportState::kListening));
- CloseFdsLocked();
- // Retry.
- continue;
+ if (do_handshake) {
+ // Perform the handshake
+ char handshake_recv[sizeof(kJdwpHandshake)];
+ memset(handshake_recv, 0, sizeof(handshake_recv));
+ IOResult res = ReadFullyWithoutChecks(handshake_recv, sizeof(handshake_recv));
+ if (res != IOResult::kOk ||
+ strncmp(handshake_recv, kJdwpHandshake, sizeof(kJdwpHandshake)) != 0) {
+ DT_IO_ERROR("Failed to read handshake");
+ CHECK(ChangeState(TransportState::kOpening, TransportState::kListening));
+ CloseFdsLocked();
+ // Retry.
+ continue;
+ }
+ res = WriteFullyWithoutChecks(kJdwpHandshake, sizeof(kJdwpHandshake));
+ if (res != IOResult::kOk) {
+ DT_IO_ERROR("Failed to write handshake");
+ CHECK(ChangeState(TransportState::kOpening, TransportState::kListening));
+ CloseFdsLocked();
+ // Retry.
+ continue;
+ }
}
break;
}
diff --git a/dt_fd_forward/dt_fd_forward.h b/dt_fd_forward/dt_fd_forward.h
index 9303c59acd..07a574bfa0 100644
--- a/dt_fd_forward/dt_fd_forward.h
+++ b/dt_fd_forward/dt_fd_forward.h
@@ -105,7 +105,9 @@ class FdForwardTransport : public jdwpTransportEnv {
bool ChangeState(TransportState old_state, TransportState new_state); // REQUIRES(state_mutex_);
- IOResult ReceiveFdsFromSocket();
+ // Gets the fds from the server side. do_handshake is set to whether the jdwp handshake still
+ // needs to be performed on the new connection.
+ IOResult ReceiveFdsFromSocket(/*out*/bool* do_handshake);
IOResult WriteFully(const void* data, size_t ndata); // REQUIRES(!state_mutex_);
IOResult WriteFullyWithoutChecks(const void* data, size_t ndata); // REQUIRES(state_mutex_);
diff --git a/dt_fd_forward/export/fd_transport.h b/dt_fd_forward/export/fd_transport.h
index 245f0c2275..144ac5c6ec 100644
--- a/dt_fd_forward/export/fd_transport.h
+++ b/dt_fd_forward/export/fd_transport.h
@@ -47,6 +47,12 @@ struct FdSet {
}
};
+// Sent with the file descriptors if the transport must still perform the jdwp handshake.
+static constexpr char kPerformHandshakeMessage[] = "HANDSHAKE:REQD";
+
+// Sent with the file descriptors if the transport can skip waiting for the handshake.
+static constexpr char kSkipHandshakeMessage[] = "HANDSHAKE:SKIP";
+
// This message is sent over the fd associated with the transport when we are listening for fds.
static constexpr char kListenStartMessage[] = "dt_fd_forward:START-LISTEN";
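The two handshake marker strings let the side that hands over the file descriptors tell the transport whether the JDWP handshake bytes still need to be exchanged. A rough sketch of what the sending side could look like, for illustration only; the real sender lives in the runtime and packs an FdSet rather than a plain array, and SendFdsWithMessage is a hypothetical helper:

  #include <string.h>
  #include <sys/socket.h>
  #include <sys/uio.h>

  bool SendFdsWithMessage(int sock, int read_fd, int write_fd, int write_lock_fd, bool skip_handshake) {
    const char* message = skip_handshake ? kSkipHandshakeMessage : kPerformHandshakeMessage;
    iovec iov;
    iov.iov_base = const_cast<char*>(message);
    iov.iov_len = strlen(message) + 1;  // Include the terminator so strcmp() on the receiving side works.

    int fds[3] = { read_fd, write_fd, write_lock_fd };
    union {
      cmsghdr cm;
      char buffer[CMSG_SPACE(sizeof(fds))];
    } msg_union;

    msghdr msg;
    memset(&msg, 0, sizeof(msg));
    msg.msg_iov = &iov;
    msg.msg_iovlen = 1;
    msg.msg_control = msg_union.buffer;
    msg.msg_controllen = sizeof(msg_union.buffer);

    cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);
    cmsg->cmsg_level = SOL_SOCKET;
    cmsg->cmsg_type = SCM_RIGHTS;
    cmsg->cmsg_len = CMSG_LEN(sizeof(fds));
    memcpy(CMSG_DATA(cmsg), fds, sizeof(fds));

    // TEMP_FAILURE_RETRY as used throughout the transport code above.
    return TEMP_FAILURE_RETRY(sendmsg(sock, &msg, 0)) >= 0;
  }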
diff --git a/oatdump/Android.bp b/oatdump/Android.bp
index 4851722734..c93c172eb4 100644
--- a/oatdump/Android.bp
+++ b/oatdump/Android.bp
@@ -36,6 +36,7 @@ art_cc_binary {
"libart",
"libart-compiler",
"libart-disassembler",
+ "libdexfile",
"libbase",
],
}
@@ -50,6 +51,7 @@ art_cc_binary {
"libartd",
"libartd-compiler",
"libartd-disassembler",
+ "libdexfile",
"libbase",
],
}
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index ca8077fea1..6c9f569b19 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -26,6 +26,7 @@
#include <unordered_set>
#include <vector>
+#include "android-base/logging.h"
#include "android-base/stringprintf.h"
#include "android-base/strings.h"
@@ -38,6 +39,7 @@
#include "class_linker-inl.h"
#include "class_linker.h"
#include "compiled_method.h"
+#include "debug/debug_info.h"
#include "debug/elf_debug_writer.h"
#include "debug/method_debug_info.h"
#include "dex/code_item_accessors-inl.h"
@@ -171,14 +173,15 @@ class OatSymbolizer FINAL {
text_size,
oat_file_->BssSize(),
oat_file_->BssMethodsOffset(),
- oat_file_->BssRootsOffset());
+ oat_file_->BssRootsOffset(),
+ oat_file_->VdexSize());
builder_->WriteDynamicSection();
const OatHeader& oat_header = oat_file_->GetOatHeader();
#define DO_TRAMPOLINE(fn_name) \
if (oat_header.Get ## fn_name ## Offset() != 0) { \
debug::MethodDebugInfo info = {}; \
- info.trampoline_name = #fn_name; \
+ info.custom_name = #fn_name; \
info.isa = oat_header.GetInstructionSet(); \
info.is_code_address_text_relative = true; \
size_t code_offset = oat_header.Get ## fn_name ## Offset(); \
@@ -201,8 +204,13 @@ class OatSymbolizer FINAL {
// TODO: Try to symbolize link-time thunks?
// This would require disassembling all methods to find branches outside the method code.
+ // TODO: Add symbols for dex bytecode in the .dex section.
+
+ debug::DebugInfo debug_info{};
+ debug_info.compiled_methods = ArrayRef<const debug::MethodDebugInfo>(method_debug_infos_);
+
debug::WriteDebugInfo(builder_.get(),
- ArrayRef<const debug::MethodDebugInfo>(method_debug_infos_),
+ debug_info,
dwarf::DW_DEBUG_FRAME_FORMAT,
true /* write_oat_patches */);
@@ -301,7 +309,7 @@ class OatSymbolizer FINAL {
const void* code_address = EntryPointToCodePointer(reinterpret_cast<void*>(entry_point));
debug::MethodDebugInfo info = {};
- DCHECK(info.trampoline_name.empty());
+ DCHECK(info.custom_name.empty());
info.dex_file = &dex_file;
info.class_def_index = class_def_index;
info.dex_method_index = dex_method_index;
@@ -717,7 +725,6 @@ class OatDumper {
}
vdex_file->Unquicken(MakeNonOwningPointerVector(tmp_dex_files),
- vdex_file->GetQuickeningInfo(),
/* decompile_return_instruction */ true);
*dex_files = std::move(tmp_dex_files);
@@ -1049,14 +1056,19 @@ class OatDumper {
os << StringPrintf("checksum: 0x%08x\n", oat_dex_file.GetDexFileLocationChecksum());
const uint8_t* const oat_file_begin = oat_dex_file.GetOatFile()->Begin();
- const uint8_t* const vdex_file_begin = oat_dex_file.GetOatFile()->DexBegin();
-
- // Print data range of the dex file embedded inside the corresponding vdex file.
- const uint8_t* const dex_file_pointer = oat_dex_file.GetDexFilePointer();
- uint32_t dex_offset = dchecked_integral_cast<uint32_t>(dex_file_pointer - vdex_file_begin);
- os << StringPrintf("dex-file: 0x%08x..0x%08x\n",
- dex_offset,
- dchecked_integral_cast<uint32_t>(dex_offset + oat_dex_file.FileSize() - 1));
+ if (oat_dex_file.GetOatFile()->ContainsDexCode()) {
+ const uint8_t* const vdex_file_begin = oat_dex_file.GetOatFile()->DexBegin();
+
+ // Print data range of the dex file embedded inside the corresponding vdex file.
+ const uint8_t* const dex_file_pointer = oat_dex_file.GetDexFilePointer();
+ uint32_t dex_offset = dchecked_integral_cast<uint32_t>(dex_file_pointer - vdex_file_begin);
+ os << StringPrintf(
+ "dex-file: 0x%08x..0x%08x\n",
+ dex_offset,
+ dchecked_integral_cast<uint32_t>(dex_offset + oat_dex_file.FileSize() - 1));
+ } else {
+ os << StringPrintf("dex-file not in VDEX file\n");
+ }
// Create the dex file early. A lot of print-out things depend on it.
std::string error_msg;
@@ -1145,6 +1157,7 @@ class OatDumper {
// Vdex unquicken output should match original input bytecode
uint32_t orig_checksum =
reinterpret_cast<DexFile::Header*>(const_cast<uint8_t*>(dex_file->Begin()))->checksum_;
+ CHECK_EQ(orig_checksum, dex_file->CalculateChecksum());
if (orig_checksum != dex_file->CalculateChecksum()) {
os << "Unexpected checksum from unquicken dex file '" << dex_file_location << "'\n";
return false;
@@ -1197,7 +1210,11 @@ class OatDumper {
return false;
}
- if (!file->WriteFully(dex_file->Begin(), fsize)) {
+ bool success = false;
+ success = file->WriteFully(dex_file->Begin(), fsize);
+ // }
+
+ if (!success) {
os << "Failed to write dex file";
file->Erase();
return false;
@@ -2531,7 +2548,7 @@ class ImageDumper {
}
}
} else {
- CodeItemDataAccessor code_item_accessor(method);
+ CodeItemDataAccessor code_item_accessor(method->DexInstructionData());
size_t dex_instruction_bytes = code_item_accessor.InsnsSizeInCodeUnits() * 2;
stats_.dex_instruction_bytes += dex_instruction_bytes;
@@ -2901,7 +2918,7 @@ static int DumpImage(gc::space::ImageSpace* image_space,
std::ostream* os) REQUIRES_SHARED(Locks::mutator_lock_) {
const ImageHeader& image_header = image_space->GetImageHeader();
if (!image_header.IsValid()) {
- fprintf(stderr, "Invalid image header %s\n", image_space->GetImageLocation().c_str());
+ LOG(ERROR) << "Invalid image header " << image_space->GetImageLocation();
return EXIT_FAILURE;
}
ImageDumper image_dumper(os, *image_space, image_header, options);
@@ -3035,8 +3052,15 @@ static int DumpOatWithoutRuntime(OatFile* oat_file, OatDumperOptions* options, s
return (success) ? EXIT_SUCCESS : EXIT_FAILURE;
}
-static int DumpOat(Runtime* runtime, const char* oat_filename, OatDumperOptions* options,
+static int DumpOat(Runtime* runtime,
+ const char* oat_filename,
+ const char* dex_filename,
+ OatDumperOptions* options,
std::ostream* os) {
+ if (dex_filename == nullptr) {
+ LOG(WARNING) << "No dex filename provided, "
+ << "oatdump might fail if the oat file does not contain the dex code.";
+ }
std::string error_msg;
std::unique_ptr<OatFile> oat_file(OatFile::Open(oat_filename,
oat_filename,
@@ -3044,10 +3068,10 @@ static int DumpOat(Runtime* runtime, const char* oat_filename, OatDumperOptions*
nullptr,
false,
/*low_4gb*/false,
- nullptr,
+ dex_filename,
&error_msg));
if (oat_file == nullptr) {
- fprintf(stderr, "Failed to open oat file from '%s': %s\n", oat_filename, error_msg.c_str());
+ LOG(ERROR) << "Failed to open oat file from '" << oat_filename << "': " << error_msg;
return EXIT_FAILURE;
}
@@ -3058,7 +3082,10 @@ static int DumpOat(Runtime* runtime, const char* oat_filename, OatDumperOptions*
}
}
-static int SymbolizeOat(const char* oat_filename, std::string& output_name, bool no_bits) {
+static int SymbolizeOat(const char* oat_filename,
+ const char* dex_filename,
+ std::string& output_name,
+ bool no_bits) {
std::string error_msg;
std::unique_ptr<OatFile> oat_file(OatFile::Open(oat_filename,
oat_filename,
@@ -3066,10 +3093,10 @@ static int SymbolizeOat(const char* oat_filename, std::string& output_name, bool
nullptr,
false,
/*low_4gb*/false,
- nullptr,
+ dex_filename,
&error_msg));
if (oat_file == nullptr) {
- fprintf(stderr, "Failed to open oat file from '%s': %s\n", oat_filename, error_msg.c_str());
+ LOG(ERROR) << "Failed to open oat file from '" << oat_filename << "': " << error_msg;
return EXIT_FAILURE;
}
@@ -3084,7 +3111,7 @@ static int SymbolizeOat(const char* oat_filename, std::string& output_name, bool
result = oat_symbolizer.Symbolize();
}
if (!result) {
- fprintf(stderr, "Failed to symbolize\n");
+ LOG(ERROR) << "Failed to symbolize";
return EXIT_FAILURE;
}
@@ -3096,7 +3123,8 @@ class IMTDumper {
static bool Dump(Runtime* runtime,
const std::string& imt_file,
bool dump_imt_stats,
- const char* oat_filename) {
+ const char* oat_filename,
+ const char* dex_filename) {
Thread* self = Thread::Current();
ScopedObjectAccess soa(self);
@@ -3112,10 +3140,10 @@ class IMTDumper {
nullptr,
false,
/*low_4gb*/false,
- nullptr,
+ dex_filename,
&error_msg));
if (oat_file == nullptr) {
- fprintf(stderr, "Failed to open oat file from '%s': %s\n", oat_filename, error_msg.c_str());
+ LOG(ERROR) << "Failed to open oat file from '" << oat_filename << "': " << error_msg;
return false;
}
@@ -3547,6 +3575,8 @@ struct OatdumpArgs : public CmdlineArgs {
if (option.starts_with("--oat-file=")) {
oat_filename_ = option.substr(strlen("--oat-file=")).data();
+ } else if (option.starts_with("--dex-file=")) {
+ dex_filename_ = option.substr(strlen("--dex-file=")).data();
} else if (option.starts_with("--image=")) {
image_location_ = option.substr(strlen("--image=")).data();
} else if (option == "--no-dump:vmap") {
@@ -3698,6 +3728,7 @@ struct OatdumpArgs : public CmdlineArgs {
public:
const char* oat_filename_ = nullptr;
+ const char* dex_filename_ = nullptr;
const char* class_filter_ = "";
const char* method_filter_ = "";
const char* image_location_ = nullptr;
@@ -3758,10 +3789,12 @@ struct OatdumpMain : public CmdlineMain<OatdumpArgs> {
// This is what "strip --only-keep-debug" does when it creates separate ELF file
// with only debug data. We use it in similar way to exclude .rodata and .text.
bool no_bits = args_->only_keep_debug_;
- return SymbolizeOat(args_->oat_filename_, args_->output_name_, no_bits) == EXIT_SUCCESS;
+ return SymbolizeOat(args_->oat_filename_, args_->dex_filename_, args_->output_name_, no_bits)
+ == EXIT_SUCCESS;
} else {
return DumpOat(nullptr,
args_->oat_filename_,
+ args_->dex_filename_,
oat_dumper_options_.get(),
args_->os_) == EXIT_SUCCESS;
}
@@ -3774,12 +3807,14 @@ struct OatdumpMain : public CmdlineMain<OatdumpArgs> {
return IMTDumper::Dump(runtime,
args_->imt_dump_,
args_->imt_stat_dump_,
- args_->oat_filename_);
+ args_->oat_filename_,
+ args_->dex_filename_);
}
if (args_->oat_filename_ != nullptr) {
return DumpOat(runtime,
args_->oat_filename_,
+ args_->dex_filename_,
oat_dumper_options_.get(),
args_->os_) == EXIT_SUCCESS;
}
@@ -3793,6 +3828,9 @@ struct OatdumpMain : public CmdlineMain<OatdumpArgs> {
} // namespace art
int main(int argc, char** argv) {
+ // Output all logging to stderr.
+ android::base::SetLogger(android::base::StderrLogger);
+
art::OatdumpMain main;
return main.Main(argc, argv);
}
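With the changes above, dumping an oat file whose dex code is not embedded in the vdex now requires pointing oatdump at the original dex location via the new --dex-file option; without it, a warning is logged and the dump may fail. A minimal usage sketch (the paths below are illustrative, not from this change):

  # Dex code embedded in the vdex: --dex-file may be omitted (a warning is logged).
  oatdump --oat-file=/data/app/example/oat/arm64/base.odex

  # Dex code kept outside the vdex: also pass the original dex/apk so the oat file can be opened.
  oatdump --oat-file=/data/app/example/oat/arm64/base.odex --dex-file=/data/app/example/base.apk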
diff --git a/openjdkjvmti/Android.bp b/openjdkjvmti/Android.bp
index 0283999d54..1500bcae24 100644
--- a/openjdkjvmti/Android.bp
+++ b/openjdkjvmti/Android.bp
@@ -58,6 +58,7 @@ cc_defaults {
"libopenjdkjvmti_headers",
],
shared_libs: [
+ "libdexfile",
"libbase",
],
}
diff --git a/openjdkjvmti/OpenjdkJvmTi.cc b/openjdkjvmti/OpenjdkJvmTi.cc
index aae805569f..a0c7f40b6f 100644
--- a/openjdkjvmti/OpenjdkJvmTi.cc
+++ b/openjdkjvmti/OpenjdkJvmTi.cc
@@ -83,6 +83,12 @@ DeoptManager gDeoptManager;
} \
} while (false)
+// Returns whether we are able to use all jvmti features.
+static bool IsFullJvmtiAvailable() {
+ art::Runtime* runtime = art::Runtime::Current();
+ return runtime->GetInstrumentation()->IsForcedInterpretOnly() || runtime->IsJavaDebuggable();
+}
+
class JvmtiFunctions {
private:
static jvmtiError getEnvironmentError(jvmtiEnv* env) {
@@ -1092,10 +1098,64 @@ class JvmtiFunctions {
&gEventHandler);
}
+#define FOR_ALL_CAPABILITIES(FUN) \
+ FUN(can_tag_objects) \
+ FUN(can_generate_field_modification_events) \
+ FUN(can_generate_field_access_events) \
+ FUN(can_get_bytecodes) \
+ FUN(can_get_synthetic_attribute) \
+ FUN(can_get_owned_monitor_info) \
+ FUN(can_get_current_contended_monitor) \
+ FUN(can_get_monitor_info) \
+ FUN(can_pop_frame) \
+ FUN(can_redefine_classes) \
+ FUN(can_signal_thread) \
+ FUN(can_get_source_file_name) \
+ FUN(can_get_line_numbers) \
+ FUN(can_get_source_debug_extension) \
+ FUN(can_access_local_variables) \
+ FUN(can_maintain_original_method_order) \
+ FUN(can_generate_single_step_events) \
+ FUN(can_generate_exception_events) \
+ FUN(can_generate_frame_pop_events) \
+ FUN(can_generate_breakpoint_events) \
+ FUN(can_suspend) \
+ FUN(can_redefine_any_class) \
+ FUN(can_get_current_thread_cpu_time) \
+ FUN(can_get_thread_cpu_time) \
+ FUN(can_generate_method_entry_events) \
+ FUN(can_generate_method_exit_events) \
+ FUN(can_generate_all_class_hook_events) \
+ FUN(can_generate_compiled_method_load_events) \
+ FUN(can_generate_monitor_events) \
+ FUN(can_generate_vm_object_alloc_events) \
+ FUN(can_generate_native_method_bind_events) \
+ FUN(can_generate_garbage_collection_events) \
+ FUN(can_generate_object_free_events) \
+ FUN(can_force_early_return) \
+ FUN(can_get_owned_monitor_stack_depth_info) \
+ FUN(can_get_constant_pool) \
+ FUN(can_set_native_method_prefix) \
+ FUN(can_retransform_classes) \
+ FUN(can_retransform_any_class) \
+ FUN(can_generate_resource_exhaustion_heap_events) \
+ FUN(can_generate_resource_exhaustion_threads_events)
+
static jvmtiError GetPotentialCapabilities(jvmtiEnv* env, jvmtiCapabilities* capabilities_ptr) {
ENSURE_VALID_ENV(env);
ENSURE_NON_NULL(capabilities_ptr);
*capabilities_ptr = kPotentialCapabilities;
+ if (UNLIKELY(!IsFullJvmtiAvailable())) {
+#define REMOVE_NONDEBUGGABLE_UNSUPPORTED(e) \
+ do { \
+ if (kNonDebuggableUnsupportedCapabilities.e == 1) { \
+ capabilities_ptr->e = 0; \
+ } \
+ } while (false);
+
+ FOR_ALL_CAPABILITIES(REMOVE_NONDEBUGGABLE_UNSUPPORTED);
+#undef REMOVE_NONDEBUGGABLE_UNSUPPORTED
+ }
return OK;
}
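The long per-capability repetition is collapsed into an X-macro: FOR_ALL_CAPABILITIES takes another macro and expands it once per jvmtiCapabilities field, so GetPotentialCapabilities, AddCapabilities, and RelinquishCapabilities can all share one list. A minimal, self-contained sketch of the same pattern; the struct, list, and function names below are hypothetical, not the ART ones:

#include <cstdio>

struct Caps { int can_tag_objects; int can_suspend; };

// One list of fields; each user supplies the per-field operation as FUN.
#define FOR_EACH_CAP(FUN) \
  FUN(can_tag_objects)    \
  FUN(can_suspend)

// Clears every field of 'out' that is marked as unsupported, like REMOVE_NONDEBUGGABLE_UNSUPPORTED.
static void ClearUnsupported(Caps* out, const Caps& unsupported) {
#define CLEAR_IF_UNSUPPORTED(e) \
  do {                          \
    if (unsupported.e == 1) {   \
      out->e = 0;               \
    }                           \
  } while (false);
  FOR_EACH_CAP(CLEAR_IF_UNSUPPORTED)
#undef CLEAR_IF_UNSUPPORTED
}

int main() {
  Caps potential = {1, 1};
  Caps unsupported = {0, 1};  // Pretend can_suspend is unavailable on this runtime.
  ClearUnsupported(&potential, unsupported);
  std::printf("can_tag_objects=%d can_suspend=%d\n", potential.can_tag_objects, potential.can_suspend);
  return 0;
}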
@@ -1122,49 +1182,9 @@ class JvmtiFunctions {
ret = ERR(NOT_AVAILABLE); \
} \
} \
- } while (false)
-
- ADD_CAPABILITY(can_tag_objects);
- ADD_CAPABILITY(can_generate_field_modification_events);
- ADD_CAPABILITY(can_generate_field_access_events);
- ADD_CAPABILITY(can_get_bytecodes);
- ADD_CAPABILITY(can_get_synthetic_attribute);
- ADD_CAPABILITY(can_get_owned_monitor_info);
- ADD_CAPABILITY(can_get_current_contended_monitor);
- ADD_CAPABILITY(can_get_monitor_info);
- ADD_CAPABILITY(can_pop_frame);
- ADD_CAPABILITY(can_redefine_classes);
- ADD_CAPABILITY(can_signal_thread);
- ADD_CAPABILITY(can_get_source_file_name);
- ADD_CAPABILITY(can_get_line_numbers);
- ADD_CAPABILITY(can_get_source_debug_extension);
- ADD_CAPABILITY(can_access_local_variables);
- ADD_CAPABILITY(can_maintain_original_method_order);
- ADD_CAPABILITY(can_generate_single_step_events);
- ADD_CAPABILITY(can_generate_exception_events);
- ADD_CAPABILITY(can_generate_frame_pop_events);
- ADD_CAPABILITY(can_generate_breakpoint_events);
- ADD_CAPABILITY(can_suspend);
- ADD_CAPABILITY(can_redefine_any_class);
- ADD_CAPABILITY(can_get_current_thread_cpu_time);
- ADD_CAPABILITY(can_get_thread_cpu_time);
- ADD_CAPABILITY(can_generate_method_entry_events);
- ADD_CAPABILITY(can_generate_method_exit_events);
- ADD_CAPABILITY(can_generate_all_class_hook_events);
- ADD_CAPABILITY(can_generate_compiled_method_load_events);
- ADD_CAPABILITY(can_generate_monitor_events);
- ADD_CAPABILITY(can_generate_vm_object_alloc_events);
- ADD_CAPABILITY(can_generate_native_method_bind_events);
- ADD_CAPABILITY(can_generate_garbage_collection_events);
- ADD_CAPABILITY(can_generate_object_free_events);
- ADD_CAPABILITY(can_force_early_return);
- ADD_CAPABILITY(can_get_owned_monitor_stack_depth_info);
- ADD_CAPABILITY(can_get_constant_pool);
- ADD_CAPABILITY(can_set_native_method_prefix);
- ADD_CAPABILITY(can_retransform_classes);
- ADD_CAPABILITY(can_retransform_any_class);
- ADD_CAPABILITY(can_generate_resource_exhaustion_heap_events);
- ADD_CAPABILITY(can_generate_resource_exhaustion_threads_events);
+ } while (false);
+
+ FOR_ALL_CAPABILITIES(ADD_CAPABILITY);
#undef ADD_CAPABILITY
gEventHandler.HandleChangedCapabilities(ArtJvmTiEnv::AsArtJvmTiEnv(env),
changed,
@@ -1186,49 +1206,9 @@ class JvmtiFunctions {
changed.e = 1; \
} \
} \
- } while (false)
-
- DEL_CAPABILITY(can_tag_objects);
- DEL_CAPABILITY(can_generate_field_modification_events);
- DEL_CAPABILITY(can_generate_field_access_events);
- DEL_CAPABILITY(can_get_bytecodes);
- DEL_CAPABILITY(can_get_synthetic_attribute);
- DEL_CAPABILITY(can_get_owned_monitor_info);
- DEL_CAPABILITY(can_get_current_contended_monitor);
- DEL_CAPABILITY(can_get_monitor_info);
- DEL_CAPABILITY(can_pop_frame);
- DEL_CAPABILITY(can_redefine_classes);
- DEL_CAPABILITY(can_signal_thread);
- DEL_CAPABILITY(can_get_source_file_name);
- DEL_CAPABILITY(can_get_line_numbers);
- DEL_CAPABILITY(can_get_source_debug_extension);
- DEL_CAPABILITY(can_access_local_variables);
- DEL_CAPABILITY(can_maintain_original_method_order);
- DEL_CAPABILITY(can_generate_single_step_events);
- DEL_CAPABILITY(can_generate_exception_events);
- DEL_CAPABILITY(can_generate_frame_pop_events);
- DEL_CAPABILITY(can_generate_breakpoint_events);
- DEL_CAPABILITY(can_suspend);
- DEL_CAPABILITY(can_redefine_any_class);
- DEL_CAPABILITY(can_get_current_thread_cpu_time);
- DEL_CAPABILITY(can_get_thread_cpu_time);
- DEL_CAPABILITY(can_generate_method_entry_events);
- DEL_CAPABILITY(can_generate_method_exit_events);
- DEL_CAPABILITY(can_generate_all_class_hook_events);
- DEL_CAPABILITY(can_generate_compiled_method_load_events);
- DEL_CAPABILITY(can_generate_monitor_events);
- DEL_CAPABILITY(can_generate_vm_object_alloc_events);
- DEL_CAPABILITY(can_generate_native_method_bind_events);
- DEL_CAPABILITY(can_generate_garbage_collection_events);
- DEL_CAPABILITY(can_generate_object_free_events);
- DEL_CAPABILITY(can_force_early_return);
- DEL_CAPABILITY(can_get_owned_monitor_stack_depth_info);
- DEL_CAPABILITY(can_get_constant_pool);
- DEL_CAPABILITY(can_set_native_method_prefix);
- DEL_CAPABILITY(can_retransform_classes);
- DEL_CAPABILITY(can_retransform_any_class);
- DEL_CAPABILITY(can_generate_resource_exhaustion_heap_events);
- DEL_CAPABILITY(can_generate_resource_exhaustion_threads_events);
+ } while (false);
+
+ FOR_ALL_CAPABILITIES(DEL_CAPABILITY);
#undef DEL_CAPABILITY
gEventHandler.HandleChangedCapabilities(ArtJvmTiEnv::AsArtJvmTiEnv(env),
changed,
@@ -1236,6 +1216,8 @@ class JvmtiFunctions {
return OK;
}
+#undef FOR_ALL_CAPABILITIES
+
static jvmtiError GetCapabilities(jvmtiEnv* env, jvmtiCapabilities* capabilities_ptr) {
ENSURE_VALID_ENV(env);
ENSURE_NON_NULL(capabilities_ptr);
@@ -1341,7 +1323,7 @@ class JvmtiFunctions {
static jvmtiError GetVersionNumber(jvmtiEnv* env, jint* version_ptr) {
ENSURE_VALID_ENV(env);
- *version_ptr = JVMTI_VERSION;
+ *version_ptr = ArtJvmTiEnv::AsArtJvmTiEnv(env)->ti_version;
return OK;
}
@@ -1495,9 +1477,10 @@ static bool IsJvmtiVersion(jint version) {
extern const jvmtiInterface_1 gJvmtiInterface;
-ArtJvmTiEnv::ArtJvmTiEnv(art::JavaVMExt* runtime, EventHandler* event_handler)
+ArtJvmTiEnv::ArtJvmTiEnv(art::JavaVMExt* runtime, EventHandler* event_handler, jint version)
: art_vm(runtime),
local_data(nullptr),
+ ti_version(version),
capabilities(),
event_info_mutex_("jvmtiEnv_EventInfoMutex") {
object_tag_table = std::unique_ptr<ObjectTagTable>(new ObjectTagTable(event_handler, this));
@@ -1506,8 +1489,8 @@ ArtJvmTiEnv::ArtJvmTiEnv(art::JavaVMExt* runtime, EventHandler* event_handler)
// Creates a jvmtiEnv and returns it with the art::ti::Env that is associated with it. new_art_ti
// is a pointer to the uninitialized memory for an art::ti::Env.
-static void CreateArtJvmTiEnv(art::JavaVMExt* vm, /*out*/void** new_jvmtiEnv) {
- struct ArtJvmTiEnv* env = new ArtJvmTiEnv(vm, &gEventHandler);
+static void CreateArtJvmTiEnv(art::JavaVMExt* vm, jint version, /*out*/void** new_jvmtiEnv) {
+ struct ArtJvmTiEnv* env = new ArtJvmTiEnv(vm, &gEventHandler, version);
*new_jvmtiEnv = env;
gEventHandler.RegisterArtJvmTiEnv(env);
@@ -1520,8 +1503,14 @@ static void CreateArtJvmTiEnv(art::JavaVMExt* vm, /*out*/void** new_jvmtiEnv) {
// places the return value in 'env' if this library can handle the GetEnv request. Otherwise
// returns false and does not modify the 'env' pointer.
static jint GetEnvHandler(art::JavaVMExt* vm, /*out*/void** env, jint version) {
- if (IsJvmtiVersion(version)) {
- CreateArtJvmTiEnv(vm, env);
+ // JavaDebuggable will either be set by the runtime as it is starting up or the plugin if it's
+ // loaded early enough. If this is false we cannot guarantee conformance to all JVMTI behaviors
+ // due to optimizations. We will only allow agents to get ArtTiEnvs using the kArtTiVersion.
+ if (IsFullJvmtiAvailable() && IsJvmtiVersion(version)) {
+ CreateArtJvmTiEnv(vm, JVMTI_VERSION, env);
+ return JNI_OK;
+ } else if (version == kArtTiVersion) {
+ CreateArtJvmTiEnv(vm, kArtTiVersion, env);
return JNI_OK;
} else {
printf("version 0x%x is not valid!", version);
@@ -1546,6 +1535,13 @@ extern "C" bool ArtPlugin_Initialize() {
MethodUtil::Register(&gEventHandler);
SearchUtil::Register();
HeapUtil::Register();
+ Transformer::Setup();
+
+ {
+ // Make sure we can deopt anything we need to.
+ art::ScopedObjectAccess soa(art::Thread::Current());
+ gDeoptManager.FinishSetup();
+ }
runtime->GetJavaVM()->AddEnvironmentHook(GetEnvHandler);
diff --git a/openjdkjvmti/art_jvmti.h b/openjdkjvmti/art_jvmti.h
index 2a8c2e91df..73cc601e3e 100644
--- a/openjdkjvmti/art_jvmti.h
+++ b/openjdkjvmti/art_jvmti.h
@@ -62,10 +62,22 @@ namespace openjdkjvmti {
class ObjectTagTable;
+// A special version value used to identify the ART tooling interface, which mostly matches the
+// jvmti spec but where everything is best-effort. This is used to implement the userdebug
+// 'debug-anything' behavior.
+//
+// This is the value 0x70010200.
+static constexpr jint kArtTiVersion = JVMTI_VERSION_1_2 | 0x40000000;
+
// A structure that is a jvmtiEnv with additional information for the runtime.
struct ArtJvmTiEnv : public jvmtiEnv {
art::JavaVMExt* art_vm;
void* local_data;
+
+ // The ti_version we are compatible with. This is only for giving the correct value for GetVersion
+ // when running on a userdebug/eng device.
+ jint ti_version;
+
jvmtiCapabilities capabilities;
EventMasks event_masks;
@@ -90,7 +102,7 @@ struct ArtJvmTiEnv : public jvmtiEnv {
// RW lock to protect access to all of the event data.
art::ReaderWriterMutex event_info_mutex_ DEFAULT_MUTEX_ACQUIRED_AFTER;
- ArtJvmTiEnv(art::JavaVMExt* runtime, EventHandler* event_handler);
+ ArtJvmTiEnv(art::JavaVMExt* runtime, EventHandler* event_handler, jint ti_version);
static ArtJvmTiEnv* AsArtJvmTiEnv(jvmtiEnv* env) {
return art::down_cast<ArtJvmTiEnv*>(env);
@@ -272,6 +284,60 @@ const jvmtiCapabilities kPotentialCapabilities = {
.can_generate_resource_exhaustion_threads_events = 0,
};
+// These are capabilities that are disabled if we were loaded without being debuggable.
+//
+// This includes the following capabilities:
+// can_retransform_any_class:
+// can_retransform_classes:
+// can_redefine_any_class:
+// can_redefine_classes:
+// We need to ensure that inlined code is either not present or can always be deoptimized. This
+// is not guaranteed for non-debuggable processes since we might have inlined bootclasspath code
+// on a thread's stack.
+const jvmtiCapabilities kNonDebuggableUnsupportedCapabilities = {
+ .can_tag_objects = 0,
+ .can_generate_field_modification_events = 0,
+ .can_generate_field_access_events = 0,
+ .can_get_bytecodes = 0,
+ .can_get_synthetic_attribute = 0,
+ .can_get_owned_monitor_info = 0,
+ .can_get_current_contended_monitor = 0,
+ .can_get_monitor_info = 0,
+ .can_pop_frame = 0,
+ .can_redefine_classes = 1,
+ .can_signal_thread = 0,
+ .can_get_source_file_name = 0,
+ .can_get_line_numbers = 0,
+ .can_get_source_debug_extension = 0,
+ .can_access_local_variables = 0,
+ .can_maintain_original_method_order = 0,
+ .can_generate_single_step_events = 0,
+ .can_generate_exception_events = 0,
+ .can_generate_frame_pop_events = 0,
+ .can_generate_breakpoint_events = 0,
+ .can_suspend = 0,
+ .can_redefine_any_class = 1,
+ .can_get_current_thread_cpu_time = 0,
+ .can_get_thread_cpu_time = 0,
+ .can_generate_method_entry_events = 0,
+ .can_generate_method_exit_events = 0,
+ .can_generate_all_class_hook_events = 0,
+ .can_generate_compiled_method_load_events = 0,
+ .can_generate_monitor_events = 0,
+ .can_generate_vm_object_alloc_events = 0,
+ .can_generate_native_method_bind_events = 0,
+ .can_generate_garbage_collection_events = 0,
+ .can_generate_object_free_events = 0,
+ .can_force_early_return = 0,
+ .can_get_owned_monitor_stack_depth_info = 0,
+ .can_get_constant_pool = 0,
+ .can_set_native_method_prefix = 0,
+ .can_retransform_classes = 1,
+ .can_retransform_any_class = 1,
+ .can_generate_resource_exhaustion_heap_events = 0,
+ .can_generate_resource_exhaustion_threads_events = 0,
+};
+
} // namespace openjdkjvmti
#endif // ART_OPENJDKJVMTI_ART_JVMTI_H_
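Because the capabilities marked with 1 above are stripped from GetPotentialCapabilities on a non-debuggable runtime, a well-behaved agent can probe before attempting a redefine. A small hedged sketch; the helper name is illustrative:

#include <jvmti.h>

// Returns whether class redefinition is on offer from this environment. On a non-debuggable
// runtime the bit is cleared by GetPotentialCapabilities (see kNonDebuggableUnsupportedCapabilities).
static bool CanRequestRedefine(jvmtiEnv* jvmti) {
  jvmtiCapabilities potential;
  if (jvmti->GetPotentialCapabilities(&potential) != JVMTI_ERROR_NONE) {
    return false;
  }
  return potential.can_redefine_classes == 1;
}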
diff --git a/openjdkjvmti/deopt_manager.cc b/openjdkjvmti/deopt_manager.cc
index aced769cb5..9e11a25e58 100644
--- a/openjdkjvmti/deopt_manager.cc
+++ b/openjdkjvmti/deopt_manager.cc
@@ -38,11 +38,11 @@
#include "base/enums.h"
#include "base/mutex-inl.h"
#include "dex/dex_file_annotations.h"
+#include "dex/modifiers.h"
#include "events-inl.h"
#include "jni_internal.h"
#include "mirror/class-inl.h"
#include "mirror/object_array-inl.h"
-#include "modifiers.h"
#include "nativehelper/scoped_local_ref.h"
#include "runtime_callbacks.h"
#include "scoped_thread_state_change-inl.h"
@@ -68,7 +68,9 @@ bool JvmtiMethodInspectionCallback::IsMethodSafeToJit(art::ArtMethod* method) {
}
DeoptManager::DeoptManager()
- : deoptimization_status_lock_("JVMTI_DeoptimizationStatusLock"),
+ : deoptimization_status_lock_("JVMTI_DeoptimizationStatusLock",
+ static_cast<art::LockLevel>(
+ art::LockLevel::kClassLinkerClassesLock + 1)),
deoptimization_condition_("JVMTI_DeoptimizationCondition", deoptimization_status_lock_),
performing_deoptimization_(false),
global_deopt_count_(0),
@@ -91,6 +93,33 @@ void DeoptManager::Shutdown() {
callbacks->RemoveMethodInspectionCallback(&inspection_callback_);
}
+void DeoptManager::FinishSetup() {
+ art::Thread* self = art::Thread::Current();
+ art::MutexLock mu(self, deoptimization_status_lock_);
+
+ art::Runtime* runtime = art::Runtime::Current();
+ // See if we need to do anything.
+ if (!runtime->IsJavaDebuggable()) {
+ // See if we can enable all JVMTI functions. If this is false, only kArtTiVersion agents can be
+ // retrieved and they will all be best-effort.
+ if (PhaseUtil::GetPhaseUnchecked() == JVMTI_PHASE_ONLOAD) {
+ // We are still early enough to change the compiler options and get full JVMTI support.
+ LOG(INFO) << "Openjdkjvmti plugin loaded on a non-debuggable runtime. Changing runtime to "
+ << "debuggable state. Please pass '--debuggable' to dex2oat and "
+ << "'-Xcompiler-option --debuggable' to dalvikvm in the future.";
+ DCHECK(runtime->GetJit() == nullptr) << "Jit should not be running yet!";
+ runtime->AddCompilerOption("--debuggable");
+ runtime->SetJavaDebuggable(true);
+ } else {
+ LOG(WARNING) << "Openjdkjvmti plugin was loaded on a non-debuggable Runtime. Plugin was "
+ << "loaded too late to change runtime state to DEBUGGABLE. Only kArtTiVersion "
+ << "(0x" << std::hex << kArtTiVersion << ") environments are available. Some "
+ << "functionality might not work properly.";
+ }
+ runtime->DeoptimizeBootImage();
+ }
+}
+
bool DeoptManager::MethodHasBreakpoints(art::ArtMethod* method) {
art::MutexLock lk(art::Thread::Current(), deoptimization_status_lock_);
return MethodHasBreakpointsLocked(method);
diff --git a/openjdkjvmti/deopt_manager.h b/openjdkjvmti/deopt_manager.h
index b265fa8ec2..a495b6835c 100644
--- a/openjdkjvmti/deopt_manager.h
+++ b/openjdkjvmti/deopt_manager.h
@@ -101,6 +101,10 @@ class DeoptManager {
void DeoptimizeThread(art::Thread* target) REQUIRES_SHARED(art::Locks::mutator_lock_);
void DeoptimizeAllThreads() REQUIRES_SHARED(art::Locks::mutator_lock_);
+ void FinishSetup()
+ REQUIRES(!deoptimization_status_lock_, !art::Roles::uninterruptible_)
+ REQUIRES_SHARED(art::Locks::mutator_lock_);
+
static DeoptManager* Get();
private:
@@ -141,9 +145,8 @@ class DeoptManager {
REQUIRES(!art::Roles::uninterruptible_, !art::Locks::mutator_lock_);
static constexpr const char* kDeoptManagerInstrumentationKey = "JVMTI_DeoptManager";
- // static constexpr const char* kDeoptManagerThreadName = "JVMTI_DeoptManagerWorkerThread";
- art::Mutex deoptimization_status_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ art::Mutex deoptimization_status_lock_ ACQUIRED_BEFORE(art::Locks::classlinker_classes_lock_);
art::ConditionVariable deoptimization_condition_ GUARDED_BY(deoptimization_status_lock_);
bool performing_deoptimization_ GUARDED_BY(deoptimization_status_lock_);
diff --git a/openjdkjvmti/events-inl.h b/openjdkjvmti/events-inl.h
index 007669b50f..74ffb84579 100644
--- a/openjdkjvmti/events-inl.h
+++ b/openjdkjvmti/events-inl.h
@@ -187,7 +187,7 @@ FORALL_EVENT_TYPES(MAKE_EVENT_HANDLER_FUNC)
template <ArtJvmtiEvent kEvent, typename ...Args>
inline std::vector<impl::EventHandlerFunc<kEvent>> EventHandler::CollectEvents(art::Thread* thread,
Args... args) const {
- art::MutexLock mu(thread, envs_lock_);
+ art::ReaderMutexLock mu(thread, envs_lock_);
std::vector<impl::EventHandlerFunc<kEvent>> handlers;
for (ArtJvmTiEnv* env : envs) {
if (ShouldDispatch<kEvent>(env, thread, args...)) {
@@ -527,7 +527,7 @@ inline bool EventHandler::ShouldDispatch(ArtJvmTiEnv* env,
}
inline void EventHandler::RecalculateGlobalEventMask(ArtJvmtiEvent event) {
- art::MutexLock mu(art::Thread::Current(), envs_lock_);
+ art::WriterMutexLock mu(art::Thread::Current(), envs_lock_);
RecalculateGlobalEventMaskLocked(event);
}
diff --git a/openjdkjvmti/events.cc b/openjdkjvmti/events.cc
index d98fda5f9c..62b73c08c0 100644
--- a/openjdkjvmti/events.cc
+++ b/openjdkjvmti/events.cc
@@ -196,12 +196,12 @@ void EventMasks::HandleChangedCapabilities(const jvmtiCapabilities& caps, bool c
}
void EventHandler::RegisterArtJvmTiEnv(ArtJvmTiEnv* env) {
- art::MutexLock mu(art::Thread::Current(), envs_lock_);
+ art::WriterMutexLock mu(art::Thread::Current(), envs_lock_);
envs.push_back(env);
}
void EventHandler::RemoveArtJvmTiEnv(ArtJvmTiEnv* env) {
- art::MutexLock mu(art::Thread::Current(), envs_lock_);
+ art::WriterMutexLock mu(art::Thread::Current(), envs_lock_);
// Since we might be currently iterating over the envs list we cannot actually erase elements.
// Instead we will simply replace them with 'nullptr' and skip them manually.
auto it = std::find(envs.begin(), envs.end(), env);
@@ -1143,7 +1143,7 @@ jvmtiError EventHandler::SetEvent(ArtJvmTiEnv* env,
{
// Change the event masks atomically.
art::Thread* self = art::Thread::Current();
- art::MutexLock mu(self, envs_lock_);
+ art::WriterMutexLock mu(self, envs_lock_);
art::WriterMutexLock mu_env_info(self, env->event_info_mutex_);
old_state = global_mask.Test(event);
if (mode == JVMTI_ENABLE) {
diff --git a/openjdkjvmti/events.h b/openjdkjvmti/events.h
index 81edb931cd..8141eff88c 100644
--- a/openjdkjvmti/events.h
+++ b/openjdkjvmti/events.h
@@ -283,7 +283,7 @@ class EventHandler {
ALWAYS_INLINE
inline void RecalculateGlobalEventMask(ArtJvmtiEvent event) REQUIRES(!envs_lock_);
ALWAYS_INLINE
- inline void RecalculateGlobalEventMaskLocked(ArtJvmtiEvent event) REQUIRES(envs_lock_);
+ inline void RecalculateGlobalEventMaskLocked(ArtJvmtiEvent event) REQUIRES_SHARED(envs_lock_);
template <ArtJvmtiEvent kEvent>
ALWAYS_INLINE inline void DispatchClassFileLoadHookEvent(art::Thread* thread,
@@ -310,7 +310,8 @@ class EventHandler {
std::list<ArtJvmTiEnv*> envs GUARDED_BY(envs_lock_);
// Top level lock. Nothing at all should be held when we lock this.
- mutable art::Mutex envs_lock_ ACQUIRED_BEFORE(art::Locks::instrument_entrypoints_lock_);
+ mutable art::ReaderWriterMutex envs_lock_
+ ACQUIRED_BEFORE(art::Locks::instrument_entrypoints_lock_);
// A union of all enabled events, anywhere.
EventMask global_mask;
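envs_lock_ becomes a ReaderWriterMutex: dispatch paths that only iterate the env list now take it shared (ReaderMutexLock), while registering/removing envs and recalculating event masks take it exclusively (WriterMutexLock). A minimal sketch of the same pattern using std::shared_mutex; the class and member names are illustrative, not ART's:

#include <list>
#include <mutex>
#include <shared_mutex>

struct Env {};

class EnvRegistry {
 public:
  void Register(Env* env) {
    std::unique_lock<std::shared_mutex> lock(mutex_);  // Writer: mutates the list.
    envs_.push_back(env);
  }
  template <typename Fn>
  void ForEach(Fn fn) const {
    std::shared_lock<std::shared_mutex> lock(mutex_);  // Reader: dispatch only iterates.
    for (Env* env : envs_) {
      if (env != nullptr) {  // Removed envs are nulled out, mirroring RemoveArtJvmTiEnv above.
        fn(env);
      }
    }
  }
 private:
  mutable std::shared_mutex mutex_;
  std::list<Env*> envs_;
};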
diff --git a/openjdkjvmti/fixed_up_dex_file.cc b/openjdkjvmti/fixed_up_dex_file.cc
index da7d60ac2f..e9522b3984 100644
--- a/openjdkjvmti/fixed_up_dex_file.cc
+++ b/openjdkjvmti/fixed_up_dex_file.cc
@@ -30,10 +30,13 @@
*/
#include "fixed_up_dex_file.h"
+#include "dex/art_dex_file_loader.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_file_loader.h"
+#include "dex/dex_file_verifier.h"
// Runtime includes.
+#include "dex_container.h"
#include "dex/compact_dex_level.h"
#include "dex_to_dex_decompiler.h"
#include "dexlayout.h"
@@ -42,14 +45,13 @@
namespace openjdkjvmti {
-static void RecomputeDexChecksum(art::DexFile* dex_file)
- REQUIRES_SHARED(art::Locks::mutator_lock_) {
+static void RecomputeDexChecksum(art::DexFile* dex_file) {
reinterpret_cast<art::DexFile::Header*>(const_cast<uint8_t*>(dex_file->Begin()))->checksum_ =
dex_file->CalculateChecksum();
}
-static void DoDexUnquicken(const art::DexFile& new_dex_file, const art::DexFile& original_dex_file)
- REQUIRES_SHARED(art::Locks::mutator_lock_) {
+static void DoDexUnquicken(const art::DexFile& new_dex_file,
+ const art::DexFile& original_dex_file) {
const art::OatDexFile* oat_dex = original_dex_file.GetOatDexFile();
if (oat_dex == nullptr) {
return;
@@ -62,17 +64,60 @@ static void DoDexUnquicken(const art::DexFile& new_dex_file, const art::DexFile&
if (vdex == nullptr) {
return;
}
- art::VdexFile::UnquickenDexFile(
- new_dex_file, vdex->GetQuickeningInfo(), /* decompile_return_instruction */true);
+ vdex->UnquickenDexFile(new_dex_file, original_dex_file, /* decompile_return_instruction */true);
}
-std::unique_ptr<FixedUpDexFile> FixedUpDexFile::Create(const art::DexFile& original) {
+static void DCheckVerifyDexFile(const art::DexFile& dex) {
+ if (art::kIsDebugBuild) {
+ std::string error;
+ if (!art::DexFileVerifier::Verify(&dex,
+ dex.Begin(),
+ dex.Size(),
+ "FixedUpDexFile_Verification.dex",
+ /*verify_checksum*/ true,
+ &error)) {
+ LOG(FATAL) << "Failed to verify de-quickened dex file: " << error;
+ }
+ }
+}
+
+std::unique_ptr<FixedUpDexFile> FixedUpDexFile::Create(const art::DexFile& original,
+ const char* descriptor) {
// Copy the data into mutable memory.
std::vector<unsigned char> data;
- data.resize(original.Size());
- memcpy(data.data(), original.Begin(), original.Size());
+ std::unique_ptr<const art::DexFile> new_dex_file;
std::string error;
- std::unique_ptr<const art::DexFile> new_dex_file(art::DexFileLoader::Open(
+ const art::ArtDexFileLoader dex_file_loader;
+
+ if (original.IsCompactDexFile()) {
+ // Since we are supposed to return a standard dex, convert back using dexlayout. It's OK to do
+ // this before unquickening.
+ art::Options options;
+ options.compact_dex_level_ = art::CompactDexLevel::kCompactDexLevelNone;
+ // Add a filter to only include the class that has the matching descriptor.
+ static constexpr bool kFilterByDescriptor = true;
+ if (kFilterByDescriptor) {
+ options.class_filter_.insert(descriptor);
+ }
+ art::DexLayout dex_layout(options,
+ /*info*/ nullptr,
+ /*out_file*/ nullptr,
+ /*header*/ nullptr);
+ std::unique_ptr<art::DexContainer> dex_container;
+ dex_layout.ProcessDexFile(original.GetLocation().c_str(),
+ &original,
+ 0,
+ &dex_container);
+ art::DexContainer::Section* main_section = dex_container->GetMainSection();
+ CHECK_EQ(dex_container->GetDataSection()->Size(), 0u);
+ data.insert(data.end(), main_section->Begin(), main_section->End());
+ } else {
+ data.resize(original.Size());
+ memcpy(data.data(), original.Begin(), original.Size());
+ }
+
+ // Open the dex file in the buffer.
+ new_dex_file = dex_file_loader.Open(
data.data(),
data.size(),
/*location*/"Unquickening_dexfile.dex",
@@ -80,41 +125,17 @@ std::unique_ptr<FixedUpDexFile> FixedUpDexFile::Create(const art::DexFile& origi
/*oat_dex_file*/nullptr,
/*verify*/false,
/*verify_checksum*/false,
- &error));
- if (new_dex_file.get() == nullptr) {
+ &error);
+
+ if (new_dex_file == nullptr) {
LOG(ERROR) << "Unable to open dex file from memory for unquickening! error: " << error;
return nullptr;
}
DoDexUnquicken(*new_dex_file, original);
- if (original.IsCompactDexFile()) {
- // Since we are supposed to return a standard dex, convert back using dexlayout.
- art::Options options;
- options.output_to_memmap_ = true;
- options.compact_dex_level_ = art::CompactDexLevel::kCompactDexLevelNone;
- options.update_checksum_ = true;
- art::DexLayout dex_layout(options, nullptr, nullptr);
- dex_layout.ProcessDexFile(new_dex_file->GetLocation().c_str(), new_dex_file.get(), 0);
- std::unique_ptr<art::MemMap> mem_map(dex_layout.GetAndReleaseMemMap());
-
- const uint32_t dex_file_size =
- reinterpret_cast<const art::DexFile::Header*>(mem_map->Begin())->file_size_;
- // Overwrite the dex file stored in data with the new result.
- data.clear();
- data.insert(data.end(), mem_map->Begin(), mem_map->Begin() + dex_file_size);
- new_dex_file = art::DexFileLoader::Open(
- data.data(),
- data.size(),
- /*location*/"Unquickening_dexfile.dex",
- /*location_checksum*/0,
- /*oat_dex_file*/nullptr,
- /*verify*/false,
- /*verify_checksum*/false,
- &error);
- }
-
RecomputeDexChecksum(const_cast<art::DexFile*>(new_dex_file.get()));
+ DCheckVerifyDexFile(*new_dex_file);
std::unique_ptr<FixedUpDexFile> ret(new FixedUpDexFile(std::move(new_dex_file), std::move(data)));
return ret;
}
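Create() now also needs the descriptor of the class being fixed up, so that compact dex input can be converted back to standard dex filtered down to just that class before unquickening. A hedged caller-side sketch; the wrapper function is illustrative, not part of the change:

#include <memory>

#include "fixed_up_dex_file.h"  // for openjdkjvmti::FixedUpDexFile

static std::unique_ptr<openjdkjvmti::FixedUpDexFile> FixUpForClass(const art::DexFile& original,
                                                                   const char* descriptor) {
  // descriptor is a JNI-style class descriptor, e.g. "Ljava/lang/Long;".
  std::unique_ptr<openjdkjvmti::FixedUpDexFile> fixed(
      openjdkjvmti::FixedUpDexFile::Create(original, descriptor));
  if (fixed == nullptr) {
    return nullptr;  // Create() already logged the failure.
  }
  // fixed->Begin()/Size() now cover unquickened, standard (non-compact) dex bytes.
  return fixed;
}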
diff --git a/openjdkjvmti/fixed_up_dex_file.h b/openjdkjvmti/fixed_up_dex_file.h
index b8f349cf8c..594e8a7358 100644
--- a/openjdkjvmti/fixed_up_dex_file.h
+++ b/openjdkjvmti/fixed_up_dex_file.h
@@ -49,8 +49,8 @@ namespace openjdkjvmti {
// are running on.
class FixedUpDexFile {
public:
- static std::unique_ptr<FixedUpDexFile> Create(const art::DexFile& original)
- REQUIRES_SHARED(art::Locks::mutator_lock_);
+ static std::unique_ptr<FixedUpDexFile> Create(const art::DexFile& original,
+ const char* descriptor);
const art::DexFile& GetDexFile() {
return *dex_file_;
diff --git a/openjdkjvmti/ti_breakpoint.cc b/openjdkjvmti/ti_breakpoint.cc
index fa7a34401d..d5fffdf439 100644
--- a/openjdkjvmti/ti_breakpoint.cc
+++ b/openjdkjvmti/ti_breakpoint.cc
@@ -39,11 +39,11 @@
#include "base/mutex-inl.h"
#include "deopt_manager.h"
#include "dex/dex_file_annotations.h"
+#include "dex/modifiers.h"
#include "events-inl.h"
#include "jni_internal.h"
#include "mirror/class-inl.h"
#include "mirror/object_array-inl.h"
-#include "modifiers.h"
#include "nativehelper/scoped_local_ref.h"
#include "runtime_callbacks.h"
#include "scoped_thread_state_change-inl.h"
diff --git a/openjdkjvmti/ti_class.cc b/openjdkjvmti/ti_class.cc
index f9eb008af2..4d54d756d5 100644
--- a/openjdkjvmti/ti_class.cc
+++ b/openjdkjvmti/ti_class.cc
@@ -42,6 +42,7 @@
#include "class_linker.h"
#include "class_table-inl.h"
#include "common_throws.h"
+#include "dex/art_dex_file_loader.h"
#include "dex/dex_file_annotations.h"
#include "dex/dex_file_loader.h"
#include "events-inl.h"
@@ -107,12 +108,13 @@ static std::unique_ptr<const art::DexFile> MakeSingleDexFile(art::Thread* self,
}
uint32_t checksum = reinterpret_cast<const art::DexFile::Header*>(map->Begin())->checksum_;
std::string map_name = map->GetName();
- std::unique_ptr<const art::DexFile> dex_file(art::DexFileLoader::Open(map_name,
- checksum,
- std::move(map),
- /*verify*/true,
- /*verify_checksum*/true,
- &error_msg));
+ const art::ArtDexFileLoader dex_file_loader;
+ std::unique_ptr<const art::DexFile> dex_file(dex_file_loader.Open(map_name,
+ checksum,
+ std::move(map),
+ /*verify*/true,
+ /*verify_checksum*/true,
+ &error_msg));
if (dex_file.get() == nullptr) {
LOG(WARNING) << "Unable to load modified dex file for " << descriptor << ": " << error_msg;
art::ThrowClassFormatError(nullptr,
@@ -182,73 +184,27 @@ struct ClassCallback : public art::ClassLoadCallback {
return;
}
- // Strip the 'L' and ';' from the descriptor
- std::string name(std::string(descriptor).substr(1, strlen(descriptor) - 2));
-
art::Thread* self = art::Thread::Current();
- art::JNIEnvExt* env = self->GetJniEnv();
- ScopedLocalRef<jobject> loader(
- env, class_loader.IsNull() ? nullptr : env->AddLocalReference<jobject>(class_loader.Get()));
- std::unique_ptr<FixedUpDexFile> dex_file_copy(FixedUpDexFile::Create(initial_dex_file));
-
- // Go back to native.
- art::ScopedThreadSuspension sts(self, art::ThreadState::kNative);
- // Call all Non-retransformable agents.
- jint post_no_redefine_len = 0;
- unsigned char* post_no_redefine_dex_data = nullptr;
- std::unique_ptr<const unsigned char, FakeJvmtiDeleter<const unsigned char>>
- post_no_redefine_unique_ptr(nullptr, FakeJvmtiDeleter<const unsigned char>());
- event_handler->DispatchEvent<ArtJvmtiEvent::kClassFileLoadHookNonRetransformable>(
- self,
- static_cast<JNIEnv*>(env),
- static_cast<jclass>(nullptr), // The class doesn't really exist yet so send null.
- loader.get(),
- name.c_str(),
- static_cast<jobject>(nullptr), // Android doesn't seem to have protection domains
- static_cast<jint>(dex_file_copy->Size()),
- static_cast<const unsigned char*>(dex_file_copy->Begin()),
- static_cast<jint*>(&post_no_redefine_len),
- static_cast<unsigned char**>(&post_no_redefine_dex_data));
- if (post_no_redefine_dex_data == nullptr) {
- DCHECK_EQ(post_no_redefine_len, 0);
- post_no_redefine_dex_data = const_cast<unsigned char*>(dex_file_copy->Begin());
- post_no_redefine_len = dex_file_copy->Size();
- } else {
- post_no_redefine_unique_ptr =
- std::unique_ptr<const unsigned char, FakeJvmtiDeleter<const unsigned char>>(
- post_no_redefine_dex_data, FakeJvmtiDeleter<const unsigned char>());
- DCHECK_GT(post_no_redefine_len, 0);
+ ArtClassDefinition def;
+ def.InitFirstLoad(descriptor, class_loader, initial_dex_file);
+
+ // Call all non-retransformable agents.
+ Transformer::TransformSingleClassDirect<ArtJvmtiEvent::kClassFileLoadHookNonRetransformable>(
+ event_handler, self, &def);
+
+ std::vector<unsigned char> post_non_retransform;
+ if (def.IsModified()) {
+ // Copy the dex data after the non-retransformable events.
+ post_non_retransform.resize(def.GetDexData().size());
+ memcpy(post_non_retransform.data(), def.GetDexData().data(), post_non_retransform.size());
}
+
// Call all retransformable agents.
- jint final_len = 0;
- unsigned char* final_dex_data = nullptr;
- std::unique_ptr<const unsigned char, FakeJvmtiDeleter<const unsigned char>>
- final_dex_unique_ptr(nullptr, FakeJvmtiDeleter<const unsigned char>());
- event_handler->DispatchEvent<ArtJvmtiEvent::kClassFileLoadHookRetransformable>(
- self,
- static_cast<JNIEnv*>(env),
- static_cast<jclass>(nullptr), // The class doesn't really exist yet so send null.
- loader.get(),
- name.c_str(),
- static_cast<jobject>(nullptr), // Android doesn't seem to have protection domains
- static_cast<jint>(post_no_redefine_len),
- static_cast<const unsigned char*>(post_no_redefine_dex_data),
- static_cast<jint*>(&final_len),
- static_cast<unsigned char**>(&final_dex_data));
- if (final_dex_data == nullptr) {
- DCHECK_EQ(final_len, 0);
- final_dex_data = post_no_redefine_dex_data;
- final_len = post_no_redefine_len;
- } else {
- final_dex_unique_ptr =
- std::unique_ptr<const unsigned char, FakeJvmtiDeleter<const unsigned char>>(
- final_dex_data, FakeJvmtiDeleter<const unsigned char>());
- DCHECK_GT(final_len, 0);
- }
+ Transformer::TransformSingleClassDirect<ArtJvmtiEvent::kClassFileLoadHookRetransformable>(
+ event_handler, self, &def);
- if (final_dex_data != dex_file_copy->Begin()) {
+ if (def.IsModified()) {
LOG(WARNING) << "Changing class " << descriptor;
- art::ScopedObjectAccess soa(self);
art::StackHandleScope<2> hs(self);
// Save the results of all the non-retransformable agents.
// First allocate the ClassExt
@@ -265,7 +221,7 @@ struct ClassCallback : public art::ClassLoadCallback {
// Allocate the byte array to store the dex file bytes in.
art::MutableHandle<art::mirror::Object> arr(hs.NewHandle<art::mirror::Object>(nullptr));
- if (post_no_redefine_dex_data == dex_file_copy->Begin() && name != "java/lang/Long") {
+ if (post_non_retransform.empty() && strcmp(descriptor, "Ljava/lang/Long;") != 0) {
// we didn't have any non-retransformable agents. We can just cache a pointer to the
// initial_dex_file. It will be kept live by the class_loader.
jlong dex_ptr = reinterpret_cast<uintptr_t>(&initial_dex_file);
@@ -275,8 +231,8 @@ struct ClassCallback : public art::ClassLoadCallback {
} else {
arr.Assign(art::mirror::ByteArray::AllocateAndFill(
self,
- reinterpret_cast<const signed char*>(post_no_redefine_dex_data),
- post_no_redefine_len));
+ reinterpret_cast<const signed char*>(post_non_retransform.data()),
+ post_non_retransform.size()));
}
if (arr.IsNull()) {
LOG(WARNING) << "Unable to allocate memory for initial dex-file. Aborting transformation";
@@ -287,8 +243,8 @@ struct ClassCallback : public art::ClassLoadCallback {
std::unique_ptr<const art::DexFile> dex_file(MakeSingleDexFile(self,
descriptor,
initial_dex_file.GetLocation(),
- final_len,
- final_dex_data));
+ def.GetDexData().size(),
+ def.GetDexData().data()));
if (dex_file.get() == nullptr) {
return;
}
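Both dispatch rounds above end up in the standard JVMTI ClassFileLoadHook callback; an agent marks the definition as modified by returning a buffer obtained from Allocate, and leaves it untouched by returning nothing. A hedged agent-side sketch; the callback body is illustrative:

#include <cstring>

#include <jvmti.h>

static void JNICALL OnClassFileLoad(jvmtiEnv* jvmti, JNIEnv* /*jni*/,
                                    jclass /*class_being_redefined*/, jobject /*loader*/,
                                    const char* /*name*/, jobject /*protection_domain*/,
                                    jint class_data_len, const unsigned char* class_data,
                                    jint* new_class_data_len, unsigned char** new_class_data) {
  unsigned char* copy = nullptr;
  if (jvmti->Allocate(class_data_len, &copy) != JVMTI_ERROR_NONE) {
    return;  // Leaving *new_class_data null means "unmodified" for this agent.
  }
  std::memcpy(copy, class_data, class_data_len);
  // ... rewrite `copy` in place here ...
  *new_class_data_len = class_data_len;
  *new_class_data = copy;  // Must come from Allocate; the runtime takes ownership.
}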
diff --git a/openjdkjvmti/ti_class_definition.cc b/openjdkjvmti/ti_class_definition.cc
index 6560570136..1b641cd905 100644
--- a/openjdkjvmti/ti_class_definition.cc
+++ b/openjdkjvmti/ti_class_definition.cc
@@ -45,30 +45,80 @@
namespace openjdkjvmti {
+void ArtClassDefinition::InitializeMemory() const {
+ DCHECK(art::MemMap::kCanReplaceMapping);
+ VLOG(signals) << "Initializing de-quickened memory for dex file of " << name_;
+ CHECK(dex_data_mmap_ != nullptr);
+ CHECK(temp_mmap_ != nullptr);
+ CHECK_EQ(dex_data_mmap_->GetProtect(), PROT_NONE);
+ CHECK_EQ(temp_mmap_->GetProtect(), PROT_READ | PROT_WRITE);
+
+ std::string desc = std::string("L") + name_ + ";";
+ std::unique_ptr<FixedUpDexFile>
+ fixed_dex_file(FixedUpDexFile::Create(*initial_dex_file_unquickened_, desc.c_str()));
+ CHECK(fixed_dex_file.get() != nullptr);
+ CHECK_LE(fixed_dex_file->Size(), temp_mmap_->Size());
+ CHECK_EQ(temp_mmap_->Size(), dex_data_mmap_->Size());
+ // Copy the data to the temp mmap.
+ memcpy(temp_mmap_->Begin(), fixed_dex_file->Begin(), fixed_dex_file->Size());
+
+ // Move the mmap atomically.
+ art::MemMap* source = temp_mmap_.release();
+ std::string error;
+ CHECK(dex_data_mmap_->ReplaceWith(&source, &error)) << "Failed to replace mmap for "
+ << name_ << " because " << error;
+ CHECK(dex_data_mmap_->Protect(PROT_READ));
+}
+
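+// InitializeMemory() is the publish step of the lazy scheme: the final mapping is reserved
+// PROT_NONE, the de-quickened dex is built in a writable temp mapping, and MemMap::ReplaceWith
+// moves it into place before the destination is made read-only. A rough POSIX sketch of the same
+// idea, assuming Linux; it uses plain mmap/mremap/mprotect rather than ART's MemMap and omits the
+// fault handling that actually triggers the initialization:
+//
+//   #include <sys/mman.h>
+//   #include <cstring>
+//   #include <cstdio>
+//
+//   int main() {
+//     const size_t size = 4096;
+//     // 1. Reserve the final location with no access; any read faults until it is initialized.
+//     void* dest = mmap(nullptr, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+//     // 2. Build the expensive content in a separate writable buffer.
+//     void* temp = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+//     if (dest == MAP_FAILED || temp == MAP_FAILED) {
+//       return 1;
+//     }
+//     std::memset(temp, 0xAB, size);  // Stand-in for the de-quickened dex bytes.
+//     // 3. Publish: move the filled pages over the reserved range, then make them read-only.
+//     if (mremap(temp, size, size, MREMAP_MAYMOVE | MREMAP_FIXED, dest) == MAP_FAILED) {
+//       return 1;
+//     }
+//     mprotect(dest, size, PROT_READ);
+//     std::printf("first byte: 0x%02x\n", static_cast<unsigned char*>(dest)[0]);
+//     return 0;
+//   }
+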
bool ArtClassDefinition::IsModified() const {
- // RedefineClasses calls always are 'modified' since they need to change the original_dex_file of
+ // RedefineClasses calls always are 'modified' since they need to change the current_dex_file of
// the class.
if (redefined_) {
return true;
}
+
+ // Check to see if any change has taken place.
+ if (current_dex_file_.data() == dex_data_.data()) {
+ // no change at all.
+ return false;
+ }
+
+ // The dex_data_ was never touched by the agents.
+ if (dex_data_mmap_ != nullptr && dex_data_mmap_->GetProtect() == PROT_NONE) {
+ if (current_dex_file_.data() == dex_data_mmap_->Begin()) {
+ // the dex_data_ looks like it changed (not equal to current_dex_file_) but we never
+ // initialized the dex_data_mmap_. This means the new_dex_data was filled in without looking
+ // at the initial dex_data_.
+ return true;
+ } else if (dex_data_.data() == dex_data_mmap_->Begin()) {
+ // The dex file used to have modifications but they were not added again.
+ return true;
+ } else {
+ // It's not clear what happened. It's possible that the agent got the current dex file data
+ // from some other source so we need to initialize everything to see if it is the same.
+ VLOG(signals) << "Lazy dex file for " << name_ << " was never touched but the dex_data_ is "
+ << "changed! Need to initialize the memory to see if anything changed";
+ InitializeMemory();
+ }
+ }
+
+ // We can definitely read current_dex_file_ and dex_data_ without causing page faults.
+
// Check if the dex file we want to set is the same as the current one.
// Unfortunately we need to do this check even if no modifications have been done since it could
// be that agents were removed in the mean-time so we still have a different dex file. The dex
// checksum means this is likely to be fairly fast.
- return static_cast<jint>(original_dex_file_.size()) != dex_len_ ||
- memcmp(original_dex_file_.data(), dex_data_.get(), dex_len_) != 0;
+ return current_dex_file_.size() != dex_data_.size() ||
+ memcmp(current_dex_file_.data(), dex_data_.data(), current_dex_file_.size()) != 0;
}
-jvmtiError ArtClassDefinition::InitCommon(ArtJvmTiEnv* env, jclass klass) {
- JNIEnv* jni_env = GetJniEnv(env);
- if (jni_env == nullptr) {
- return ERR(INTERNAL);
- }
- art::ScopedObjectAccess soa(jni_env);
+jvmtiError ArtClassDefinition::InitCommon(art::Thread* self, jclass klass) {
+ art::ScopedObjectAccess soa(self);
art::ObjPtr<art::mirror::Class> m_klass(soa.Decode<art::mirror::Class>(klass));
if (m_klass.IsNull()) {
return ERR(INVALID_CLASS);
}
+ initialized_ = true;
klass_ = klass;
loader_ = soa.AddLocalReference<jobject>(m_klass->GetClassLoader());
std::string descriptor_store;
@@ -79,11 +129,18 @@ jvmtiError ArtClassDefinition::InitCommon(ArtJvmTiEnv* env, jclass klass) {
return OK;
}
+static void DequickenDexFile(const art::DexFile* dex_file,
+ const char* descriptor,
+ /*out*/std::vector<unsigned char>* dex_data)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ std::unique_ptr<FixedUpDexFile> fixed_dex_file(FixedUpDexFile::Create(*dex_file, descriptor));
+ dex_data->resize(fixed_dex_file->Size());
+ memcpy(dex_data->data(), fixed_dex_file->Begin(), fixed_dex_file->Size());
+}
+
// Gets the data surrounding the given class.
-static jvmtiError GetDexDataForRetransformation(ArtJvmTiEnv* env,
- art::Handle<art::mirror::Class> klass,
- /*out*/jint* dex_data_len,
- /*out*/unsigned char** dex_data)
+static void GetDexDataForRetransformation(art::Handle<art::mirror::Class> klass,
+ /*out*/std::vector<unsigned char>* dex_data)
REQUIRES_SHARED(art::Locks::mutator_lock_) {
art::StackHandleScope<3> hs(art::Thread::Current());
art::Handle<art::mirror::ClassExt> ext(hs.NewHandle(klass->GetExtData()));
@@ -95,12 +152,9 @@ static jvmtiError GetDexDataForRetransformation(ArtJvmTiEnv* env,
DCHECK(orig_dex->GetClass()->GetComponentType()->IsPrimitiveByte());
art::Handle<art::mirror::ByteArray> orig_dex_bytes(
hs.NewHandle(art::down_cast<art::mirror::ByteArray*>(orig_dex->AsArray())));
- *dex_data_len = static_cast<jint>(orig_dex_bytes->GetLength());
- return CopyDataIntoJvmtiBuffer(
- env,
- reinterpret_cast<const unsigned char*>(orig_dex_bytes->GetData()),
- *dex_data_len,
- /*out*/dex_data);
+ dex_data->resize(orig_dex_bytes->GetLength());
+ memcpy(dex_data->data(), orig_dex_bytes->GetData(), dex_data->size());
+ return;
} else if (orig_dex->IsDexCache()) {
dex_file = orig_dex->AsDexCache()->GetDexFile();
} else {
@@ -113,7 +167,7 @@ static jvmtiError GetDexDataForRetransformation(ArtJvmTiEnv* env,
art::JValue val;
if (!art::UnboxPrimitiveForResult(orig_dex.Get(), prim_long_class, &val)) {
// This should never happen.
- return ERR(INTERNAL);
+ LOG(FATAL) << "Unable to unbox a primitive long value!";
}
dex_file = reinterpret_cast<const art::DexFile*>(static_cast<uintptr_t>(val.GetJ()));
}
@@ -122,58 +176,201 @@ static jvmtiError GetDexDataForRetransformation(ArtJvmTiEnv* env,
if (dex_file == nullptr) {
dex_file = &klass->GetDexFile();
}
- std::unique_ptr<FixedUpDexFile> fixed_dex_file(FixedUpDexFile::Create(*dex_file));
- *dex_data_len = static_cast<jint>(fixed_dex_file->Size());
- return CopyDataIntoJvmtiBuffer(env,
- fixed_dex_file->Begin(),
- fixed_dex_file->Size(),
- /*out*/dex_data);
+ std::string storage;
+ DequickenDexFile(dex_file, klass->GetDescriptor(&storage), dex_data);
+}
+
+static bool DexNeedsDequickening(art::Handle<art::mirror::Class> klass,
+ /*out*/ bool* from_class_ext)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ art::ObjPtr<art::mirror::ClassExt> ext(klass->GetExtData());
+ if (ext.IsNull()) {
+ // We don't seem to have ever been redefined so be conservative and say we need de-quickening.
+ *from_class_ext = false;
+ return true;
+ }
+ art::ObjPtr<art::mirror::Object> orig_dex(ext->GetOriginalDexFile());
+ if (orig_dex.IsNull()) {
+ // We don't seem to have ever been redefined so be conservative and say we need de-quickening.
+ *from_class_ext = false;
+ return true;
+ } else if (!orig_dex->IsArrayInstance()) {
+ // We were redefined but the original is held in a dex-cache or dex file. This means that the
+ // original dex file is the one from the disk, which might be quickened.
+ DCHECK(orig_dex->IsDexCache() || orig_dex->GetClass()->DescriptorEquals("Ljava/lang/Long;"));
+ *from_class_ext = true;
+ return true;
+ } else {
+ // An array instance means the original-dex-file is from a redefineClasses which cannot have any
+ // quickening, so it's fine to use directly.
+ DCHECK(orig_dex->GetClass()->GetComponentType()->IsPrimitiveByte());
+ *from_class_ext = true;
+ return false;
+ }
}
-jvmtiError ArtClassDefinition::Init(ArtJvmTiEnv* env, jclass klass) {
- jvmtiError res = InitCommon(env, klass);
+static const art::DexFile* GetQuickenedDexFile(art::Handle<art::mirror::Class> klass)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ art::ObjPtr<art::mirror::ClassExt> ext(klass->GetExtData());
+ if (ext.IsNull() || ext->GetOriginalDexFile() == nullptr) {
+ return &klass->GetDexFile();
+ }
+
+ art::ObjPtr<art::mirror::Object> orig_dex(ext->GetOriginalDexFile());
+ DCHECK(!orig_dex->IsArrayInstance());
+ if (orig_dex->IsDexCache()) {
+ return orig_dex->AsDexCache()->GetDexFile();
+ }
+
+ DCHECK(orig_dex->GetClass()->DescriptorEquals("Ljava/lang/Long;"))
+ << "Expected java/lang/Long but found object of type "
+ << orig_dex->GetClass()->PrettyClass();
+ art::ObjPtr<art::mirror::Class> prim_long_class(
+ art::Runtime::Current()->GetClassLinker()->GetClassRoot(
+ art::ClassLinker::kPrimitiveLong));
+ art::JValue val;
+ if (!art::UnboxPrimitiveForResult(orig_dex.Ptr(), prim_long_class, &val)) {
+ LOG(FATAL) << "Unable to unwrap a long value!";
+ }
+ return reinterpret_cast<const art::DexFile*>(static_cast<uintptr_t>(val.GetJ()));
+}
+
+template<typename GetOriginalDexFile>
+void ArtClassDefinition::InitWithDex(GetOriginalDexFile get_original,
+ const art::DexFile* quick_dex) {
+ art::Thread* self = art::Thread::Current();
+ DCHECK(quick_dex != nullptr);
+ if (art::MemMap::kCanReplaceMapping && kEnableOnDemandDexDequicken) {
+ size_t dequick_size = quick_dex->GetDequickenedSize();
+ std::string mmap_name("anon-mmap-for-redefine: ");
+ mmap_name += name_;
+ std::string error;
+ dex_data_mmap_.reset(art::MemMap::MapAnonymous(mmap_name.c_str(),
+ nullptr,
+ dequick_size,
+ PROT_NONE,
+ /*low_4gb*/ false,
+ /*reuse*/ false,
+ &error));
+ mmap_name += "-TEMP";
+ temp_mmap_.reset(art::MemMap::MapAnonymous(mmap_name.c_str(),
+ nullptr,
+ dequick_size,
+ PROT_READ | PROT_WRITE,
+ /*low_4gb*/ false,
+ /*reuse*/ false,
+ &error));
+ if (UNLIKELY(dex_data_mmap_ != nullptr && temp_mmap_ != nullptr)) {
+ // Need to save the initial dexfile so we don't need to search for it in the fault-handler.
+ initial_dex_file_unquickened_ = quick_dex;
+ dex_data_ = art::ArrayRef<const unsigned char>(dex_data_mmap_->Begin(),
+ dex_data_mmap_->Size());
+ if (from_class_ext_) {
+ // We got initial from class_ext so the current one must have undergone redefinition so no
+ // cdex or quickening stuff.
+ // We can only do this if it's not a first load.
+ DCHECK(klass_ != nullptr);
+ const art::DexFile& cur_dex = self->DecodeJObject(klass_)->AsClass()->GetDexFile();
+ current_dex_file_ = art::ArrayRef<const unsigned char>(cur_dex.Begin(), cur_dex.Size());
+ } else {
+ // This class hasn't been redefined before. The dequickened current data is the same as the
+ // dex_data_mmap_ once it is filled in. We don't need to copy anything because the mmap will
+ // not be cleared until after everything is done.
+ current_dex_file_ = art::ArrayRef<const unsigned char>(dex_data_mmap_->Begin(),
+ dequick_size);
+ }
+ return;
+ }
+ }
+ dex_data_mmap_.reset(nullptr);
+ temp_mmap_.reset(nullptr);
+ // Failed to mmap a large enough area (or on-demand dequickening was disabled). This is
+ // unfortunate. Since currently the size is just a guess though we might as well try to do it
+ // manually.
+ get_original(/*out*/&dex_data_memory_);
+ dex_data_ = art::ArrayRef<const unsigned char>(dex_data_memory_);
+ if (from_class_ext_) {
+ // We got initial from class_ext so the current one must have undergone redefinition so no
+ // cdex or quickening stuff.
+ // We can only do this if it's not a first load.
+ DCHECK(klass_ != nullptr);
+ const art::DexFile& cur_dex = self->DecodeJObject(klass_)->AsClass()->GetDexFile();
+ current_dex_file_ = art::ArrayRef<const unsigned char>(cur_dex.Begin(), cur_dex.Size());
+ } else {
+ // No redefinition must have ever happened so the (dequickened) cur_dex is the same as the
+ // initial dex_data. We need to copy it into another buffer to keep it around if we have a
+ // real redefinition.
+ current_dex_memory_.resize(dex_data_.size());
+ memcpy(current_dex_memory_.data(), dex_data_.data(), current_dex_memory_.size());
+ current_dex_file_ = art::ArrayRef<const unsigned char>(current_dex_memory_);
+ }
+}
+
+jvmtiError ArtClassDefinition::Init(art::Thread* self, jclass klass) {
+ jvmtiError res = InitCommon(self, klass);
if (res != OK) {
return res;
}
- unsigned char* new_data = nullptr;
- art::Thread* self = art::Thread::Current();
art::ScopedObjectAccess soa(self);
art::StackHandleScope<1> hs(self);
art::Handle<art::mirror::Class> m_klass(hs.NewHandle(self->DecodeJObject(klass)->AsClass()));
- res = GetDexDataForRetransformation(env, m_klass, &dex_len_, &new_data);
- if (res != OK) {
- return res;
- }
- dex_data_ = MakeJvmtiUniquePtr(env, new_data);
- if (m_klass->GetExtData() == nullptr || m_klass->GetExtData()->GetOriginalDexFile() == nullptr) {
- // We have never redefined class this yet. Keep track of what the (de-quickened) dex file looks
- // like so we can tell if anything has changed. Really we would like to just always do the
- // 'else' block but the fact that we de-quickened stuff screws us over.
- unsigned char* original_data_memory = nullptr;
- res = CopyDataIntoJvmtiBuffer(env, dex_data_.get(), dex_len_, &original_data_memory);
- original_dex_file_memory_ = MakeJvmtiUniquePtr(env, original_data_memory);
- original_dex_file_ = art::ArrayRef<const unsigned char>(original_data_memory, dex_len_);
- } else {
- // We know that we have been redefined at least once (there is an original_dex_file set in
- // the class) so we can just use the current dex file directly.
- const art::DexFile& dex_file = m_klass->GetDexFile();
- original_dex_file_ = art::ArrayRef<const unsigned char>(dex_file.Begin(), dex_file.Size());
+ if (!DexNeedsDequickening(m_klass, &from_class_ext_)) {
+ // We don't need to do any dequickening. We want to copy the data just so we don't need to deal
+ // with the GC moving it around.
+ art::ObjPtr<art::mirror::ByteArray> orig_dex(
+ m_klass->GetExtData()->GetOriginalDexFile()->AsByteArray());
+ dex_data_memory_.resize(orig_dex->GetLength());
+ memcpy(dex_data_memory_.data(), orig_dex->GetData(), dex_data_memory_.size());
+ dex_data_ = art::ArrayRef<const unsigned char>(dex_data_memory_);
+
+ // Since we are here we must not have any quickened instructions since we were redefined.
+ const art::DexFile& cur_dex = m_klass->GetDexFile();
+ DCHECK(from_class_ext_);
+ current_dex_file_ = art::ArrayRef<const unsigned char>(cur_dex.Begin(), cur_dex.Size());
+ return OK;
}
- return res;
+
+ // We need to dequicken stuff. This is often super slow (10's of ms). Instead we will do it
+ // dynamically.
+ const art::DexFile* quick_dex = GetQuickenedDexFile(m_klass);
+ auto get_original = [&](/*out*/std::vector<unsigned char>* dex_data)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ GetDexDataForRetransformation(m_klass, dex_data);
+ };
+ InitWithDex(get_original, quick_dex);
+ return OK;
}
-jvmtiError ArtClassDefinition::Init(ArtJvmTiEnv* env, const jvmtiClassDefinition& def) {
- jvmtiError res = InitCommon(env, def.klass);
+jvmtiError ArtClassDefinition::Init(art::Thread* self, const jvmtiClassDefinition& def) {
+ jvmtiError res = InitCommon(self, def.klass);
if (res != OK) {
return res;
}
- unsigned char* new_data = nullptr;
- original_dex_file_ = art::ArrayRef<const unsigned char>(def.class_bytes, def.class_byte_count);
+ // We are being directly redefined.
redefined_ = true;
- dex_len_ = def.class_byte_count;
- res = CopyDataIntoJvmtiBuffer(env, def.class_bytes, def.class_byte_count, /*out*/ &new_data);
- dex_data_ = MakeJvmtiUniquePtr(env, new_data);
- return res;
+ current_dex_file_ = art::ArrayRef<const unsigned char>(def.class_bytes, def.class_byte_count);
+ dex_data_ = art::ArrayRef<const unsigned char>(def.class_bytes, def.class_byte_count);
+ return OK;
+}
+
+void ArtClassDefinition::InitFirstLoad(const char* descriptor,
+ art::Handle<art::mirror::ClassLoader> klass_loader,
+ const art::DexFile& dex_file) {
+ art::Thread* self = art::Thread::Current();
+ art::ScopedObjectAccess soa(self);
+ initialized_ = true;
+ // No Class
+ klass_ = nullptr;
+ loader_ = soa.AddLocalReference<jobject>(klass_loader.Get());
+ std::string descriptor_str(descriptor);
+ name_ = descriptor_str.substr(1, descriptor_str.size() - 2);
+ // Android doesn't really have protection domains.
+ protection_domain_ = nullptr;
+ auto get_original = [&](/*out*/std::vector<unsigned char>* dex_data)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ DequickenDexFile(&dex_file, descriptor, dex_data);
+ };
+ InitWithDex(get_original, &dex_file);
}
} // namespace openjdkjvmti
diff --git a/openjdkjvmti/ti_class_definition.h b/openjdkjvmti/ti_class_definition.h
index accc456957..31c3611e72 100644
--- a/openjdkjvmti/ti_class_definition.h
+++ b/openjdkjvmti/ti_class_definition.h
@@ -32,9 +32,14 @@
#ifndef ART_OPENJDKJVMTI_TI_CLASS_DEFINITION_H_
#define ART_OPENJDKJVMTI_TI_CLASS_DEFINITION_H_
+#include <stddef.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+
#include "art_jvmti.h"
#include "base/array_ref.h"
+#include "mem_map.h"
namespace openjdkjvmti {
@@ -43,46 +48,67 @@ namespace openjdkjvmti {
// redefinition/retransformation function that created it.
class ArtClassDefinition {
public:
+ // If we support doing a on-demand dex-dequickening using signal handlers.
+ static constexpr bool kEnableOnDemandDexDequicken = true;
+
ArtClassDefinition()
: klass_(nullptr),
loader_(nullptr),
name_(),
protection_domain_(nullptr),
- dex_len_(0),
- dex_data_(nullptr),
- original_dex_file_memory_(nullptr),
- original_dex_file_(),
- redefined_(false) {}
-
- jvmtiError Init(ArtJvmTiEnv* env, jclass klass);
- jvmtiError Init(ArtJvmTiEnv* env, const jvmtiClassDefinition& def);
+ dex_data_mmap_(nullptr),
+ temp_mmap_(nullptr),
+ dex_data_memory_(),
+ initial_dex_file_unquickened_(nullptr),
+ dex_data_(),
+ current_dex_memory_(),
+ current_dex_file_(),
+ redefined_(false),
+ from_class_ext_(false),
+ initialized_(false) {}
+
+ void InitFirstLoad(const char* descriptor,
+ art::Handle<art::mirror::ClassLoader> klass_loader,
+ const art::DexFile& dex_file);
+ jvmtiError Init(art::Thread* self, jclass klass);
+ jvmtiError Init(art::Thread* self, const jvmtiClassDefinition& def);
ArtClassDefinition(ArtClassDefinition&& o) = default;
ArtClassDefinition& operator=(ArtClassDefinition&& o) = default;
- void SetNewDexData(ArtJvmTiEnv* env, jint new_dex_len, unsigned char* new_dex_data) {
+ void SetNewDexData(jint new_dex_len, unsigned char* new_dex_data) {
DCHECK(IsInitialized());
if (new_dex_data == nullptr) {
return;
- } else if (new_dex_data != dex_data_.get() || new_dex_len != dex_len_) {
- dex_len_ = new_dex_len;
- dex_data_ = MakeJvmtiUniquePtr(env, new_dex_data);
+ } else {
+ art::ArrayRef<const unsigned char> new_data(new_dex_data, new_dex_len);
+ if (new_data != dex_data_) {
+ dex_data_memory_.resize(new_dex_len);
+ memcpy(dex_data_memory_.data(), new_dex_data, new_dex_len);
+ dex_data_ = art::ArrayRef<const unsigned char>(dex_data_memory_);
+ }
}
}
art::ArrayRef<const unsigned char> GetNewOriginalDexFile() const {
DCHECK(IsInitialized());
if (redefined_) {
- return original_dex_file_;
+ return current_dex_file_;
} else {
return art::ArrayRef<const unsigned char>();
}
}
- bool IsModified() const;
+ bool ContainsAddress(uintptr_t ptr) const {
+ return dex_data_mmap_ != nullptr &&
+ reinterpret_cast<uintptr_t>(dex_data_mmap_->Begin()) <= ptr &&
+ reinterpret_cast<uintptr_t>(dex_data_mmap_->End()) > ptr;
+ }
+
+ bool IsModified() const REQUIRES_SHARED(art::Locks::mutator_lock_);
bool IsInitialized() const {
- return klass_ != nullptr;
+ return initialized_;
}
jclass GetClass() const {
@@ -100,6 +126,13 @@ class ArtClassDefinition {
return name_;
}
+ bool IsLazyDefinition() const {
+ DCHECK(IsInitialized());
+ return dex_data_mmap_ != nullptr &&
+ dex_data_.data() == dex_data_mmap_->Begin() &&
+ dex_data_mmap_->GetProtect() == PROT_NONE;
+ }
+
jobject GetProtectionDomain() const {
DCHECK(IsInitialized());
return protection_domain_;
@@ -107,22 +140,53 @@ class ArtClassDefinition {
art::ArrayRef<const unsigned char> GetDexData() const {
DCHECK(IsInitialized());
- return art::ArrayRef<const unsigned char>(dex_data_.get(), dex_len_);
+ return dex_data_;
}
+ void InitializeMemory() const;
+
private:
- jvmtiError InitCommon(ArtJvmTiEnv* env, jclass klass);
+ jvmtiError InitCommon(art::Thread* self, jclass klass);
+
+ template<typename GetOriginalDexFile>
+ void InitWithDex(GetOriginalDexFile get_original, const art::DexFile* quick_dex)
+ REQUIRES_SHARED(art::Locks::mutator_lock_);
jclass klass_;
jobject loader_;
std::string name_;
jobject protection_domain_;
- jint dex_len_;
- JvmtiUniquePtr<unsigned char> dex_data_;
- JvmtiUniquePtr<unsigned char> original_dex_file_memory_;
- art::ArrayRef<const unsigned char> original_dex_file_;
+
+ // Mmap that will be lazily filled with the original dex file if it needs to be de-quickened or
+ // de-compact-dex'd.
+ mutable std::unique_ptr<art::MemMap> dex_data_mmap_;
+ // This is a temporary mmap we will use to be able to fill the dex file data atomically.
+ mutable std::unique_ptr<art::MemMap> temp_mmap_;
+
+ // Storage backing dex_data_ when this definition owns the data and needs to clean it up.
+ std::vector<unsigned char> dex_data_memory_;
+
+ const art::DexFile* initial_dex_file_unquickened_;
+
+ // A ref to the current dex data. It refers to either dex_data_memory_ or current_dex_file_ and
+ // is what the dex file will be turned into.
+ art::ArrayRef<const unsigned char> dex_data_;
+
+ // This is only used if we failed to create an mmap to store the dequickened data.
+ std::vector<unsigned char> current_dex_memory_;
+
+ // This is a dequickened version of what is currently loaded. It is either current_dex_memory_ (if
+ // this class has never been redefined before) or the current dex_file_ directly (if this class has
+ // already been redefined and therefore cannot contain anything quickened).
+ art::ArrayRef<const unsigned char> current_dex_file_;
+
bool redefined_;
+ // Whether the initial dex_data_ came from a class_ext.
+ bool from_class_ext_;
+
+ bool initialized_;
+
DISALLOW_COPY_AND_ASSIGN(ArtClassDefinition);
};
diff --git a/openjdkjvmti/ti_class_loader.h b/openjdkjvmti/ti_class_loader.h
index 27ea3f5191..ceb7b331de 100644
--- a/openjdkjvmti/ti_class_loader.h
+++ b/openjdkjvmti/ti_class_loader.h
@@ -41,6 +41,7 @@
#include "base/array_slice.h"
#include "class_linker.h"
#include "dex/dex_file.h"
+#include "dex/utf.h"
#include "gc_root-inl.h"
#include "globals.h"
#include "jni_env_ext-inl.h"
@@ -60,7 +61,6 @@
#include "thread_list.h"
#include "ti_class_definition.h"
#include "transform.h"
-#include "utf.h"
#include "utils/dex_cache_arrays_layout-inl.h"
namespace openjdkjvmti {
diff --git a/openjdkjvmti/ti_field.cc b/openjdkjvmti/ti_field.cc
index db5c31c43d..c016966d21 100644
--- a/openjdkjvmti/ti_field.cc
+++ b/openjdkjvmti/ti_field.cc
@@ -35,9 +35,9 @@
#include "art_jvmti.h"
#include "base/enums.h"
#include "dex/dex_file_annotations.h"
+#include "dex/modifiers.h"
#include "jni_internal.h"
#include "mirror/object_array-inl.h"
-#include "modifiers.h"
#include "scoped_thread_state_change-inl.h"
#include "thread-current-inl.h"
diff --git a/openjdkjvmti/ti_method.cc b/openjdkjvmti/ti_method.cc
index 57fb699435..83d64ef1d8 100644
--- a/openjdkjvmti/ti_method.cc
+++ b/openjdkjvmti/ti_method.cc
@@ -40,6 +40,7 @@
#include "dex/code_item_accessors-inl.h"
#include "dex/dex_file_annotations.h"
#include "dex/dex_file_types.h"
+#include "dex/modifiers.h"
#include "events-inl.h"
#include "jit/jit.h"
#include "jni_internal.h"
@@ -47,7 +48,6 @@
#include "mirror/class_loader.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
-#include "modifiers.h"
#include "nativehelper/scoped_local_ref.h"
#include "oat_file.h"
#include "runtime_callbacks.h"
@@ -123,7 +123,7 @@ jvmtiError MethodUtil::GetBytecodes(jvmtiEnv* env,
}
art::ScopedObjectAccess soa(art::Thread::Current());
- art::CodeItemInstructionAccessor accessor(art_method);
+ art::CodeItemInstructionAccessor accessor(art_method->DexInstructions());
if (!accessor.HasCodeItem()) {
*size_ptr = 0;
*bytecode_ptr = nullptr;
@@ -168,7 +168,7 @@ jvmtiError MethodUtil::GetArgumentsSize(jvmtiEnv* env ATTRIBUTE_UNUSED,
}
DCHECK_NE(art_method->GetCodeItemOffset(), 0u);
- *size_ptr = art::CodeItemDataAccessor(art_method).InsSize();
+ *size_ptr = art_method->DexInstructionData().InsSize();
return ERR(NONE);
}
@@ -200,7 +200,7 @@ jvmtiError MethodUtil::GetLocalVariableTable(jvmtiEnv* env,
// TODO HasCodeItem == false means that the method is abstract (or native, but we check that
// earlier). We should check what is returned by the RI in this situation since it's not clear
// what the appropriate return value is from the spec.
- art::CodeItemDebugInfoAccessor accessor(art_method);
+ art::CodeItemDebugInfoAccessor accessor(art_method->DexInstructionDebugInfo());
if (!accessor.HasCodeItem()) {
return ERR(ABSENT_INFORMATION);
}
@@ -301,7 +301,7 @@ jvmtiError MethodUtil::GetMaxLocals(jvmtiEnv* env ATTRIBUTE_UNUSED,
}
DCHECK_NE(art_method->GetCodeItemOffset(), 0u);
- *max_ptr = art::CodeItemDataAccessor(art_method).RegistersSize();
+ *max_ptr = art_method->DexInstructionData().RegistersSize();
return ERR(NONE);
}
@@ -480,7 +480,7 @@ jvmtiError MethodUtil::GetLineNumberTable(jvmtiEnv* env,
return ERR(NULL_POINTER);
}
- accessor = art::CodeItemDebugInfoAccessor(art_method);
+ accessor = art::CodeItemDebugInfoAccessor(art_method->DexInstructionDebugInfo());
dex_file = art_method->GetDexFile();
DCHECK(accessor.HasCodeItem()) << art_method->PrettyMethod() << " " << dex_file->GetLocation();
}
@@ -567,7 +567,7 @@ class CommonLocalVariableClosure : public art::Closure {
// TODO It might be useful to fake up support for get at least on proxy frames.
result_ = ERR(OPAQUE_FRAME);
return;
- } else if (art::CodeItemDataAccessor(method).RegistersSize() <= slot_) {
+ } else if (method->DexInstructionData().RegistersSize() <= slot_) {
result_ = ERR(INVALID_SLOT);
return;
}
@@ -618,7 +618,7 @@ class CommonLocalVariableClosure : public art::Closure {
if (dex_file == nullptr) {
return ERR(OPAQUE_FRAME);
}
- art::CodeItemDebugInfoAccessor accessor(method);
+ art::CodeItemDebugInfoAccessor accessor(method->DexInstructionDebugInfo());
if (!accessor.HasCodeItem()) {
return ERR(OPAQUE_FRAME);
}
diff --git a/openjdkjvmti/ti_redefine.cc b/openjdkjvmti/ti_redefine.cc
index 6194d1e42c..c3fb946b9a 100644
--- a/openjdkjvmti/ti_redefine.cc
+++ b/openjdkjvmti/ti_redefine.cc
@@ -43,6 +43,7 @@
#include "base/stringpiece.h"
#include "class_linker-inl.h"
#include "debugger.h"
+#include "dex/art_dex_file_loader.h"
#include "dex/dex_file.h"
#include "dex/dex_file_loader.h"
#include "dex/dex_file_types.h"
@@ -350,15 +351,14 @@ jvmtiError Redefiner::RedefineClasses(ArtJvmTiEnv* env,
memcpy(class_bytes_copy, definitions[i].class_bytes, definitions[i].class_byte_count);
ArtClassDefinition def;
- res = def.Init(env, definitions[i]);
+ res = def.Init(self, definitions[i]);
if (res != OK) {
return res;
}
def_vector.push_back(std::move(def));
}
// Call all the transformation events.
- jvmtiError res = Transformer::RetransformClassesDirect(env,
- event_handler,
+ jvmtiError res = Transformer::RetransformClassesDirect(event_handler,
self,
&def_vector);
if (res != OK) {
@@ -426,12 +426,13 @@ jvmtiError Redefiner::AddRedefinition(ArtJvmTiEnv* env, const ArtClassDefinition
return ERR(INVALID_CLASS_FORMAT);
}
uint32_t checksum = reinterpret_cast<const art::DexFile::Header*>(map->Begin())->checksum_;
- std::unique_ptr<const art::DexFile> dex_file(art::DexFileLoader::Open(map->GetName(),
- checksum,
- std::move(map),
- /*verify*/true,
- /*verify_checksum*/true,
- error_msg_));
+ const art::ArtDexFileLoader dex_file_loader;
+ std::unique_ptr<const art::DexFile> dex_file(dex_file_loader.Open(map->GetName(),
+ checksum,
+ std::move(map),
+ /*verify*/true,
+ /*verify_checksum*/true,
+ error_msg_));
if (dex_file.get() == nullptr) {
os << "Unable to load modified dex file for " << def.GetName() << ": " << *error_msg_;
*error_msg_ = os.str();
diff --git a/openjdkjvmti/ti_redefine.h b/openjdkjvmti/ti_redefine.h
index b537e1b01c..c920707afd 100644
--- a/openjdkjvmti/ti_redefine.h
+++ b/openjdkjvmti/ti_redefine.h
@@ -41,6 +41,7 @@
#include "base/array_ref.h"
#include "class_linker.h"
#include "dex/dex_file.h"
+#include "dex/utf.h"
#include "gc_root-inl.h"
#include "globals.h"
#include "jni_env_ext-inl.h"
@@ -60,7 +61,6 @@
#include "thread_list.h"
#include "ti_class_definition.h"
#include "transform.h"
-#include "utf.h"
#include "utils/dex_cache_arrays_layout-inl.h"
namespace openjdkjvmti {
diff --git a/openjdkjvmti/ti_search.cc b/openjdkjvmti/ti_search.cc
index 9d5f4ea3f9..cbb7b53bff 100644
--- a/openjdkjvmti/ti_search.cc
+++ b/openjdkjvmti/ti_search.cc
@@ -38,6 +38,7 @@
#include "base/enums.h"
#include "base/macros.h"
#include "class_linker.h"
+#include "dex/art_dex_file_loader.h"
#include "dex/dex_file.h"
#include "dex/dex_file_loader.h"
#include "jni_internal.h"
@@ -227,7 +228,8 @@ jvmtiError SearchUtil::AddToBootstrapClassLoaderSearch(jvmtiEnv* env ATTRIBUTE_U
std::string error_msg;
std::vector<std::unique_ptr<const art::DexFile>> dex_files;
- if (!art::DexFileLoader::Open(
+ const art::ArtDexFileLoader dex_file_loader;
+ if (!dex_file_loader.Open(
segment, segment, /* verify */ true, /* verify_checksum */ true, &error_msg, &dex_files)) {
LOG(WARNING) << "Could not open " << segment << " for boot classpath extension: " << error_msg;
return ERR(ILLEGAL_ARGUMENT);
diff --git a/openjdkjvmti/ti_stack.cc b/openjdkjvmti/ti_stack.cc
index bc77753f8f..373944f179 100644
--- a/openjdkjvmti/ti_stack.cc
+++ b/openjdkjvmti/ti_stack.cc
@@ -1045,7 +1045,7 @@ jvmtiError StackUtil::NotifyFramePop(jvmtiEnv* env, jthread thread, jint depth)
if (shadow_frame == nullptr) {
needs_instrument = true;
const size_t frame_id = visitor.GetFrameId();
- const uint16_t num_regs = art::CodeItemDataAccessor(method).RegistersSize();
+ const uint16_t num_regs = method->DexInstructionData().RegistersSize();
shadow_frame = target->FindOrCreateDebuggerShadowFrame(frame_id,
num_regs,
method,
diff --git a/openjdkjvmti/transform.cc b/openjdkjvmti/transform.cc
index 3a5fcccf35..dc9f69a96a 100644
--- a/openjdkjvmti/transform.cc
+++ b/openjdkjvmti/transform.cc
@@ -29,6 +29,9 @@
* questions.
*/
+#include <stddef.h>
+#include <sys/types.h>
+
#include <unordered_map>
#include <unordered_set>
@@ -39,7 +42,9 @@
#include "class_linker.h"
#include "dex/dex_file.h"
#include "dex/dex_file_types.h"
+#include "dex/utf.h"
#include "events-inl.h"
+#include "fault_handler.h"
#include "gc_root-inl.h"
#include "globals.h"
#include "jni_env_ext-inl.h"
@@ -58,32 +63,228 @@
#include "thread_list.h"
#include "ti_redefine.h"
#include "transform.h"
-#include "utf.h"
#include "utils/dex_cache_arrays_layout-inl.h"
namespace openjdkjvmti {
+// A FaultHandler that will deal with initializing ClassDefinitions when they are actually needed.
+class TransformationFaultHandler FINAL : public art::FaultHandler {
+ public:
+ explicit TransformationFaultHandler(art::FaultManager* manager)
+ : art::FaultHandler(manager),
+ uninitialized_class_definitions_lock_("JVMTI Initialized class definitions lock",
+ art::LockLevel::kSignalHandlingLock),
+ class_definition_initialized_cond_("JVMTI Initialized class definitions condition",
+ uninitialized_class_definitions_lock_) {
+ manager->AddHandler(this, /* generated_code */ false);
+ }
+
+ ~TransformationFaultHandler() {
+ art::MutexLock mu(art::Thread::Current(), uninitialized_class_definitions_lock_);
+ uninitialized_class_definitions_.clear();
+ }
+
+ bool Action(int sig, siginfo_t* siginfo, void* context ATTRIBUTE_UNUSED) OVERRIDE {
+ DCHECK_EQ(sig, SIGSEGV);
+ art::Thread* self = art::Thread::Current();
+ if (UNLIKELY(uninitialized_class_definitions_lock_.IsExclusiveHeld(self))) {
+ if (self != nullptr) {
+ LOG(FATAL) << "Recursive call into Transformation fault handler!";
+ UNREACHABLE();
+ } else {
+ LOG(ERROR) << "Possible deadlock due to recursive signal delivery of segv.";
+ }
+ }
+ uintptr_t ptr = reinterpret_cast<uintptr_t>(siginfo->si_addr);
+ ArtClassDefinition* res = nullptr;
+
+ {
+ // NB Technically using a mutex and condition variables here is not POSIX compliant, but
+ // everything should be fine since both the glibc and bionic implementations of mutexes and
+ // condition variables work so long as the thread was not interrupted during a
+ // lock/unlock (which it wasn't) on all architectures we care about.
+ art::MutexLock mu(self, uninitialized_class_definitions_lock_);
+ auto it = std::find_if(uninitialized_class_definitions_.begin(),
+ uninitialized_class_definitions_.end(),
+ [&](const auto op) { return op->ContainsAddress(ptr); });
+ if (it != uninitialized_class_definitions_.end()) {
+ res = *it;
+ // Remove the class definition.
+ uninitialized_class_definitions_.erase(it);
+ // Put it in the initializing list
+ initializing_class_definitions_.push_back(res);
+ } else {
+ // Wait for the ptr to be initialized (if it is currently initializing).
+ while (DefinitionIsInitializing(ptr)) {
+ WaitForClassInitializationToFinish();
+ }
+ // Return true (continue with user code) if we find that the definition has been
+ // initialized. Return false (continue on to next signal handler) if the definition is not
+ // initialized or found.
+ return std::find_if(initialized_class_definitions_.begin(),
+ initialized_class_definitions_.end(),
+ [&](const auto op) { return op->ContainsAddress(ptr); }) !=
+ initialized_class_definitions_.end();
+ }
+ }
+
+ if (LIKELY(self != nullptr)) {
+ CHECK_EQ(self->GetState(), art::ThreadState::kNative)
+ << "Transformation fault handler occurred outside of native mode";
+ }
+
+ VLOG(signals) << "Lazy initialization of dex file for transformation of " << res->GetName()
+ << " during SEGV";
+ res->InitializeMemory();
+
+ {
+ art::MutexLock mu(self, uninitialized_class_definitions_lock_);
+ // Move to initialized state and notify waiters.
+ initializing_class_definitions_.erase(std::find(initializing_class_definitions_.begin(),
+ initializing_class_definitions_.end(),
+ res));
+ initialized_class_definitions_.push_back(res);
+ class_definition_initialized_cond_.Broadcast(self);
+ }
+
+ return true;
+ }
+
+ void RemoveDefinition(ArtClassDefinition* def) REQUIRES(!uninitialized_class_definitions_lock_) {
+ art::MutexLock mu(art::Thread::Current(), uninitialized_class_definitions_lock_);
+ auto it = std::find(uninitialized_class_definitions_.begin(),
+ uninitialized_class_definitions_.end(),
+ def);
+ if (it != uninitialized_class_definitions_.end()) {
+ uninitialized_class_definitions_.erase(it);
+ return;
+ }
+ while (std::find(initializing_class_definitions_.begin(),
+ initializing_class_definitions_.end(),
+ def) != initializing_class_definitions_.end()) {
+ WaitForClassInitializationToFinish();
+ }
+ it = std::find(initialized_class_definitions_.begin(),
+ initialized_class_definitions_.end(),
+ def);
+ CHECK(it != initialized_class_definitions_.end()) << "Could not find class definition for "
+ << def->GetName();
+ initialized_class_definitions_.erase(it);
+ }
+
+ void AddArtDefinition(ArtClassDefinition* def) REQUIRES(!uninitialized_class_definitions_lock_) {
+ DCHECK(def->IsLazyDefinition());
+ art::MutexLock mu(art::Thread::Current(), uninitialized_class_definitions_lock_);
+ uninitialized_class_definitions_.push_back(def);
+ }
+
+ private:
+ bool DefinitionIsInitializing(uintptr_t ptr) REQUIRES(uninitialized_class_definitions_lock_) {
+ return std::find_if(initializing_class_definitions_.begin(),
+ initializing_class_definitions_.end(),
+ [&](const auto op) { return op->ContainsAddress(ptr); }) !=
+ initializing_class_definitions_.end();
+ }
+
+ void WaitForClassInitializationToFinish() REQUIRES(uninitialized_class_definitions_lock_) {
+ class_definition_initialized_cond_.Wait(art::Thread::Current());
+ }
+
+ art::Mutex uninitialized_class_definitions_lock_ ACQUIRED_BEFORE(art::Locks::abort_lock_);
+ art::ConditionVariable class_definition_initialized_cond_
+ GUARDED_BY(uninitialized_class_definitions_lock_);
+
+ // A list of the class definitions that have a non-readable map.
+ std::vector<ArtClassDefinition*> uninitialized_class_definitions_
+ GUARDED_BY(uninitialized_class_definitions_lock_);
+
+ // A list of class definitions that are currently undergoing unquickening. Threads should wait
+ // until the definition is no longer in this list before returning.
+ std::vector<ArtClassDefinition*> initializing_class_definitions_
+ GUARDED_BY(uninitialized_class_definitions_lock_);
+
+ // A list of class definitions that are already unquickened. Threads should return immediately if
+ // the definition is found here.
+ std::vector<ArtClassDefinition*> initialized_class_definitions_
+ GUARDED_BY(uninitialized_class_definitions_lock_);
+};
+
+static TransformationFaultHandler* gTransformFaultHandler = nullptr;
+
+void Transformer::Setup() {
+ // Although we create it here, the fault handler is actually owned by 'art::fault_manager', which
+ // will take care of destroying it.
+ if (art::MemMap::kCanReplaceMapping && ArtClassDefinition::kEnableOnDemandDexDequicken) {
+ gTransformFaultHandler = new TransformationFaultHandler(&art::fault_manager);
+ }
+}
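
The handler registered above relies on reserving the dex-data mapping as PROT_NONE and only materializing its contents the first time something touches it. As a rough, standalone illustration of that reservation trick (this is not ART code; the g_lazy_* names and the memset stand-in for dequickening are made up, and error checking is omitted):

    #include <signal.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    static unsigned char* g_lazy_base = nullptr;
    static size_t g_lazy_size = 0;

    static void LazyFaultHandler(int, siginfo_t* info, void*) {
      unsigned char* addr = static_cast<unsigned char*>(info->si_addr);
      if (addr < g_lazy_base || addr >= g_lazy_base + g_lazy_size) {
        _exit(1);  // Not our mapping; a real handler would chain to the next one.
      }
      // Make the region writable, fill it in, then drop back to read-only and
      // retry the faulting instruction by returning from the handler.
      mprotect(g_lazy_base, g_lazy_size, PROT_READ | PROT_WRITE);
      memset(g_lazy_base, 0xAB, g_lazy_size);  // Stand-in for dequickening the dex file.
      mprotect(g_lazy_base, g_lazy_size, PROT_READ);
    }

    int main() {
      g_lazy_size = static_cast<size_t>(sysconf(_SC_PAGESIZE));
      g_lazy_base = static_cast<unsigned char*>(
          mmap(nullptr, g_lazy_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
      struct sigaction sa = {};
      sa.sa_sigaction = LazyFaultHandler;
      sa.sa_flags = SA_SIGINFO;
      sigaction(SIGSEGV, &sa, nullptr);
      return g_lazy_base[42] == 0xAB ? 0 : 1;  // First read faults and triggers the fill.
    }

As in the real handler, the work done from signal context is kept to syscalls and plain memory writes; mprotect is not formally on the async-signal-safe list, but in practice it behaves as such on the platforms that matter here.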
+
+// Simple helper to add and remove the class definition from the fault handler.
+class ScopedDefinitionHandler {
+ public:
+ explicit ScopedDefinitionHandler(ArtClassDefinition* def)
+ : def_(def), is_lazy_(def_->IsLazyDefinition()) {
+ if (is_lazy_) {
+ gTransformFaultHandler->AddArtDefinition(def_);
+ }
+ }
+
+ ~ScopedDefinitionHandler() {
+ if (is_lazy_) {
+ gTransformFaultHandler->RemoveDefinition(def_);
+ }
+ }
+
+ private:
+ ArtClassDefinition* def_;
+ bool is_lazy_;
+};
+
+// Explicit instantiations of the transformation template.
+template
+void Transformer::TransformSingleClassDirect<ArtJvmtiEvent::kClassFileLoadHookNonRetransformable>(
+ EventHandler* event_handler, art::Thread* self, /*in-out*/ArtClassDefinition* def);
+template
+void Transformer::TransformSingleClassDirect<ArtJvmtiEvent::kClassFileLoadHookRetransformable>(
+ EventHandler* event_handler, art::Thread* self, /*in-out*/ArtClassDefinition* def);
+
+template<ArtJvmtiEvent kEvent>
+void Transformer::TransformSingleClassDirect(EventHandler* event_handler,
+ art::Thread* self,
+ /*in-out*/ArtClassDefinition* def) {
+ static_assert(kEvent == ArtJvmtiEvent::kClassFileLoadHookNonRetransformable ||
+ kEvent == ArtJvmtiEvent::kClassFileLoadHookRetransformable,
+ "bad event type");
+ // We don't want any thread-state transitions between calling the event and setting the new data,
+ // so change to native state early. This also avoids any problems the FaultHandler might have in
+ // determining whether an access to the dex_data is from generated code or not.
+ art::ScopedThreadStateChange stsc(self, art::ThreadState::kNative);
+ ScopedDefinitionHandler handler(def);
+ jint new_len = -1;
+ unsigned char* new_data = nullptr;
+ art::ArrayRef<const unsigned char> dex_data = def->GetDexData();
+ event_handler->DispatchEvent<kEvent>(
+ self,
+ static_cast<JNIEnv*>(self->GetJniEnv()),
+ def->GetClass(),
+ def->GetLoader(),
+ def->GetName().c_str(),
+ def->GetProtectionDomain(),
+ static_cast<jint>(dex_data.size()),
+ dex_data.data(),
+ /*out*/&new_len,
+ /*out*/&new_data);
+ def->SetNewDexData(new_len, new_data);
+}
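
For context, the event dispatched here is the standard JVMTI ClassFileLoadHook. A minimal sketch of the agent-side callback that receives it might look like the following (illustrative only; OnClassFileLoad is a made-up name, error handling is omitted, and a real transformer would rewrite the copied bytes before returning them):

    #include <cstring>
    #include <jvmti.h>

    static void JNICALL OnClassFileLoad(jvmtiEnv* jvmti,
                                        JNIEnv* /* jni */,
                                        jclass /* class_being_redefined */,
                                        jobject /* loader */,
                                        const char* /* name */,
                                        jobject /* protection_domain */,
                                        jint class_data_len,
                                        const unsigned char* class_data,
                                        jint* new_class_data_len,
                                        unsigned char** new_class_data) {
      unsigned char* copy = nullptr;
      // Class data handed back to the runtime must come from jvmti Allocate.
      if (jvmti->Allocate(class_data_len, &copy) == JVMTI_ERROR_NONE) {
        std::memcpy(copy, class_data, class_data_len);
        // Rewrite 'copy' here to actually transform the class.
        *new_class_data_len = class_data_len;
        *new_class_data = copy;
      }
    }

An agent registers such a callback through SetEventCallbacks and enables JVMTI_EVENT_CLASS_FILE_LOAD_HOOK with SetEventNotificationMode.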
+
jvmtiError Transformer::RetransformClassesDirect(
- ArtJvmTiEnv* env,
EventHandler* event_handler,
art::Thread* self,
/*in-out*/std::vector<ArtClassDefinition>* definitions) {
for (ArtClassDefinition& def : *definitions) {
- jint new_len = -1;
- unsigned char* new_data = nullptr;
- art::ArrayRef<const unsigned char> dex_data = def.GetDexData();
- event_handler->DispatchEvent<ArtJvmtiEvent::kClassFileLoadHookRetransformable>(
- self,
- GetJniEnv(env),
- def.GetClass(),
- def.GetLoader(),
- def.GetName().c_str(),
- def.GetProtectionDomain(),
- static_cast<jint>(dex_data.size()),
- dex_data.data(),
- /*out*/&new_len,
- /*out*/&new_data);
- def.SetNewDexData(env, new_len, new_data);
+ TransformSingleClassDirect<ArtJvmtiEvent::kClassFileLoadHookRetransformable>(event_handler,
+ self,
+ &def);
}
return OK;
}
@@ -120,13 +321,13 @@ jvmtiError Transformer::RetransformClasses(ArtJvmTiEnv* env,
return ERR(UNMODIFIABLE_CLASS);
}
ArtClassDefinition def;
- res = def.Init(env, classes[i]);
+ res = def.Init(self, classes[i]);
if (res != OK) {
return res;
}
definitions.push_back(std::move(def));
}
- res = RetransformClassesDirect(env, event_handler, self, &definitions);
+ res = RetransformClassesDirect(event_handler, self, &definitions);
if (res != OK) {
return res;
}
diff --git a/openjdkjvmti/transform.h b/openjdkjvmti/transform.h
index 6bbe60a91f..8bbeda4b09 100644
--- a/openjdkjvmti/transform.h
+++ b/openjdkjvmti/transform.h
@@ -48,8 +48,15 @@ jvmtiError GetClassLocation(ArtJvmTiEnv* env, jclass klass, /*out*/std::string*
class Transformer {
public:
+ static void Setup();
+
+ template<ArtJvmtiEvent kEvent>
+ static void TransformSingleClassDirect(
+ EventHandler* event_handler,
+ art::Thread* self,
+ /*in-out*/ArtClassDefinition* def);
+
static jvmtiError RetransformClassesDirect(
- ArtJvmTiEnv* env,
EventHandler* event_handler,
art::Thread* self,
/*in-out*/std::vector<ArtClassDefinition>* definitions);
diff --git a/patchoat/Android.bp b/patchoat/Android.bp
index 0902823644..0e8e517cd4 100644
--- a/patchoat/Android.bp
+++ b/patchoat/Android.bp
@@ -26,6 +26,7 @@ cc_defaults {
},
shared_libs: [
"libbase",
+ "libcrypto", // For computing the digest of image file
],
}
@@ -58,5 +59,6 @@ art_cc_test {
],
shared_libs: [
"libartd",
+ "libcrypto", // For computing the digest of image file
],
}
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index eb648cba18..d9cefee611 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -15,6 +15,7 @@
*/
#include "patchoat.h"
+#include <openssl/sha.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/file.h>
@@ -24,12 +25,14 @@
#include <string>
#include <vector>
+#include "android-base/file.h"
#include "android-base/stringprintf.h"
#include "android-base/strings.h"
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/dumpable.h"
+#include "base/file_utils.h"
#include "base/logging.h" // For InitLogging.
#include "base/memory_tool.h"
#include "base/scoped_flock.h"
@@ -42,6 +45,7 @@
#include "gc/space/image_space.h"
#include "image-inl.h"
#include "intern_table.h"
+#include "leb128.h"
#include "mirror/dex_cache.h"
#include "mirror/executable.h"
#include "mirror/method.h"
@@ -58,6 +62,8 @@
namespace art {
+using android::base::StringPrintf;
+
static const OatHeader* GetOatHeader(const ElfFile* elf_file) {
uint64_t off = 0;
if (!elf_file->GetSectionOffsetAndSize(".rodata", &off, nullptr)) {
@@ -120,11 +126,254 @@ static bool SymlinkFile(const std::string& input_filename, const std::string& ou
return true;
}
+bool PatchOat::GeneratePatch(
+ const MemMap& original,
+ const MemMap& relocated,
+ std::vector<uint8_t>* output,
+ std::string* error_msg) {
+ // FORMAT of the patch (aka image relocation) file:
+ // * SHA-256 digest (32 bytes) of original/unrelocated file (e.g., the one from /system)
+ // * List of monotonically increasing offsets (max value defined by uint32_t) at which relocations
+ // occur.
+ // Each element is represented as the delta from the previous offset in the list (first element
+ // is a delta from 0). Each delta is encoded using unsigned LEB128: little-endian
+ // variable-length 7 bits per byte encoding, where all bytes have the highest bit (0x80) set
+ // except for the final byte which does not have that bit set. For example, 0x3f is offset 0x3f,
+ // whereas 0xbf 0x05 is offset (0xbf & 0x7f) | (0x5 << 7), which is 0x2bf. Most deltas end up
+ // being encoded using just one byte, achieving a ~4x decrease in relocation file size compared
+ // to the encoding where offsets are stored verbatim, as uint32_t.
+
+ size_t original_size = original.Size();
+ size_t relocated_size = relocated.Size();
+ if (original_size != relocated_size) {
+ *error_msg =
+ StringPrintf(
+ "Original and relocated image sizes differ: %zu vs %zu", original_size, relocated_size);
+ return false;
+ }
+ if ((original_size % 4) != 0) {
+ *error_msg = StringPrintf("Image size not multiple of 4: %zu", original_size);
+ return false;
+ }
+ if (original_size > UINT32_MAX) {
+ *error_msg = StringPrintf("Image too large: %zu" , original_size);
+ return false;
+ }
+
+ const ImageHeader& relocated_header =
+ *reinterpret_cast<const ImageHeader*>(relocated.Begin());
+ // Offsets are supposed to differ between original and relocated by this value
+ off_t expected_diff = relocated_header.GetPatchDelta();
+ if (expected_diff == 0) {
+ // Can't identify offsets which are supposed to differ due to relocation
+ *error_msg = "Relocation delta is 0";
+ return false;
+ }
+
+ // Output the SHA-256 digest of the original
+ output->resize(SHA256_DIGEST_LENGTH);
+ const uint8_t* original_bytes = original.Begin();
+ SHA256(original_bytes, original_size, output->data());
+
+ // Output the list of offsets at which the original and patched images differ
+ size_t last_diff_offset = 0;
+ size_t diff_offset_count = 0;
+ const uint8_t* relocated_bytes = relocated.Begin();
+ for (size_t offset = 0; offset < original_size; offset += 4) {
+ uint32_t original_value = *reinterpret_cast<const uint32_t*>(original_bytes + offset);
+ uint32_t relocated_value = *reinterpret_cast<const uint32_t*>(relocated_bytes + offset);
+ off_t diff = relocated_value - original_value;
+ if (diff == 0) {
+ continue;
+ } else if (diff != expected_diff) {
+ *error_msg =
+ StringPrintf(
+ "Unexpected diff at offset %zu. Expected: %jd, but was: %jd",
+ offset,
+ (intmax_t) expected_diff,
+ (intmax_t) diff);
+ return false;
+ }
+
+ uint32_t offset_diff = offset - last_diff_offset;
+ last_diff_offset = offset;
+ diff_offset_count++;
+
+ EncodeUnsignedLeb128(output, offset_diff);
+ }
+
+ if (diff_offset_count == 0) {
+ *error_msg = "Original and patched images are identical";
+ return false;
+ }
+
+ return true;
+}
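
As a self-contained illustration of the delta plus unsigned-LEB128 scheme described in the format comment above (a toy encoder, not the ART EncodeUnsignedLeb128 helper; the offsets are made up):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    static void EncodeUleb128(std::vector<uint8_t>* out, uint32_t value) {
      while (value >= 0x80) {
        out->push_back(static_cast<uint8_t>(value) | 0x80);  // Continuation bit set.
        value >>= 7;
      }
      out->push_back(static_cast<uint8_t>(value));  // Final byte, high bit clear.
    }

    int main() {
      // Offsets must be increasing multiples of 4; only the deltas are stored.
      const uint32_t relocation_offsets[] = {0x3c, 0x2fc, 0x300};
      std::vector<uint8_t> rel;
      uint32_t previous = 0;
      for (uint32_t offset : relocation_offsets) {
        EncodeUleb128(&rel, offset - previous);
        previous = offset;
      }
      for (uint8_t b : rel) std::printf("%02x ", b);  // Prints: 3c c0 05 04
      std::printf("\n");
    }

Decoding reverses this: accumulate 7 bits per byte until a byte without the high bit is seen, which is what the verification path below does via DecodeUnsignedLeb128Checked.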
+
+static bool WriteRelFile(
+ const MemMap& original,
+ const MemMap& relocated,
+ const std::string& rel_filename,
+ std::string* error_msg) {
+ std::vector<uint8_t> output;
+ if (!PatchOat::GeneratePatch(original, relocated, &output, error_msg)) {
+ return false;
+ }
+
+ std::unique_ptr<File> rel_file(OS::CreateEmptyFileWriteOnly(rel_filename.c_str()));
+ if (rel_file.get() == nullptr) {
+ *error_msg = StringPrintf("Failed to create/open output file %s", rel_filename.c_str());
+ return false;
+ }
+ if (!rel_file->WriteFully(output.data(), output.size())) {
+ *error_msg = StringPrintf("Failed to write to %s", rel_filename.c_str());
+ return false;
+ }
+ if (rel_file->FlushCloseOrErase() != 0) {
+ *error_msg = StringPrintf("Failed to flush and close %s", rel_filename.c_str());
+ return false;
+ }
+
+ return true;
+}
+
+static bool CheckImageIdenticalToOriginalExceptForRelocation(
+ const std::string& relocated_filename,
+ const std::string& original_filename,
+ std::string* error_msg) {
+ *error_msg = "";
+ std::string rel_filename = original_filename + ".rel";
+ std::unique_ptr<File> rel_file(OS::OpenFileForReading(rel_filename.c_str()));
+ if (rel_file.get() == nullptr) {
+ *error_msg = StringPrintf("Failed to open image relocation file %s", rel_filename.c_str());
+ return false;
+ }
+ int64_t rel_size = rel_file->GetLength();
+ if (rel_size < 0) {
+ *error_msg = StringPrintf("Error while getting size of image relocation file %s",
+ rel_filename.c_str());
+ return false;
+ }
+ std::unique_ptr<uint8_t[]> rel(new uint8_t[rel_size]);
+ if (!rel_file->ReadFully(rel.get(), rel_size)) {
+ *error_msg = StringPrintf("Failed to read image relocation file %s", rel_filename.c_str());
+ return false;
+ }
+
+ std::unique_ptr<File> image_file(OS::OpenFileForReading(relocated_filename.c_str()));
+ if (image_file.get() == nullptr) {
+ *error_msg = StringPrintf("Unable to open relocated image file %s",
+ relocated_filename.c_str());
+ return false;
+ }
+
+ int64_t image_size = image_file->GetLength();
+ if (image_size < 0) {
+ *error_msg = StringPrintf("Error while getting size of relocated image file %s",
+ relocated_filename.c_str());
+ return false;
+ }
+ if ((image_size % 4) != 0) {
+ *error_msg =
+ StringPrintf(
+ "Relocated image file %s size not multiple of 4: %" PRId64,
+ relocated_filename.c_str(), image_size);
+ return false;
+ }
+ if (image_size > std::numeric_limits<uint32_t>::max()) {
+ *error_msg =
+ StringPrintf(
+ "Relocated image file %s too large: %" PRId64, relocated_filename.c_str(), image_size);
+ return false;
+ }
+
+ std::unique_ptr<uint8_t[]> image(new uint8_t[image_size]);
+ if (!image_file->ReadFully(image.get(), image_size)) {
+ *error_msg = StringPrintf("Failed to read relocated image file %s", relocated_filename.c_str());
+ return false;
+ }
+
+ const uint8_t* original_image_digest = rel.get();
+ if (rel_size < SHA256_DIGEST_LENGTH) {
+ *error_msg = StringPrintf("Malformed image relocation file %s: too short",
+ rel_filename.c_str());
+ return false;
+ }
+
+ const ImageHeader& image_header = *reinterpret_cast<const ImageHeader*>(image.get());
+ off_t expected_diff = image_header.GetPatchDelta();
+
+ if (expected_diff == 0) {
+ *error_msg = StringPrintf("Unsuported patch delta of zero in %s",
+ relocated_filename.c_str());
+ return false;
+ }
+
+ // Relocated image is expected to differ from the original due to relocation.
+ // Unrelocate the image in memory to compensate.
+ uint8_t* image_start = image.get();
+ const uint8_t* rel_end = &rel[rel_size];
+ const uint8_t* rel_ptr = &rel[SHA256_DIGEST_LENGTH];
+ // The remaining .rel file consists of offsets at which relocation should've occurred.
+ // For each offset, we "unrelocate" the image by subtracting the expected relocation
+ // diff value (as specified in the image header).
+ //
+ // Each offset is encoded as a delta/diff relative to the previous offset, with the
+ // very first offset being encoded relative to offset 0.
+ // Deltas are encoded using little-endian 7 bits per byte encoding, with all bytes except
+ // the last one having the highest bit set.
+ uint32_t offset = 0;
+ while (rel_ptr != rel_end) {
+ uint32_t offset_delta = 0;
+ if (DecodeUnsignedLeb128Checked(&rel_ptr, rel_end, &offset_delta)) {
+ offset += offset_delta;
+ uint32_t *image_value = reinterpret_cast<uint32_t*>(image_start + offset);
+ *image_value -= expected_diff;
+ } else {
+ *error_msg =
+ StringPrintf(
+ "Malformed image relocation file %s: "
+ "last byte has it's most significant bit set",
+ rel_filename.c_str());
+ return false;
+ }
+ }
+
+ // Image in memory is now supposed to be identical to the original. We
+ // confirm this by comparing the digest of the in-memory image to the expected
+ // digest from the relocation file.
+ uint8_t image_digest[SHA256_DIGEST_LENGTH];
+ SHA256(image.get(), image_size, image_digest);
+ if (memcmp(image_digest, original_image_digest, SHA256_DIGEST_LENGTH) != 0) {
+ *error_msg =
+ StringPrintf(
+ "Relocated image %s does not match the original %s after unrelocation",
+ relocated_filename.c_str(),
+ original_filename.c_str());
+ return false;
+ }
+
+ // Relocated image is identical to the original, once relocations are taken into account
+ return true;
+}
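
The unrelocation walk above can be pictured with a small standalone sketch (a toy decoder mirroring the encoder sketch earlier; the payload and delta values are made up):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    static uint32_t DecodeUleb128(const uint8_t** ptr) {
      uint32_t result = 0;
      int shift = 0;
      uint8_t byte;
      do {
        byte = *(*ptr)++;
        result |= static_cast<uint32_t>(byte & 0x7f) << shift;
        shift += 7;
      } while ((byte & 0x80) != 0);
      return result;
    }

    int main() {
      const uint8_t rel_payload[] = {0x3c, 0xc0, 0x05};    // Deltas 0x3c and 0x2c0.
      std::vector<uint32_t> image(0x100, 0x70001000u);     // Fake relocated words.
      const uint32_t relocation_delta = 0x100000;
      const uint8_t* p = rel_payload;
      const uint8_t* end = rel_payload + sizeof(rel_payload);
      uint32_t offset = 0;
      while (p != end) {
        offset += DecodeUleb128(&p);
        image[offset / 4] -= relocation_delta;             // "Unrelocate" that word.
      }
      std::printf("%08x %08x\n", image[0x3c / 4], image[0x2fc / 4]);  // 6ff01000 6ff01000
    }

After the walk, hashing the buffer with SHA256() and comparing against the 32-byte digest at the start of the .rel file confirms that the unrelocated bytes match the original image.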
+
bool PatchOat::Patch(const std::string& image_location,
off_t delta,
- const std::string& output_directory,
+ const std::string& output_image_directory,
+ const std::string& output_image_relocation_directory,
InstructionSet isa,
TimingLogger* timings) {
+ bool output_image = !output_image_directory.empty();
+ bool output_image_relocation = !output_image_relocation_directory.empty();
+ if ((!output_image) && (!output_image_relocation)) {
+ // Nothing to do
+ return true;
+ }
+ if ((output_image_relocation) && (delta == 0)) {
+ LOG(ERROR) << "Cannot output image relocation information when requested relocation delta is 0";
+ return false;
+ }
+
CHECK(Runtime::Current() == nullptr);
CHECK(!image_location.empty()) << "image file must have a filename.";
@@ -221,32 +470,35 @@ bool PatchOat::Patch(const std::string& image_location,
return false;
}
- MaybePic is_oat_pic = IsOatPic(elf.get());
- if (is_oat_pic >= ERROR_FIRST) {
- // Error logged by IsOatPic
- return false;
- } else if (is_oat_pic == NOT_PIC) {
- LOG(ERROR) << "patchoat cannot be used on non-PIC oat file: " << input_oat_file->GetPath();
- return false;
- } else {
- CHECK(is_oat_pic == PIC);
-
- // Create a symlink.
- std::string converted_image_filename = space->GetImageLocation();
- std::replace(converted_image_filename.begin() + 1, converted_image_filename.end(), '/', '@');
- std::string output_image_filename = output_directory +
- (android::base::StartsWith(converted_image_filename, "/") ? "" : "/") +
- converted_image_filename;
- std::string output_vdex_filename =
- ImageHeader::GetVdexLocationFromImageLocation(output_image_filename);
- std::string output_oat_filename =
- ImageHeader::GetOatLocationFromImageLocation(output_image_filename);
-
- if (!ReplaceOatFileWithSymlink(input_oat_file->GetPath(),
- output_oat_filename) ||
- !SymlinkFile(input_vdex_filename, output_vdex_filename)) {
- // Errors already logged by above call.
+ if (output_image) {
+ MaybePic is_oat_pic = IsOatPic(elf.get());
+ if (is_oat_pic >= ERROR_FIRST) {
+ // Error logged by IsOatPic
+ return false;
+ } else if (is_oat_pic == NOT_PIC) {
+ LOG(ERROR) << "patchoat cannot be used on non-PIC oat file: " << input_oat_file->GetPath();
return false;
+ } else {
+ CHECK(is_oat_pic == PIC);
+
+ // Create a symlink.
+ std::string converted_image_filename = space->GetImageLocation();
+ std::replace(
+ converted_image_filename.begin() + 1, converted_image_filename.end(), '/', '@');
+ std::string output_image_filename = output_image_directory +
+ (android::base::StartsWith(converted_image_filename, "/") ? "" : "/") +
+ converted_image_filename;
+ std::string output_vdex_filename =
+ ImageHeader::GetVdexLocationFromImageLocation(output_image_filename);
+ std::string output_oat_filename =
+ ImageHeader::GetOatLocationFromImageLocation(output_image_filename);
+
+ if (!ReplaceOatFileWithSymlink(input_oat_file->GetPath(),
+ output_oat_filename) ||
+ !SymlinkFile(input_vdex_filename, output_vdex_filename)) {
+ // Errors already logged by above call.
+ return false;
+ }
}
}
@@ -267,28 +519,72 @@ bool PatchOat::Patch(const std::string& image_location,
}
}
- // Write the patched image spaces.
- for (size_t i = 0; i < spaces.size(); ++i) {
- gc::space::ImageSpace* space = spaces[i];
+ if (output_image) {
+ // Write the patched image spaces.
+ for (size_t i = 0; i < spaces.size(); ++i) {
+ gc::space::ImageSpace* space = spaces[i];
- t.NewTiming("Writing image");
- std::string converted_image_filename = space->GetImageLocation();
- std::replace(converted_image_filename.begin() + 1, converted_image_filename.end(), '/', '@');
- std::string output_image_filename = output_directory +
- (android::base::StartsWith(converted_image_filename, "/") ? "" : "/") +
- converted_image_filename;
- std::unique_ptr<File> output_image_file(CreateOrOpen(output_image_filename.c_str()));
- if (output_image_file.get() == nullptr) {
- LOG(ERROR) << "Failed to open output image file at " << output_image_filename;
- return false;
+ t.NewTiming("Writing image");
+ std::string converted_image_filename = space->GetImageLocation();
+ std::replace(converted_image_filename.begin() + 1, converted_image_filename.end(), '/', '@');
+ std::string output_image_filename = output_image_directory +
+ (android::base::StartsWith(converted_image_filename, "/") ? "" : "/") +
+ converted_image_filename;
+ std::unique_ptr<File> output_image_file(CreateOrOpen(output_image_filename.c_str()));
+ if (output_image_file.get() == nullptr) {
+ LOG(ERROR) << "Failed to open output image file at " << output_image_filename;
+ return false;
+ }
+
+ PatchOat& p = space_to_patchoat_map.find(space)->second;
+
+ bool success = p.WriteImage(output_image_file.get());
+ success = FinishFile(output_image_file.get(), success);
+ if (!success) {
+ return false;
+ }
}
+ }
- PatchOat& p = space_to_patchoat_map.find(space)->second;
+ if (output_image_relocation) {
+ // Write the image relocation information for each space.
+ for (size_t i = 0; i < spaces.size(); ++i) {
+ gc::space::ImageSpace* space = spaces[i];
+
+ t.NewTiming("Writing image relocation");
+ std::string original_image_filename(space->GetImageLocation() + ".rel");
+ std::string image_relocation_filename =
+ output_image_relocation_directory
+ + (android::base::StartsWith(original_image_filename, "/") ? "" : "/")
+ + original_image_filename.substr(original_image_filename.find_last_of("/"));
+ File& input_image = *space_to_file_map.find(space)->second;
+ int64_t input_image_size = input_image.GetLength();
+ if (input_image_size < 0) {
+ LOG(ERROR) << "Error while getting input image size";
+ return false;
+ }
+ std::string error_msg;
+ std::unique_ptr<MemMap> original(MemMap::MapFile(input_image_size,
+ PROT_READ,
+ MAP_PRIVATE,
+ input_image.Fd(),
+ 0,
+ /*low_4gb*/false,
+ input_image.GetPath().c_str(),
+ &error_msg));
+ if (original.get() == nullptr) {
+ LOG(ERROR) << "Unable to map image file " << input_image.GetPath() << " : " << error_msg;
+ return false;
+ }
- bool success = p.WriteImage(output_image_file.get());
- success = FinishFile(output_image_file.get(), success);
- if (!success) {
- return false;
+ PatchOat& p = space_to_patchoat_map.find(space)->second;
+ const MemMap* relocated = p.image_;
+
+ if (!WriteRelFile(*original, *relocated, image_relocation_filename, &error_msg)) {
+ LOG(ERROR) << "Failed to create image relocation file " << image_relocation_filename
+ << ": " << error_msg;
+ return false;
+ }
}
}
@@ -301,6 +597,86 @@ bool PatchOat::Patch(const std::string& image_location,
return true;
}
+bool PatchOat::Verify(const std::string& image_location,
+ const std::string& output_image_directory,
+ InstructionSet isa,
+ TimingLogger* timings) {
+ if (image_location.empty()) {
+ LOG(ERROR) << "Original image file not provided";
+ return false;
+ }
+ if (output_image_directory.empty()) {
+ LOG(ERROR) << "Relocated image directory not provided";
+ return false;
+ }
+
+ TimingLogger::ScopedTiming t("Runtime Setup", timings);
+
+ CHECK_NE(isa, InstructionSet::kNone);
+ const char* isa_name = GetInstructionSetString(isa);
+
+ // Set up the runtime
+ RuntimeOptions options;
+ NoopCompilerCallbacks callbacks;
+ options.push_back(std::make_pair("compilercallbacks", &callbacks));
+ std::string img = "-Ximage:" + image_location;
+ options.push_back(std::make_pair(img.c_str(), nullptr));
+ options.push_back(std::make_pair("imageinstructionset", reinterpret_cast<const void*>(isa_name)));
+ options.push_back(std::make_pair("-Xno-sig-chain", nullptr));
+ if (!Runtime::Create(options, false)) {
+ LOG(ERROR) << "Unable to initialize runtime";
+ return false;
+ }
+ std::unique_ptr<Runtime> runtime(Runtime::Current());
+
+ // Runtime::Create acquired the mutator_lock_ that is normally given away when we call
+ // Runtime::Start(); give it away now and then switch to a more manageable ScopedObjectAccess.
+ Thread::Current()->TransitionFromRunnableToSuspended(kNative);
+ ScopedObjectAccess soa(Thread::Current());
+
+ t.NewTiming("Image Verification setup");
+ std::vector<gc::space::ImageSpace*> spaces = Runtime::Current()->GetHeap()->GetBootImageSpaces();
+
+ // TODO: Check that no other .rel files exist in the original dir
+
+ bool success = true;
+ std::string image_location_dir = android::base::Dirname(image_location);
+ for (size_t i = 0; i < spaces.size(); ++i) {
+ gc::space::ImageSpace* space = spaces[i];
+ std::string image_filename = space->GetImageLocation();
+
+ std::string relocated_image_filename;
+ std::string error_msg;
+ if (!GetDalvikCacheFilename(image_filename.c_str(),
+ output_image_directory.c_str(), &relocated_image_filename, &error_msg)) {
+ LOG(ERROR) << "Failed to find relocated image file name: " << error_msg;
+ success = false;
+ break;
+ }
+ // location: /system/framework/boot.art
+ // isa: arm64
+ // basename: boot.art
+ // original: /system/framework/arm64/boot.art
+ // relocation: /system/framework/arm64/boot.art.rel
+ std::string original_image_filename = GetSystemImageFilename(image_filename.c_str(), isa);
+
+ if (!CheckImageIdenticalToOriginalExceptForRelocation(
+ relocated_image_filename, original_image_filename, &error_msg)) {
+ LOG(ERROR) << error_msg;
+ success = false;
+ break;
+ }
+ }
+
+ if (!kIsDebugBuild && !(RUNNING_ON_MEMORY_TOOL && kMemoryToolDetectsLeaks)) {
+ // On non-debug builds we want to just exit rather than bring the runtime down
+ // in an orderly fashion, so release the runtime here instead of destroying it.
+ runtime.release();
+ }
+
+ return success;
+}
+
bool PatchOat::WriteImage(File* out) {
TimingLogger::ScopedTiming t("Writing image File", timings_);
std::string error_msg;
@@ -739,9 +1115,14 @@ NO_RETURN static void Usage(const char *fmt, ...) {
UsageError(" --output-image-file=<file.art>: Specifies the exact file to write the patched");
UsageError(" image file to.");
UsageError("");
+ UsageError(" --output-image-relocation-file=<file.art.rel>: Specifies the exact file to write");
+ UsageError(" the image relocation information to.");
+ UsageError("");
UsageError(" --base-offset-delta=<delta>: Specify the amount to change the old base-offset by.");
UsageError(" This value may be negative.");
UsageError("");
+ UsageError(" --verify: Verify an existing patched file instead of creating one.");
+ UsageError("");
UsageError(" --dump-timings: dump out patch timing information");
UsageError("");
UsageError(" --no-dump-timings: do not dump out patch timing information");
@@ -750,16 +1131,17 @@ NO_RETURN static void Usage(const char *fmt, ...) {
exit(EXIT_FAILURE);
}
-static int patchoat_image(TimingLogger& timings,
- InstructionSet isa,
- const std::string& input_image_location,
- const std::string& output_image_filename,
- off_t base_delta,
- bool base_delta_set,
- bool debug) {
+static int patchoat_patch_image(TimingLogger& timings,
+ InstructionSet isa,
+ const std::string& input_image_location,
+ const std::string& output_image_directory,
+ const std::string& output_image_relocation_filename,
+ off_t base_delta,
+ bool base_delta_set,
+ bool debug) {
CHECK(!input_image_location.empty());
- if (output_image_filename.empty()) {
- Usage("Image patching requires --output-image-file");
+ if ((output_image_directory.empty()) && (output_image_relocation_filename.empty())) {
+ Usage("Image patching requires --output-image-file or --output-image-relocation-file");
}
if (!base_delta_set) {
@@ -778,9 +1160,37 @@ static int patchoat_image(TimingLogger& timings,
TimingLogger::ScopedTiming pt("patch image and oat", &timings);
- std::string output_directory =
- output_image_filename.substr(0, output_image_filename.find_last_of('/'));
- bool ret = PatchOat::Patch(input_image_location, base_delta, output_directory, isa, &timings);
+ std::string output_image_relocation_directory =
+ output_image_relocation_filename.substr(
+ 0, output_image_relocation_filename.find_last_of('/'));
+ bool ret =
+ PatchOat::Patch(
+ input_image_location,
+ base_delta,
+ output_image_directory,
+ output_image_relocation_directory,
+ isa,
+ &timings);
+
+ if (kIsDebugBuild) {
+ LOG(INFO) << "Exiting with return ... " << ret;
+ }
+ return ret ? EXIT_SUCCESS : EXIT_FAILURE;
+}
+
+static int patchoat_verify_image(TimingLogger& timings,
+ InstructionSet isa,
+ const std::string& input_image_location,
+ const std::string& output_image_directory) {
+ CHECK(!input_image_location.empty());
+ TimingLogger::ScopedTiming pt("verify image and oat", &timings);
+
+ bool ret =
+ PatchOat::Verify(
+ input_image_location,
+ output_image_directory,
+ isa,
+ &timings);
if (kIsDebugBuild) {
LOG(INFO) << "Exiting with return ... " << ret;
@@ -811,9 +1221,11 @@ static int patchoat(int argc, char **argv) {
InstructionSet isa = InstructionSet::kNone;
std::string input_image_location;
std::string output_image_filename;
+ std::string output_image_relocation_filename;
off_t base_delta = 0;
bool base_delta_set = false;
bool dump_timings = kIsDebugBuild;
+ bool verify = false;
for (int i = 0; i < argc; ++i) {
const StringPiece option(argv[i]);
@@ -832,6 +1244,9 @@ static int patchoat(int argc, char **argv) {
input_image_location = option.substr(strlen("--input-image-location=")).data();
} else if (option.starts_with("--output-image-file=")) {
output_image_filename = option.substr(strlen("--output-image-file=")).data();
+ } else if (option.starts_with("--output-image-relocation-file=")) {
+ output_image_relocation_filename =
+ option.substr(strlen("--output-image-relocation-file=")).data();
} else if (option.starts_with("--base-offset-delta=")) {
const char* base_delta_str = option.substr(strlen("--base-offset-delta=")).data();
base_delta_set = true;
@@ -842,23 +1257,40 @@ static int patchoat(int argc, char **argv) {
dump_timings = true;
} else if (option == "--no-dump-timings") {
dump_timings = false;
+ } else if (option == "--verify") {
+ verify = true;
} else {
Usage("Unknown argument %s", option.data());
}
}
+ // TODO: Have calls to patchoat pass in the output_image directory instead of
+ // the output_image_filename.
+ std::string output_image_directory;
+ if (!output_image_filename.empty())
+ output_image_directory = android::base::Dirname(output_image_filename);
+
// The instruction set is mandatory. This simplifies things...
if (!isa_set) {
Usage("Instruction set must be set.");
}
- int ret = patchoat_image(timings,
- isa,
- input_image_location,
- output_image_filename,
- base_delta,
- base_delta_set,
- debug);
+ int ret;
+ if (verify) {
+ ret = patchoat_verify_image(timings,
+ isa,
+ input_image_location,
+ output_image_directory);
+ } else {
+ ret = patchoat_patch_image(timings,
+ isa,
+ input_image_location,
+ output_image_directory,
+ output_image_relocation_filename,
+ base_delta,
+ base_delta_set,
+ debug);
+ }
timings.EndTiming();
if (dump_timings) {
diff --git a/patchoat/patchoat.h b/patchoat/patchoat.h
index 83516845d8..ba59d570a7 100644
--- a/patchoat/patchoat.h
+++ b/patchoat/patchoat.h
@@ -44,11 +44,28 @@ class Class;
class PatchOat {
public:
+ // Relocates the provided image by the specified offset. If output_image_directory is non-empty,
+ // outputs the relocated image into that directory. If output_image_relocation_directory is
+ // non-empty, outputs image relocation files (see GeneratePatch) into that directory.
static bool Patch(const std::string& image_location,
off_t delta,
- const std::string& output_directory,
+ const std::string& output_image_directory,
+ const std::string& output_image_relocation_directory,
InstructionSet isa,
TimingLogger* timings);
+ static bool Verify(const std::string& image_location,
+ const std::string& output_image_filename,
+ InstructionSet isa,
+ TimingLogger* timings);
+
+ // Generates a patch which can be used to efficiently relocate the original file or to check that
+ // a relocated file matches the original. The patch is generated from the difference between the
+ // |original| and the already-relocated |relocated| image, and written to |output| as an unsigned
+ // LEB128-encoded delta for each relocation position.
+ static bool GeneratePatch(const MemMap& original,
+ const MemMap& relocated,
+ std::vector<uint8_t>* output,
+ std::string* error_msg);
~PatchOat() {}
PatchOat(PatchOat&&) = default;
diff --git a/patchoat/patchoat_test.cc b/patchoat/patchoat_test.cc
index 86e851c72b..69c6bfae30 100644
--- a/patchoat/patchoat_test.cc
+++ b/patchoat/patchoat_test.cc
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include <openssl/sha.h>
#include <dirent.h>
#include <sys/types.h>
@@ -24,6 +25,7 @@
#include "android-base/strings.h"
#include "dexopt_test.h"
+#include "leb128.h"
#include "runtime.h"
#include <gtest/gtest.h>
@@ -122,18 +124,46 @@ class PatchoatTest : public DexoptTest {
return RunDex2OatOrPatchoat(argv, error_msg);
}
- bool RelocateBootImage(const std::string& input_image_location,
- const std::string& output_image_filename,
- off_t base_offset_delta,
- std::string* error_msg) {
+ static std::vector<std::string> BasePatchoatCommand(const std::string& input_image_location,
+ off_t base_offset_delta) {
Runtime* const runtime = Runtime::Current();
std::vector<std::string> argv;
argv.push_back(runtime->GetPatchoatExecutable());
argv.push_back("--input-image-location=" + input_image_location);
- argv.push_back("--output-image-file=" + output_image_filename);
argv.push_back(StringPrintf("--base-offset-delta=0x%jx", (intmax_t) base_offset_delta));
argv.push_back(StringPrintf("--instruction-set=%s", GetInstructionSetString(kRuntimeISA)));
+ return argv;
+ }
+
+ bool RelocateBootImage(const std::string& input_image_location,
+ const std::string& output_image_filename,
+ off_t base_offset_delta,
+ std::string* error_msg) {
+ std::vector<std::string> argv = BasePatchoatCommand(input_image_location, base_offset_delta);
+ argv.push_back("--output-image-file=" + output_image_filename);
+
+ return RunDex2OatOrPatchoat(argv, error_msg);
+ }
+
+ bool VerifyBootImage(const std::string& input_image_location,
+ const std::string& output_image_filename,
+ off_t base_offset_delta,
+ std::string* error_msg) {
+ std::vector<std::string> argv = BasePatchoatCommand(input_image_location, base_offset_delta);
+ argv.push_back("--output-image-file=" + output_image_filename);
+ argv.push_back("--verify");
+
+ return RunDex2OatOrPatchoat(argv, error_msg);
+ }
+
+ bool GenerateBootImageRelFile(const std::string& input_image_location,
+ const std::string& output_rel_filename,
+ off_t base_offset_delta,
+ std::string* error_msg) {
+ std::vector<std::string> argv = BasePatchoatCommand(input_image_location, base_offset_delta);
+ argv.push_back("--output-image-relocation-file=" + output_rel_filename);
+
return RunDex2OatOrPatchoat(argv, error_msg);
}
@@ -275,7 +305,7 @@ class PatchoatTest : public DexoptTest {
*error_msg = StringPrintf("Failed to read %s: %s", filename2.c_str(), read_error_msg.c_str());
return true;
}
- if (image1.size() != image2.size()) {
+ if (image1.size() != image2.size()) {
*error_msg =
StringPrintf(
"%s and %s are of different size: %zu vs %zu",
@@ -408,4 +438,103 @@ TEST_F(PatchoatTest, PatchoatRelocationSameAsDex2oatRelocation) {
#endif
}
+TEST_F(PatchoatTest, RelFileVerification) {
+ // This test checks that a boot image relocated using patchoat can be unrelocated using the .rel
+ // file created by patchoat.
+
+ // This test doesn't work when heap poisoning is enabled because some of the
+ // references are negated. b/72117833 is tracking the effort to have patchoat
+ // and its tests support heap poisoning.
+ TEST_DISABLED_FOR_HEAP_POISONING();
+
+ // Compile boot image into a random directory using dex2oat
+ ScratchFile dex2oat_orig_scratch;
+ dex2oat_orig_scratch.Unlink();
+ std::string dex2oat_orig_dir = dex2oat_orig_scratch.GetFilename();
+ ASSERT_EQ(0, mkdir(dex2oat_orig_dir.c_str(), 0700));
+ const uint32_t orig_base_addr = 0x60000000;
+ std::vector<std::string> dex2oat_extra_args;
+ std::string error_msg;
+ if (!CompileBootImageToDir(dex2oat_orig_dir, dex2oat_extra_args, orig_base_addr, &error_msg)) {
+ FAIL() << "CompileBootImage1 failed: " << error_msg;
+ }
+
+ // Generate image relocation file for the original boot image
+ std::string dex2oat_orig_with_arch_dir =
+ dex2oat_orig_dir + "/" + GetInstructionSetString(kRuntimeISA);
+ // The arch-including symlink is needed by patchoat
+ ASSERT_EQ(0, symlink(dex2oat_orig_dir.c_str(), dex2oat_orig_with_arch_dir.c_str()));
+ off_t base_addr_delta = 0x100000;
+ if (!GenerateBootImageRelFile(
+ dex2oat_orig_dir + "/boot.art",
+ dex2oat_orig_dir + "/boot.art.rel",
+ base_addr_delta,
+ &error_msg)) {
+ FAIL() << "RelocateBootImage failed: " << error_msg;
+ }
+
+ // Relocate the original boot image using patchoat
+ ScratchFile relocated_scratch;
+ relocated_scratch.Unlink();
+ std::string relocated_dir = relocated_scratch.GetFilename();
+ ASSERT_EQ(0, mkdir(relocated_dir.c_str(), 0700));
+ // Use a different relocation delta from the one used when generating .rel files above. This is
+ // to make sure .rel files are not specific to a particular relocation delta.
+ base_addr_delta -= 0x10000;
+ if (!RelocateBootImage(
+ dex2oat_orig_dir + "/boot.art",
+ relocated_dir + "/boot.art",
+ base_addr_delta,
+ &error_msg)) {
+ FAIL() << "RelocateBootImage failed: " << error_msg;
+ }
+
+ // Assert that patchoat created the same set of .art and .art.rel files
+ std::vector<std::string> rel_basenames;
+ std::vector<std::string> relocated_image_basenames;
+ if (!ListDirFilesEndingWith(dex2oat_orig_dir, ".rel", &rel_basenames, &error_msg)) {
+ FAIL() << "Failed to list *.art.rel files in " << dex2oat_orig_dir << ": " << error_msg;
+ }
+ if (!ListDirFilesEndingWith(relocated_dir, ".art", &relocated_image_basenames, &error_msg)) {
+ FAIL() << "Failed to list *.art files in " << relocated_dir << ": " << error_msg;
+ }
+ std::sort(rel_basenames.begin(), rel_basenames.end());
+ std::sort(relocated_image_basenames.begin(), relocated_image_basenames.end());
+
+ // .art and .art.rel file names output by patchoat look like
+ // tmp@art-data-<random>-<random>@boot*.art, encoding the name of the directory in their name.
+ // To compare these with each other, we retain only the part of the file name after the last @,
+ // and we also drop the extension.
+ std::vector<std::string> rel_shortened_basenames(rel_basenames.size());
+ std::vector<std::string> relocated_image_shortened_basenames(relocated_image_basenames.size());
+ for (size_t i = 0; i < rel_basenames.size(); i++) {
+ rel_shortened_basenames[i] = rel_basenames[i].substr(rel_basenames[i].find_last_of("@") + 1);
+ rel_shortened_basenames[i] =
+ rel_shortened_basenames[i].substr(0, rel_shortened_basenames[i].find("."));
+ }
+ for (size_t i = 0; i < relocated_image_basenames.size(); i++) {
+ relocated_image_shortened_basenames[i] =
+ relocated_image_basenames[i].substr(relocated_image_basenames[i].find_last_of("@") + 1);
+ relocated_image_shortened_basenames[i] =
+ relocated_image_shortened_basenames[i].substr(
+ 0, relocated_image_shortened_basenames[i].find("."));
+ }
+ ASSERT_EQ(rel_shortened_basenames, relocated_image_shortened_basenames);
+
+ // Assert that verification works with the .rel files.
+ if (!VerifyBootImage(
+ dex2oat_orig_dir + "/boot.art",
+ relocated_dir + "/boot.art",
+ base_addr_delta,
+ &error_msg)) {
+ FAIL() << "VerifyBootImage failed: " << error_msg;
+ }
+
+ ClearDirectory(dex2oat_orig_dir.c_str(), /*recursive*/ true);
+ ClearDirectory(relocated_dir.c_str(), /*recursive*/ true);
+
+ rmdir(dex2oat_orig_dir.c_str());
+ rmdir(relocated_dir.c_str());
+}
+
} // namespace art
diff --git a/profman/Android.bp b/profman/Android.bp
index ea682b40a0..6592b9dec0 100644
--- a/profman/Android.bp
+++ b/profman/Android.bp
@@ -31,6 +31,7 @@ cc_defaults {
},
shared_libs: [
+ "libdexfile",
"libbase",
],
}
diff --git a/profman/boot_image_profile.cc b/profman/boot_image_profile.cc
index a750105e72..3d003a7f06 100644
--- a/profman/boot_image_profile.cc
+++ b/profman/boot_image_profile.cc
@@ -19,6 +19,7 @@
#include "boot_image_profile.h"
#include "dex/dex_file-inl.h"
+#include "jit/profile_compilation_info.h"
#include "method_reference.h"
#include "type_reference.h"
diff --git a/profman/boot_image_profile.h b/profman/boot_image_profile.h
index eb43b7ca7f..99e5a75939 100644
--- a/profman/boot_image_profile.h
+++ b/profman/boot_image_profile.h
@@ -22,10 +22,11 @@
#include <vector>
#include "dex/dex_file.h"
-#include "jit/profile_compilation_info.h"
namespace art {
+class ProfileCompilationInfo;
+
struct BootImageOptions {
public:
// Threshold for classes that may be dirty or clean. The threshold specifies how
diff --git a/profman/profile_assistant.cc b/profman/profile_assistant.cc
index ff02b5d59f..a00b1fa5bd 100644
--- a/profman/profile_assistant.cc
+++ b/profman/profile_assistant.cc
@@ -31,12 +31,13 @@ static constexpr const uint32_t kMinNewClassesPercentChangeForCompilation = 2;
ProfileAssistant::ProcessingResult ProfileAssistant::ProcessProfilesInternal(
const std::vector<ScopedFlock>& profile_files,
- const ScopedFlock& reference_profile_file) {
+ const ScopedFlock& reference_profile_file,
+ const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn) {
DCHECK(!profile_files.empty());
ProfileCompilationInfo info;
// Load the reference profile.
- if (!info.Load(reference_profile_file->Fd())) {
+ if (!info.Load(reference_profile_file->Fd(), /*merge_classes*/ true, filter_fn)) {
LOG(WARNING) << "Could not load reference profile file";
return kErrorBadProfiles;
}
@@ -48,7 +49,7 @@ ProfileAssistant::ProcessingResult ProfileAssistant::ProcessProfilesInternal(
// Merge all current profiles.
for (size_t i = 0; i < profile_files.size(); i++) {
ProfileCompilationInfo cur_info;
- if (!cur_info.Load(profile_files[i]->Fd())) {
+ if (!cur_info.Load(profile_files[i]->Fd(), /*merge_classes*/ true, filter_fn)) {
LOG(WARNING) << "Could not load profile file at index " << i;
return kErrorBadProfiles;
}
@@ -122,7 +123,8 @@ class ScopedFlockList {
ProfileAssistant::ProcessingResult ProfileAssistant::ProcessProfiles(
const std::vector<int>& profile_files_fd,
- int reference_profile_file_fd) {
+ int reference_profile_file_fd,
+ const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn) {
DCHECK_GE(reference_profile_file_fd, 0);
std::string error;
@@ -143,12 +145,15 @@ ProfileAssistant::ProcessingResult ProfileAssistant::ProcessProfiles(
return kErrorCannotLock;
}
- return ProcessProfilesInternal(profile_files.Get(), reference_profile_file);
+ return ProcessProfilesInternal(profile_files.Get(),
+ reference_profile_file,
+ filter_fn);
}
ProfileAssistant::ProcessingResult ProfileAssistant::ProcessProfiles(
const std::vector<std::string>& profile_files,
- const std::string& reference_profile_file) {
+ const std::string& reference_profile_file,
+ const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn) {
std::string error;
ScopedFlockList profile_files_list(profile_files.size());
@@ -164,7 +169,9 @@ ProfileAssistant::ProcessingResult ProfileAssistant::ProcessProfiles(
return kErrorCannotLock;
}
- return ProcessProfilesInternal(profile_files_list.Get(), locked_reference_profile_file);
+ return ProcessProfilesInternal(profile_files_list.Get(),
+ locked_reference_profile_file,
+ filter_fn);
}
} // namespace art
diff --git a/profman/profile_assistant.h b/profman/profile_assistant.h
index be703abda8..ee555840d7 100644
--- a/profman/profile_assistant.h
+++ b/profman/profile_assistant.h
@@ -53,16 +53,21 @@ class ProfileAssistant {
//
static ProcessingResult ProcessProfiles(
const std::vector<std::string>& profile_files,
- const std::string& reference_profile_file);
+ const std::string& reference_profile_file,
+ const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn
+ = ProfileCompilationInfo::ProfileFilterFnAcceptAll);
static ProcessingResult ProcessProfiles(
const std::vector<int>& profile_files_fd_,
- int reference_profile_file_fd);
+ int reference_profile_file_fd,
+ const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn
+ = ProfileCompilationInfo::ProfileFilterFnAcceptAll);
private:
static ProcessingResult ProcessProfilesInternal(
const std::vector<ScopedFlock>& profile_files,
- const ScopedFlock& reference_profile_file);
+ const ScopedFlock& reference_profile_file,
+ const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn);
DISALLOW_COPY_AND_ASSIGN(ProfileAssistant);
};
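The default argument above keeps existing callers unchanged; a caller that wants filtering supplies its own ProfileLoadFilterFn. A minimal sketch of such a caller (hypothetical, the wanted location and checksum are placeholders that would normally come from an opened DexFile, as profman.cc does further down):

#include <string>
#include <vector>

#include "jit/profile_compilation_info.h"
#include "profile_assistant.h"

namespace art {

// Hypothetical values; real callers take them from DexFile::GetLocation()/GetLocationChecksum().
static const char* const kWantedLocation = "base.apk";
static constexpr uint32_t kWantedChecksum = 0x12345678u;

ProfileAssistant::ProcessingResult MergeOnlyWantedDex(
    const std::vector<std::string>& cur_profiles,
    const std::string& reference_profile) {
  ProfileCompilationInfo::ProfileLoadFilterFn only_wanted =
      [](const std::string& dex_location, uint32_t checksum) {
        return dex_location == ProfileCompilationInfo::GetProfileDexFileKey(kWantedLocation) &&
               checksum == kWantedChecksum;
      };
  // Profiles are locked, loaded through the filter, merged and written back by the assistant.
  return ProfileAssistant::ProcessProfiles(cur_profiles, reference_profile, only_wanted);
}

}  // namespace art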
diff --git a/profman/profile_assistant_test.cc b/profman/profile_assistant_test.cc
index 642d26e8f5..79310ac166 100644
--- a/profman/profile_assistant_test.cc
+++ b/profman/profile_assistant_test.cc
@@ -16,6 +16,7 @@
#include <gtest/gtest.h>
+#include "android-base/strings.h"
#include "art_method-inl.h"
#include "base/unix_file/fd_file.h"
#include "common_runtime_test.h"
@@ -51,6 +52,28 @@ class ProfileAssistantTest : public CommonRuntimeTest {
uint32_t dex_location_checksum1 = checksum;
std::string dex_location2 = "location2" + id;
uint32_t dex_location_checksum2 = 10 * checksum;
+ SetupProfile(dex_location1,
+ dex_location_checksum1,
+ dex_location2,
+ dex_location_checksum2,
+ number_of_methods,
+ number_of_classes,
+ profile,
+ info,
+ start_method_index,
+ reverse_dex_write_order);
+ }
+
+ void SetupProfile(const std::string& dex_location1,
+ uint32_t dex_location_checksum1,
+ const std::string& dex_location2,
+ uint32_t dex_location_checksum2,
+ uint16_t number_of_methods,
+ uint16_t number_of_classes,
+ const ScratchFile& profile,
+ ProfileCompilationInfo* info,
+ uint16_t start_method_index = 0,
+ bool reverse_dex_write_order = false) {
for (uint16_t i = start_method_index; i < start_method_index + number_of_methods; i++) {
// reverse_dex_write_order controls the order in which the dex files will be added to
// the profile and thus written to disk.
@@ -209,7 +232,8 @@ class ProfileAssistantTest : public CommonRuntimeTest {
bool CreateProfile(const std::string& profile_file_contents,
const std::string& filename,
- const std::string& dex_location) {
+ const std::string& dex_location,
+ bool skip_verification) {
ScratchFile class_names_file;
File* file = class_names_file.GetFile();
EXPECT_TRUE(file->WriteFully(profile_file_contents.c_str(), profile_file_contents.length()));
@@ -222,6 +246,9 @@ class ProfileAssistantTest : public CommonRuntimeTest {
argv_str.push_back("--reference-profile-file=" + filename);
argv_str.push_back("--apk=" + dex_location);
argv_str.push_back("--dex-location=" + dex_location);
+ if (skip_verification) {
+ argv_str.push_back("--skip-apk-verification");
+ }
std::string error;
EXPECT_EQ(ExecAndReturnCode(argv_str, &error), 0);
return true;
@@ -238,6 +265,7 @@ class ProfileAssistantTest : public CommonRuntimeTest {
argv_str.push_back("--profile-file=" + filename);
argv_str.push_back("--apk=" + GetLibCoreDexFileNames()[0]);
argv_str.push_back("--dex-location=" + GetLibCoreDexFileNames()[0]);
+ argv_str.push_back("--skip-apk-verification");
argv_str.push_back("--dump-output-to-fd=" + std::to_string(GetFd(output_file)));
std::string error;
EXPECT_EQ(ExecAndReturnCode(argv_str, &error), 0);
@@ -268,7 +296,8 @@ class ProfileAssistantTest : public CommonRuntimeTest {
ScratchFile profile_file;
EXPECT_TRUE(CreateProfile(input_file_contents,
profile_file.GetFilename(),
- GetLibCoreDexFileNames()[0]));
+ GetLibCoreDexFileNames()[0],
+ /* skip_verification */ true));
profile_file.GetFile()->ResetOffset();
EXPECT_TRUE(DumpClassesAndMethods(profile_file.GetFilename(), output_file_contents));
return true;
@@ -675,7 +704,8 @@ TEST_F(ProfileAssistantTest, TestProfileCreationGenerateMethods) {
ScratchFile profile_file;
EXPECT_TRUE(CreateProfile(input_file_contents,
profile_file.GetFilename(),
- GetLibCoreDexFileNames()[0]));
+ GetLibCoreDexFileNames()[0],
+ /* skip_verification */ true));
ProfileCompilationInfo info;
profile_file.GetFile()->ResetOffset();
ASSERT_TRUE(info.Load(GetFd(profile_file)));
@@ -731,7 +761,8 @@ TEST_F(ProfileAssistantTest, TestBootImageProfile) {
"H" + kHotMethod + "\n" +
kUncommonDirtyClass;
profiles.emplace_back(ScratchFile());
- EXPECT_TRUE(CreateProfile(dex1, profiles.back().GetFilename(), core_dex));
+ EXPECT_TRUE(CreateProfile(
+ dex1, profiles.back().GetFilename(), core_dex, /* skip_verification */ true));
// Create a bunch of boot profiles.
std::string dex2 =
@@ -741,7 +772,8 @@ TEST_F(ProfileAssistantTest, TestBootImageProfile) {
"P" + kMultiMethod + "\n" +
kUncommonDirtyClass;
profiles.emplace_back(ScratchFile());
- EXPECT_TRUE(CreateProfile(dex2, profiles.back().GetFilename(), core_dex));
+ EXPECT_TRUE(CreateProfile(
+ dex2, profiles.back().GetFilename(), core_dex, /* skip_verification */ true));
// Create a bunch of boot profiles.
std::string dex3 =
@@ -750,7 +782,8 @@ TEST_F(ProfileAssistantTest, TestBootImageProfile) {
"P" + kMultiMethod + "\n" +
kDirtyClass + "\n";
profiles.emplace_back(ScratchFile());
- EXPECT_TRUE(CreateProfile(dex3, profiles.back().GetFilename(), core_dex));
+ EXPECT_TRUE(CreateProfile(
+ dex3, profiles.back().GetFilename(), core_dex, /* skip_verification */ true));
// Generate the boot profile.
ScratchFile out_profile;
@@ -763,6 +796,7 @@ TEST_F(ProfileAssistantTest, TestBootImageProfile) {
args.push_back("--reference-profile-file=" + out_profile.GetFilename());
args.push_back("--apk=" + core_dex);
args.push_back("--dex-location=" + core_dex);
+ args.push_back("--skip-apk-verification");
for (const ScratchFile& profile : profiles) {
args.push_back("--profile-file=" + profile.GetFilename());
}
@@ -858,7 +892,8 @@ TEST_F(ProfileAssistantTest, TestProfileCreateInlineCache) {
ScratchFile profile_file;
ASSERT_TRUE(CreateProfile(input_file_contents,
profile_file.GetFilename(),
- GetTestDexFileName("ProfileTestMultiDex")));
+ GetTestDexFileName("ProfileTestMultiDex"),
+ /* skip_verification */ false));
// Load the profile from disk.
ProfileCompilationInfo info;
@@ -1008,7 +1043,8 @@ TEST_F(ProfileAssistantTest, TestProfileCreateWithInvalidData) {
std::string dex_filename = GetTestDexFileName("ProfileTestMultiDex");
ASSERT_TRUE(CreateProfile(input_file_contents,
profile_file.GetFilename(),
- dex_filename));
+ dex_filename,
+ /* skip_verification */ false));
// Load the profile from disk.
ProfileCompilationInfo info;
@@ -1115,4 +1151,89 @@ TEST_F(ProfileAssistantTest, DumpOnly) {
}
}
+TEST_F(ProfileAssistantTest, MergeProfilesWithFilter) {
+ ScratchFile profile1;
+ ScratchFile profile2;
+ ScratchFile reference_profile;
+
+ std::vector<int> profile_fds({
+ GetFd(profile1),
+ GetFd(profile2)});
+ int reference_profile_fd = GetFd(reference_profile);
+
+ // Use a real dex file to generate profile test data.
+ // The file will be used during merging to filter unwanted data.
+ std::vector<std::unique_ptr<const DexFile>> dex_files = OpenTestDexFiles("ProfileTestMultiDex");
+ const DexFile& d1 = *dex_files[0];
+ const DexFile& d2 = *dex_files[1];
+ // The new profile info will contain the methods with indices 0-100.
+ const uint16_t kNumberOfMethodsToEnableCompilation = 100;
+ ProfileCompilationInfo info1;
+ SetupProfile(d1.GetLocation(), d1.GetLocationChecksum(), "p1", 1,
+ kNumberOfMethodsToEnableCompilation, 0, profile1, &info1);
+ ProfileCompilationInfo info2;
+ SetupProfile(d2.GetLocation(), d2.GetLocationChecksum(), "p2", 2,
+ kNumberOfMethodsToEnableCompilation, 0, profile2, &info2);
+
+
+ // The reference profile info will contain the methods with indices 50-150.
+ const uint16_t kNumberOfMethodsAlreadyCompiled = 100;
+ ProfileCompilationInfo reference_info;
+ SetupProfile(d1.GetLocation(), d1.GetLocationChecksum(), "p1", 1,
+ kNumberOfMethodsAlreadyCompiled, 0, reference_profile,
+ &reference_info, kNumberOfMethodsToEnableCompilation / 2);
+
+ // Run profman and pass the dex file with --apk-fd.
+ android::base::unique_fd apk_fd(
+ open(GetTestDexFileName("ProfileTestMultiDex").c_str(), O_RDONLY));
+ ASSERT_GE(apk_fd.get(), 0);
+
+ std::string profman_cmd = GetProfmanCmd();
+ std::vector<std::string> argv_str;
+ argv_str.push_back(profman_cmd);
+ argv_str.push_back("--profile-file-fd=" + std::to_string(profile1.GetFd()));
+ argv_str.push_back("--profile-file-fd=" + std::to_string(profile2.GetFd()));
+ argv_str.push_back("--reference-profile-file-fd=" + std::to_string(reference_profile.GetFd()));
+ argv_str.push_back("--apk-fd=" + std::to_string(apk_fd.get()));
+ std::string error;
+
+ EXPECT_EQ(ExecAndReturnCode(argv_str, &error), 0) << error;
+
+ // Verify that we can load the result.
+
+ ProfileCompilationInfo result;
+ ASSERT_TRUE(reference_profile.GetFile()->ResetOffset());
+ ASSERT_TRUE(result.Load(reference_profile_fd));
+
+
+ ASSERT_TRUE(profile1.GetFile()->ResetOffset());
+ ASSERT_TRUE(profile2.GetFile()->ResetOffset());
+ ASSERT_TRUE(reference_profile.GetFile()->ResetOffset());
+
+ // Verify that the result filtered out data not belonging to the dex file.
+ // This is equivalent to checking that the result is equal to the merging of
+ // all profiles while filtering out data not belonging to the dex file.
+
+ ProfileCompilationInfo::ProfileLoadFilterFn filter_fn =
+ [&d1, &d2](const std::string& dex_location, uint32_t checksum) -> bool {
+ return (dex_location == ProfileCompilationInfo::GetProfileDexFileKey(d1.GetLocation())
+ && checksum == d1.GetLocationChecksum())
+ || (dex_location == ProfileCompilationInfo::GetProfileDexFileKey(d2.GetLocation())
+ && checksum == d2.GetLocationChecksum());
+ };
+
+ ProfileCompilationInfo info1_filter;
+ ProfileCompilationInfo info2_filter;
+ ProfileCompilationInfo expected;
+
+  info1_filter.Load(profile1.GetFd(), /*merge_classes*/ true, filter_fn);
+ info2_filter.Load(profile2.GetFd(), /*merge_classes*/ true, filter_fn);
+ expected.Load(reference_profile.GetFd(), /*merge_classes*/ true, filter_fn);
+
+ ASSERT_TRUE(expected.MergeWith(info1_filter));
+ ASSERT_TRUE(expected.MergeWith(info2_filter));
+
+ ASSERT_TRUE(expected.Equals(result));
+}
+
} // namespace art
diff --git a/profman/profman.cc b/profman/profman.cc
index c4216fab99..387ce8dfae 100644
--- a/profman/profman.cc
+++ b/profman/profman.cc
@@ -18,6 +18,7 @@
#include <stdio.h>
#include <stdlib.h>
#include <sys/file.h>
+#include <sys/param.h>
#include <sys/stat.h>
#include <unistd.h>
@@ -39,6 +40,7 @@
#include "base/unix_file/fd_file.h"
#include "boot_image_profile.h"
#include "bytecode_utils.h"
+#include "dex/art_dex_file_loader.h"
#include "dex/code_item_accessors-inl.h"
#include "dex/dex_file.h"
#include "dex/dex_file_loader.h"
@@ -136,6 +138,7 @@ NO_RETURN static void Usage(const char *fmt, ...) {
UsageError(" --apk-fd=<number>: file descriptor containing an open APK to");
UsageError(" search for dex files");
UsageError(" --apk-=<filename>: an APK to search for dex files");
+ UsageError(" --skip-apk-verification: do not attempt to verify APKs");
UsageError("");
UsageError(" --generate-boot-image-profile: Generate a boot image profile based on input");
UsageError(" profiles. Requires passing in dex files to inspect properties of classes.");
@@ -148,6 +151,10 @@ NO_RETURN static void Usage(const char *fmt, ...) {
UsageError(" --boot-image-sampled-method-threshold=<value>: minimum number of profiles a");
UsageError(" non-hot method needs to be in order to be hot in the output profile. The");
UsageError(" default is max int.");
+ UsageError(" --copy-and-update-profile-key: if present, profman will copy the profile from");
+ UsageError(" the file passed with --profile-fd(file) to the profile passed with");
+ UsageError(" --reference-profile-fd(file) and update at the same time the profile-key");
+ UsageError(" of entries corresponding to the apks passed with --apk(-fd).");
UsageError("");
exit(EXIT_FAILURE);
@@ -180,12 +187,14 @@ class ProfMan FINAL {
dump_only_(false),
dump_classes_and_methods_(false),
generate_boot_image_profile_(false),
+ skip_apk_verification_(false),
dump_output_to_fd_(kInvalidFd),
test_profile_num_dex_(kDefaultTestProfileNumDex),
test_profile_method_percerntage_(kDefaultTestProfileMethodPercentage),
test_profile_class_percentage_(kDefaultTestProfileClassPercentage),
test_profile_seed_(NanoTime()),
- start_ns_(NanoTime()) {}
+ start_ns_(NanoTime()),
+ copy_and_update_profile_key_(false) {}
~ProfMan() {
LogCompletionTime();
@@ -221,6 +230,8 @@ class ProfMan FINAL {
ParseUintOption(option, "--dump-output-to-fd", &dump_output_to_fd_, Usage);
} else if (option == "--generate-boot-image-profile") {
generate_boot_image_profile_ = true;
+ } else if (option == "--skip-apk-verification") {
+ skip_apk_verification_ = true;
} else if (option.starts_with("--boot-image-class-threshold=")) {
ParseUintOption(option,
"--boot-image-class-threshold",
@@ -287,6 +298,22 @@ class ProfMan FINAL {
}
}
+ struct ProfileFilterKey {
+ ProfileFilterKey(const std::string& dex_location, uint32_t checksum)
+ : dex_location_(dex_location), checksum_(checksum) {}
+ const std::string dex_location_;
+ uint32_t checksum_;
+
+ bool operator==(const ProfileFilterKey& other) const {
+ return checksum_ == other.checksum_ && dex_location_ == other.dex_location_;
+ }
+ bool operator<(const ProfileFilterKey& other) const {
+ return checksum_ == other.checksum_
+ ? dex_location_ < other.dex_location_
+ : checksum_ < other.checksum_;
+ }
+ };
+
ProfileAssistant::ProcessingResult ProcessProfiles() {
// Validate that at least one profile file was passed, as well as a reference profile.
if (profile_files_.empty() && profile_files_fd_.empty()) {
@@ -300,63 +327,146 @@ class ProfMan FINAL {
Usage("Options --profile-file-fd and --reference-profile-file-fd "
"should only be used together");
}
+
+ // Check if we have any apks which we should use to filter the profile data.
+ std::set<ProfileFilterKey> profile_filter_keys;
+ if (!GetProfileFilterKeyFromApks(&profile_filter_keys)) {
+ return ProfileAssistant::kErrorIO;
+ }
+
+ // Build the profile filter function. If the set of keys is empty it means we
+ // don't have any apks; as such we do not filter anything.
+ const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn =
+ [profile_filter_keys](const std::string& dex_location, uint32_t checksum) {
+ if (profile_filter_keys.empty()) {
+ // No --apk was specified. Accept all dex files.
+ return true;
+ } else {
+ bool res = profile_filter_keys.find(
+ ProfileFilterKey(dex_location, checksum)) != profile_filter_keys.end();
+ return res;
+ }
+ };
+
ProfileAssistant::ProcessingResult result;
+
if (profile_files_.empty()) {
// The file doesn't need to be flushed here (ProcessProfiles will do it)
// so don't check the usage.
File file(reference_profile_file_fd_, false);
- result = ProfileAssistant::ProcessProfiles(profile_files_fd_, reference_profile_file_fd_);
+ result = ProfileAssistant::ProcessProfiles(profile_files_fd_,
+ reference_profile_file_fd_,
+ filter_fn);
CloseAllFds(profile_files_fd_, "profile_files_fd_");
} else {
- result = ProfileAssistant::ProcessProfiles(profile_files_, reference_profile_file_);
+ result = ProfileAssistant::ProcessProfiles(profile_files_,
+ reference_profile_file_,
+ filter_fn);
}
return result;
}
- void OpenApkFilesFromLocations(std::vector<std::unique_ptr<const DexFile>>* dex_files) {
+ bool ShouldSkipApkVerification() const {
+ return skip_apk_verification_;
+ }
+
+ bool GetProfileFilterKeyFromApks(std::set<ProfileFilterKey>* profile_filter_keys) {
+ auto process_fn = [profile_filter_keys](std::unique_ptr<const DexFile>&& dex_file) {
+ // Store the profile key of the location instead of the location itself.
+ // This will make the matching in the profile filter method much easier.
+ profile_filter_keys->emplace(ProfileCompilationInfo::GetProfileDexFileKey(
+ dex_file->GetLocation()), dex_file->GetLocationChecksum());
+ };
+ return OpenApkFilesFromLocations(process_fn);
+ }
+
+ bool OpenApkFilesFromLocations(std::vector<std::unique_ptr<const DexFile>>* dex_files) {
+ auto process_fn = [dex_files](std::unique_ptr<const DexFile>&& dex_file) {
+ dex_files->emplace_back(std::move(dex_file));
+ };
+ return OpenApkFilesFromLocations(process_fn);
+ }
+
+ bool OpenApkFilesFromLocations(
+ std::function<void(std::unique_ptr<const DexFile>&&)> process_fn) {
bool use_apk_fd_list = !apks_fd_.empty();
if (use_apk_fd_list) {
// Get the APKs from the collection of FDs.
- CHECK_EQ(dex_locations_.size(), apks_fd_.size());
+ if (dex_locations_.empty()) {
+      // Try to compute the dex locations from the file paths of the file descriptors.
+      // This makes it easier to invoke profman with --apk-fd without being forced
+      // to pass --dex-location when the location would be the apk path.
+ if (!ComputeDexLocationsFromApkFds()) {
+ return false;
+ }
+ } else {
+ if (dex_locations_.size() != apks_fd_.size()) {
+ Usage("The number of apk-fds must match the number of dex-locations.");
+ }
+ }
} else if (!apk_files_.empty()) {
- // Get the APKs from the collection of filenames.
- CHECK_EQ(dex_locations_.size(), apk_files_.size());
+ if (dex_locations_.size() != apk_files_.size()) {
+ Usage("The number of apk-fds must match the number of dex-locations.");
+ }
} else {
// No APKs were specified.
CHECK(dex_locations_.empty());
- return;
+ return true;
}
static constexpr bool kVerifyChecksum = true;
for (size_t i = 0; i < dex_locations_.size(); ++i) {
std::string error_msg;
+ const ArtDexFileLoader dex_file_loader;
std::vector<std::unique_ptr<const DexFile>> dex_files_for_location;
if (use_apk_fd_list) {
- if (DexFileLoader::OpenZip(apks_fd_[i],
- dex_locations_[i],
- /* verify */ true,
- kVerifyChecksum,
- &error_msg,
- &dex_files_for_location)) {
+ if (dex_file_loader.OpenZip(apks_fd_[i],
+ dex_locations_[i],
+ /* verify */ !ShouldSkipApkVerification(),
+ kVerifyChecksum,
+ &error_msg,
+ &dex_files_for_location)) {
} else {
- LOG(WARNING) << "OpenZip failed for '" << dex_locations_[i] << "' " << error_msg;
- continue;
+ LOG(ERROR) << "OpenZip failed for '" << dex_locations_[i] << "' " << error_msg;
+ return false;
}
} else {
- if (DexFileLoader::Open(apk_files_[i].c_str(),
- dex_locations_[i],
- /* verify */ true,
- kVerifyChecksum,
- &error_msg,
- &dex_files_for_location)) {
+ if (dex_file_loader.Open(apk_files_[i].c_str(),
+ dex_locations_[i],
+ /* verify */ !ShouldSkipApkVerification(),
+ kVerifyChecksum,
+ &error_msg,
+ &dex_files_for_location)) {
} else {
- LOG(WARNING) << "Open failed for '" << dex_locations_[i] << "' " << error_msg;
- continue;
+ LOG(ERROR) << "Open failed for '" << dex_locations_[i] << "' " << error_msg;
+ return false;
}
}
for (std::unique_ptr<const DexFile>& dex_file : dex_files_for_location) {
- dex_files->emplace_back(std::move(dex_file));
+ process_fn(std::move(dex_file));
+ }
+ }
+ return true;
+ }
+
+ // Get the dex locations from the apk fds.
+  // The method reads the links from /proc/self/fd/ to find the original apk paths
+ // and puts them in the dex_locations_ vector.
+ bool ComputeDexLocationsFromApkFds() {
+ // We can't use a char array of PATH_MAX size without exceeding the frame size.
+ // So we use a vector as the buffer for the path.
+ std::vector<char> buffer(PATH_MAX, 0);
+ for (size_t i = 0; i < apks_fd_.size(); ++i) {
+ std::string fd_path = "/proc/self/fd/" + std::to_string(apks_fd_[i]);
+ ssize_t len = readlink(fd_path.c_str(), buffer.data(), buffer.size() - 1);
+ if (len == -1) {
+ PLOG(ERROR) << "Could not open path from fd";
+ return false;
}
+
+ buffer[len] = '\0';
+ dex_locations_.push_back(buffer.data());
}
+ return true;
}
std::unique_ptr<const ProfileCompilationInfo> LoadProfile(const std::string& filename, int fd) {
@@ -399,8 +509,6 @@ class ProfMan FINAL {
static const char* kOrdinaryProfile = "=== profile ===";
static const char* kReferenceProfile = "=== reference profile ===";
- // Open apk/zip files and and read dex files.
- MemMap::Init(); // for ZipArchive::OpenFromFd
std::vector<std::unique_ptr<const DexFile>> dex_files;
OpenApkFilesFromLocations(&dex_files);
std::string dump;
@@ -536,8 +644,7 @@ class ProfMan FINAL {
reference_profile_file_.empty() && !FdIsValid(reference_profile_file_fd_)) {
Usage("No profile files or reference profile specified.");
}
- // Open apk/zip files and and read dex files.
- MemMap::Init(); // for ZipArchive::OpenFromFd
+
// Open the dex files to get the names for classes.
std::vector<std::unique_ptr<const DexFile>> dex_files;
OpenApkFilesFromLocations(&dex_files);
@@ -931,8 +1038,6 @@ class ProfMan FINAL {
Usage("Profile must be specified with --reference-profile-file or "
"--reference-profile-file-fd");
}
- // for ZipArchive::OpenFromFd
- MemMap::Init();
// Open the profile output file if needed.
int fd = OpenReferenceProfile();
if (!FdIsValid(fd)) {
@@ -967,8 +1072,6 @@ class ProfMan FINAL {
}
int CreateBootProfile() {
- // Initialize memmap since it's required to open dex files.
- MemMap::Init();
// Open the profile output file.
const int reference_fd = OpenReferenceProfile();
if (!FdIsValid(reference_fd)) {
@@ -1048,8 +1151,6 @@ class ProfMan FINAL {
test_profile_class_percentage_,
test_profile_seed_);
} else {
- // Initialize MemMap for ZipArchive::OpenFromFd.
- MemMap::Init();
// Open the dex files to look up classes and methods.
std::vector<std::unique_ptr<const DexFile>> dex_files;
OpenApkFilesFromLocations(&dex_files);
@@ -1068,6 +1169,42 @@ class ProfMan FINAL {
return !test_profile_.empty();
}
+ bool ShouldCopyAndUpdateProfileKey() const {
+ return copy_and_update_profile_key_;
+ }
+
+ bool CopyAndUpdateProfileKey() {
+    // Validate that exactly one profile file (or profile file descriptor) was passed,
+    // as well as a reference profile.
+    if (!((profile_files_.size() == 1) ^ (profile_files_fd_.size() == 1))) {
+ Usage("Only one profile file should be specified.");
+ }
+ if (reference_profile_file_.empty() && !FdIsValid(reference_profile_file_fd_)) {
+ Usage("No reference profile file specified.");
+ }
+
+ if (apk_files_.empty() && apks_fd_.empty()) {
+ Usage("No apk files specified");
+ }
+
+ bool use_fds = profile_files_fd_.size() == 1;
+
+ ProfileCompilationInfo profile;
+ // Do not clear if invalid. The input might be an archive.
+    bool loaded = use_fds
+        ? profile.Load(profile_files_fd_[0])
+        : profile.Load(profile_files_[0], /*clear_if_invalid*/ false);
+    if (loaded) {
+ // Open the dex files to look up classes and methods.
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
+ OpenApkFilesFromLocations(&dex_files);
+ if (!profile.UpdateProfileKeys(dex_files)) {
+ return false;
+ }
+ return use_fds
+ ? profile.Save(reference_profile_file_fd_)
+ : profile.Save(reference_profile_file_, /*bytes_written*/ nullptr);
+ } else {
+ return false;
+ }
+ }
+
private:
static void ParseFdForCollection(const StringPiece& option,
const char* arg_name,
@@ -1080,7 +1217,8 @@ class ProfMan FINAL {
static void CloseAllFds(const std::vector<int>& fds, const char* descriptor) {
for (size_t i = 0; i < fds.size(); i++) {
if (close(fds[i]) < 0) {
- PLOG(WARNING) << "Failed to close descriptor for " << descriptor << " at index " << i;
+ PLOG(WARNING) << "Failed to close descriptor for "
+ << descriptor << " at index " << i << ": " << fds[i];
}
}
}
@@ -1103,6 +1241,7 @@ class ProfMan FINAL {
bool dump_only_;
bool dump_classes_and_methods_;
bool generate_boot_image_profile_;
+ bool skip_apk_verification_;
int dump_output_to_fd_;
BootImageOptions boot_image_options_;
std::string test_profile_;
@@ -1112,6 +1251,7 @@ class ProfMan FINAL {
uint16_t test_profile_class_percentage_;
uint32_t test_profile_seed_;
uint64_t start_ns_;
+ bool copy_and_update_profile_key_;
};
// See ProfileAssistant::ProcessingResult for return codes.
@@ -1121,6 +1261,9 @@ static int profman(int argc, char** argv) {
// Parse arguments. Argument mistakes will lead to exit(EXIT_FAILURE) in UsageError.
profman.ParseArgs(argc, argv);
+ // Initialize MemMap for ZipArchive::OpenFromFd.
+ MemMap::Init();
+
if (profman.ShouldGenerateTestProfile()) {
return profman.GenerateTestProfile();
}
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 2657f4fa86..db9bceaf29 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -14,6 +14,83 @@
// limitations under the License.
//
+cc_defaults {
+ name: "libdexfile_defaults",
+ defaults: ["art_defaults"],
+ host_supported: true,
+ srcs: [
+ "dex/compact_dex_debug_info.cc",
+ "dex/compact_dex_file.cc",
+ "dex/descriptors_names.cc",
+ "dex/dex_file.cc",
+ "dex/dex_file_exception_helpers.cc",
+ "dex/dex_file_loader.cc",
+ "dex/dex_file_tracking_registrar.cc",
+ "dex/dex_file_verifier.cc",
+ "dex/dex_instruction.cc",
+ "dex/modifiers.cc",
+ "dex/standard_dex_file.cc",
+ "dex/utf.cc",
+ ],
+
+ target: {
+ android: {
+ static_libs: [
+ "libziparchive",
+ "libz",
+ "libbase",
+ ],
+ shared_libs: [
+ "libutils",
+ ],
+ },
+ host: {
+ shared_libs: [
+ "libziparchive",
+ "libz",
+ ],
+ },
+ },
+ generated_sources: ["dexfile_operator_srcs"],
+ include_dirs: [
+ "external/zlib",
+ ],
+ shared_libs: [
+ "liblog",
+ // For common macros.
+ "libbase",
+ "libz",
+ ],
+
+ // Exporting "." would shadow the system elf.h with our elf.h,
+ // which in turn breaks any tools that reference this library.
+ // export_include_dirs: ["."],
+}
+
+gensrcs {
+ name: "dexfile_operator_srcs",
+ cmd: "$(location generate-operator-out.py) art/runtime $(in) > $(out)",
+ tool_files: ["generate-operator-out.py"],
+ srcs: [
+ "dex/dex_file.h",
+ "dex/dex_file_layout.h",
+ "dex/dex_instruction.h",
+ "dex/dex_instruction_utils.h",
+ "dex/invoke_type.h",
+ ],
+ output_extension: "operator_out.cc",
+}
+
+art_cc_library {
+ name: "libdexfile",
+ defaults: ["libdexfile_defaults"],
+ // Leave the symbols in the shared library so that stack unwinders can
+ // produce meaningful name resolution.
+ strip: {
+ keep_symbols: true,
+ },
+}
+
// Keep the __jit_debug_register_code symbol as a unique symbol during ICF for architectures where
// we use gold as the linker (arm, x86, x86_64). The symbol is used by the debuggers to detect when
// new jit code is generated. We don't want it to be called when a different function with the same
@@ -56,16 +133,9 @@ cc_defaults {
"common_throws.cc",
"compiler_filter.cc",
"debugger.cc",
- "dex/compact_dex_file.cc",
- "dex/dex_file.cc",
"dex/dex_file_annotations.cc",
- "dex/dex_file_exception_helpers.cc",
"dex/dex_file_layout.cc",
- "dex/dex_file_loader.cc",
- "dex/dex_file_tracking_registrar.cc",
- "dex/dex_file_verifier.cc",
- "dex/dex_instruction.cc",
- "dex/standard_dex_file.cc",
+ "dex/art_dex_file_loader.cc",
"dex_to_dex_decompiler.cc",
"elf_file.cc",
"exec_utils.cc",
@@ -222,7 +292,6 @@ cc_defaults {
"trace.cc",
"transaction.cc",
"type_lookup_table.cc",
- "utf.cc",
"utils.cc",
"vdex_file.cc",
"verifier/instruction_flags.cc",
@@ -418,6 +487,7 @@ cc_defaults {
"jni_platform_headers",
],
shared_libs: [
+ "libdexfile",
"libnativebridge",
"libnativeloader",
"libbacktrace",
@@ -454,10 +524,7 @@ gensrcs {
"debugger.h",
"base/unix_file/fd_file.h",
"class_status.h",
- "dex/dex_file.h",
"dex/dex_file_layout.h",
- "dex/dex_instruction.h",
- "dex/dex_instruction_utils.h",
"gc_root.h",
"gc/allocator_type.h",
"gc/allocator/rosalloc.h",
@@ -470,7 +537,6 @@ gensrcs {
"image.h",
"instrumentation.h",
"indirect_reference_table.h",
- "invoke_type.h",
"jdwp_provider.h",
"jdwp/jdwp.h",
"jdwp/jdwp_constants.h",
@@ -572,10 +638,12 @@ art_cc_test {
"class_table_test.cc",
"compiler_filter_test.cc",
"dex/code_item_accessors_test.cc",
+ "dex/compact_dex_debug_info_test.cc",
"dex/compact_dex_file_test.cc",
"dex/dex_file_test.cc",
"dex/dex_file_verifier_test.cc",
"dex/dex_instruction_test.cc",
+ "dex/utf_test.cc",
"entrypoints/math_entrypoints_test.cc",
"entrypoints/quick/quick_trampoline_entrypoints_test.cc",
"entrypoints_order_test.cc",
@@ -610,6 +678,7 @@ art_cc_test {
"leb128_test.cc",
"mem_map_test.cc",
"memory_region_test.cc",
+ "method_handles_test.cc",
"mirror/dex_cache_test.cc",
"mirror/method_type_test.cc",
"mirror/object_test.cc",
@@ -620,6 +689,7 @@ art_cc_test {
"oat_file_assistant_test.cc",
"parsed_options_test.cc",
"prebuilt_tools_test.cc",
+ "primitive_test.cc",
"reference_table_test.cc",
"runtime_callbacks_test.cc",
"subtype_check_info_test.cc",
@@ -627,7 +697,6 @@ art_cc_test {
"thread_pool_test.cc",
"transaction_test.cc",
"type_lookup_table_test.cc",
- "utf_test.cc",
"utils_test.cc",
"vdex_file_test.cc",
"verifier/method_verifier_test.cc",
@@ -636,6 +705,7 @@ art_cc_test {
],
shared_libs: [
"libbacktrace",
+ "libziparchive",
],
header_libs: [
"art_cmdlineparser_headers", // For parsed_options_test.
diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc
index 78b9e46d77..80080e9832 100644
--- a/runtime/arch/arm/entrypoints_init_arm.cc
+++ b/runtime/arch/arm/entrypoints_init_arm.cc
@@ -144,6 +144,7 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
qpoints->pAsin = asin;
qpoints->pAtan = atan;
qpoints->pAtan2 = atan2;
+ qpoints->pPow = pow;
qpoints->pCbrt = cbrt;
qpoints->pCosh = cosh;
qpoints->pExp = exp;
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index c09baea72a..737d2a86a1 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -794,27 +794,24 @@ END art_quick_unlock_object_no_inline
.extern artInstanceOfFromCode
.extern artThrowClassCastExceptionForObject
ENTRY art_quick_check_instance_of
- push {r0-r1, lr} @ save arguments, link register and pad
- .cfi_adjust_cfa_offset 12
+ push {r0-r2, lr} @ save arguments, padding (r2) and link register
+ .cfi_adjust_cfa_offset 16
.cfi_rel_offset r0, 0
.cfi_rel_offset r1, 4
- .cfi_rel_offset lr, 8
- sub sp, #4
- .cfi_adjust_cfa_offset 4
+ .cfi_rel_offset r2, 8
+ .cfi_rel_offset lr, 12
bl artInstanceOfFromCode
cbz r0, .Lthrow_class_cast_exception
- add sp, #4
- .cfi_adjust_cfa_offset -4
- pop {r0-r1, pc}
- .cfi_adjust_cfa_offset 4 @ Reset unwind info so following code unwinds.
+ pop {r0-r2, pc}
+
.Lthrow_class_cast_exception:
- add sp, #4
- .cfi_adjust_cfa_offset -4
- pop {r0-r1, lr}
- .cfi_adjust_cfa_offset -12
+ pop {r0-r2, lr}
+ .cfi_adjust_cfa_offset -16
.cfi_restore r0
.cfi_restore r1
+ .cfi_restore r2
.cfi_restore lr
+
SETUP_SAVE_ALL_CALLEE_SAVES_FRAME r2 @ save all registers as basis for long jump context
mov r2, r9 @ pass Thread::Current
bl artThrowClassCastExceptionForObject @ (Object*, Class*, Thread*)
diff --git a/runtime/arch/arm64/entrypoints_init_arm64.cc b/runtime/arch/arm64/entrypoints_init_arm64.cc
index 80bf3abc6f..4c43b7ed3d 100644
--- a/runtime/arch/arm64/entrypoints_init_arm64.cc
+++ b/runtime/arch/arm64/entrypoints_init_arm64.cc
@@ -168,6 +168,7 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
qpoints->pAsin = asin;
qpoints->pAtan = atan;
qpoints->pAtan2 = atan2;
+ qpoints->pPow = pow;
qpoints->pCbrt = cbrt;
qpoints->pCosh = cosh;
qpoints->pExp = exp;
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 96a1cadab9..b0e7b0a964 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -1341,12 +1341,14 @@ ENTRY art_quick_check_instance_of
// Call runtime code
bl artInstanceOfFromCode
+ // Restore LR.
+ RESTORE_REG xLR, 24
+
// Check for exception
cbz x0, .Lthrow_class_cast_exception
// Restore and return
.cfi_remember_state
- RESTORE_REG xLR, 24
RESTORE_TWO_REGS_DECREASE_FRAME x0, x1, 32
ret
.cfi_restore_state // Reset unwind info so following code unwinds.
@@ -1354,7 +1356,6 @@ ENTRY art_quick_check_instance_of
.Lthrow_class_cast_exception:
// Restore
- RESTORE_REG xLR, 24
RESTORE_TWO_REGS_DECREASE_FRAME x0, x1, 32
SETUP_SAVE_ALL_CALLEE_SAVES_FRAME // save all registers as basis for long jump context
diff --git a/runtime/arch/mips/entrypoints_direct_mips.h b/runtime/arch/mips/entrypoints_direct_mips.h
index 1020781b95..3a6625f9cd 100644
--- a/runtime/arch/mips/entrypoints_direct_mips.h
+++ b/runtime/arch/mips/entrypoints_direct_mips.h
@@ -54,6 +54,7 @@ static constexpr bool IsDirectEntrypoint(QuickEntrypointEnum entrypoint) {
entrypoint == kQuickAsin ||
entrypoint == kQuickAtan ||
entrypoint == kQuickAtan2 ||
+ entrypoint == kQuickPow ||
entrypoint == kQuickCbrt ||
entrypoint == kQuickCosh ||
entrypoint == kQuickExp ||
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
index 209f36705a..badee59568 100644
--- a/runtime/arch/mips/entrypoints_init_mips.cc
+++ b/runtime/arch/mips/entrypoints_init_mips.cc
@@ -348,6 +348,8 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
static_assert(IsDirectEntrypoint(kQuickAtan), "Direct C stub marked non-direct.");
qpoints->pAtan2 = atan2;
static_assert(IsDirectEntrypoint(kQuickAtan2), "Direct C stub marked non-direct.");
+ qpoints->pPow = pow;
+ static_assert(IsDirectEntrypoint(kQuickPow), "Direct C stub marked non-direct.");
qpoints->pCbrt = cbrt;
static_assert(IsDirectEntrypoint(kQuickCbrt), "Direct C stub marked non-direct.");
qpoints->pCosh = cosh;
diff --git a/runtime/arch/mips64/entrypoints_init_mips64.cc b/runtime/arch/mips64/entrypoints_init_mips64.cc
index 35cbd1dcc0..bdfb9421df 100644
--- a/runtime/arch/mips64/entrypoints_init_mips64.cc
+++ b/runtime/arch/mips64/entrypoints_init_mips64.cc
@@ -165,6 +165,7 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
qpoints->pAsin = asin;
qpoints->pAtan = atan;
qpoints->pAtan2 = atan2;
+ qpoints->pPow = pow;
qpoints->pCbrt = cbrt;
qpoints->pCosh = cosh;
qpoints->pExp = exp;
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index 63f4f6cb8c..58e0e44813 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -2207,8 +2207,9 @@ ENTRY art_quick_instrumentation_entry
# Deliver exception if we got nullptr as function.
move $t9, $v0 # $t9 holds reference to code
ld $a0, 8($sp) # Restore arg0.
+ dla $v0, art_quick_instrumentation_exit
RESTORE_SAVE_REFS_AND_ARGS_FRAME
- dla $ra, art_quick_instrumentation_exit
+ move $ra, $v0
jic $t9, 0 # call method, returning to art_quick_instrumentation_exit
.Ldeliver_instrumentation_entry_exception:
RESTORE_SAVE_REFS_AND_ARGS_FRAME
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index bd51809c22..4be4b12611 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -186,10 +186,9 @@ class StubTest : public CommonRuntimeTest {
"stp x2, x3, [sp, #16]\n\t"
"stp x4, x5, [sp, #32]\n\t"
"stp x6, x7, [sp, #48]\n\t"
- // To be extra defensive, store x20. We do this because some of the stubs might make a
+      // To be extra defensive, store x20, x21. We do this because some of the stubs might make a
// transition into the runtime via the blr instruction below and *not* save x20.
- "str x20, [sp, #64]\n\t"
- // 8 byte buffer
+ "stp x20, x21, [sp, #64]\n\t"
"sub sp, sp, #16\n\t" // Reserve stack space, 16B aligned
".cfi_adjust_cfa_offset 16\n\t"
@@ -288,7 +287,7 @@ class StubTest : public CommonRuntimeTest {
"ldp x2, x3, [sp, #16]\n\t"
"ldp x4, x5, [sp, #32]\n\t"
"ldp x6, x7, [sp, #48]\n\t"
- "ldr x20, [sp, #64]\n\t"
+ "ldp x20, x21, [sp, #64]\n\t"
"add sp, sp, #80\n\t" // Free stack space, now sp as on entry
".cfi_adjust_cfa_offset -80\n\t"
@@ -312,8 +311,9 @@ class StubTest : public CommonRuntimeTest {
// -fstack-protector-strong. According to AAPCS64 registers x9-x15 are caller-saved,
// which means we should unclobber one of the callee-saved registers that are unused.
// Here we use x20.
+      // http://b/72613441: Clang 7.0 asks for one more register, so we save/restore x21
+      // above instead of listing it as clobbered here.
: "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19",
- "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "x30",
+ "x22", "x23", "x24", "x25", "x26", "x27", "x28", "x30",
"d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
"d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
"d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
diff --git a/runtime/arch/x86/entrypoints_init_x86.cc b/runtime/arch/x86/entrypoints_init_x86.cc
index 24bf9cc07c..ffb0c94cc7 100644
--- a/runtime/arch/x86/entrypoints_init_x86.cc
+++ b/runtime/arch/x86/entrypoints_init_x86.cc
@@ -68,6 +68,7 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
qpoints->pAsin = asin;
qpoints->pAtan = atan;
qpoints->pAtan2 = atan2;
+ qpoints->pPow = pow;
qpoints->pCbrt = cbrt;
qpoints->pCosh = cosh;
qpoints->pExp = exp;
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 93cb6656dc..5a28120b30 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -1436,17 +1436,18 @@ DEFINE_FUNCTION art_quick_check_instance_of
PUSH eax // pass arg1 - obj
call SYMBOL(artInstanceOfFromCode) // (Object* obj, Class* ref_klass)
testl %eax, %eax
- jz 1f // jump forward if not assignable
+ jz .Lthrow_class_cast_exception // jump forward if not assignable
addl LITERAL(12), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-12)
ret
-
CFI_ADJUST_CFA_OFFSET(12) // Reset unwind info so following code unwinds.
-1:
+
+.Lthrow_class_cast_exception:
POP eax // pop arguments
POP ecx
addl LITERAL(4), %esp
CFI_ADJUST_CFA_OFFSET(-4)
+
SETUP_SAVE_ALL_CALLEE_SAVES_FRAME ebx, ebx // save all registers as basis for long jump context
// Outgoing argument set up
PUSH eax // alignment padding
diff --git a/runtime/arch/x86_64/entrypoints_init_x86_64.cc b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
index 3656f83b58..6bae69c495 100644
--- a/runtime/arch/x86_64/entrypoints_init_x86_64.cc
+++ b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
@@ -91,6 +91,7 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
qpoints->pAsin = asin;
qpoints->pAtan = atan;
qpoints->pAtan2 = atan2;
+ qpoints->pPow = pow;
qpoints->pCbrt = cbrt;
qpoints->pCosh = cosh;
qpoints->pExp = exp;
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 85f972309b..781ade99ce 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -1410,21 +1410,21 @@ DEFINE_FUNCTION art_quick_check_instance_of
SETUP_FP_CALLEE_SAVE_FRAME
call SYMBOL(artInstanceOfFromCode) // (Object* obj, Class* ref_klass)
testq %rax, %rax
- jz 1f // jump forward if not assignable
+ jz .Lthrow_class_cast_exception // jump forward if not assignable
+ CFI_REMEMBER_STATE
RESTORE_FP_CALLEE_SAVE_FRAME
addq LITERAL(24), %rsp // pop arguments
CFI_ADJUST_CFA_OFFSET(-24)
-
-.Lreturn:
ret
+ CFI_RESTORE_STATE // Reset unwind info so following code unwinds.
- CFI_ADJUST_CFA_OFFSET(24 + 4 * 8) // Reset unwind info so following code unwinds.
-1:
+.Lthrow_class_cast_exception:
RESTORE_FP_CALLEE_SAVE_FRAME
addq LITERAL(8), %rsp // pop padding
CFI_ADJUST_CFA_OFFSET(-8)
POP rsi // Pop arguments
POP rdi
+
SETUP_SAVE_ALL_CALLEE_SAVES_FRAME // save all registers as basis for long jump context
mov %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
call SYMBOL(artThrowClassCastExceptionForObject) // (Object* src, Class* dest, Thread*)
diff --git a/runtime/art_field.h b/runtime/art_field.h
index 46b013da7e..0eeeef2f2f 100644
--- a/runtime/art_field.h
+++ b/runtime/art_field.h
@@ -20,8 +20,8 @@
#include <jni.h>
#include "dex/dex_file_types.h"
+#include "dex/modifiers.h"
#include "gc_root.h"
-#include "modifiers.h"
#include "obj_ptr.h"
#include "offsets.h"
#include "primitive.h"
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index bdebe2d9e9..e6e35c89c9 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -27,8 +27,9 @@
#include "dex/dex_file-inl.h"
#include "dex/dex_file_annotations.h"
#include "dex/dex_file_types.h"
+#include "dex/invoke_type.h"
#include "gc_root-inl.h"
-#include "invoke_type.h"
+#include "intrinsics_enum.h"
#include "jit/profiling_info.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache-inl.h"
@@ -376,6 +377,22 @@ inline bool ArtMethod::HasSingleImplementation() {
return (GetAccessFlags() & kAccSingleImplementation) != 0;
}
+inline bool ArtMethod::IsHiddenIntrinsic(uint32_t ordinal) {
+ switch (static_cast<Intrinsics>(ordinal)) {
+ case Intrinsics::kReferenceGetReferent:
+ case Intrinsics::kSystemArrayCopyChar:
+ case Intrinsics::kStringGetCharsNoCheck:
+ case Intrinsics::kVarHandleFullFence:
+ case Intrinsics::kVarHandleAcquireFence:
+ case Intrinsics::kVarHandleReleaseFence:
+ case Intrinsics::kVarHandleLoadLoadFence:
+ case Intrinsics::kVarHandleStoreStoreFence:
+ return true;
+ default:
+ return false;
+ }
+}
+
inline void ArtMethod::SetIntrinsic(uint32_t intrinsic) {
// Currently we only do intrinsics for static/final methods or methods of final
// classes. We don't set kHasSingleImplementation for those methods.
@@ -398,6 +415,7 @@ inline void ArtMethod::SetIntrinsic(uint32_t intrinsic) {
bool is_default_conflict = IsDefaultConflicting();
bool is_compilable = IsCompilable();
bool must_count_locks = MustCountLocks();
+ HiddenApiAccessFlags::ApiList hidden_api_list = GetHiddenApiAccessFlags();
SetAccessFlags(new_value);
DCHECK_EQ(java_flags, (GetAccessFlags() & kAccJavaFlagsMask));
DCHECK_EQ(is_constructor, IsConstructor());
@@ -411,6 +429,15 @@ inline void ArtMethod::SetIntrinsic(uint32_t intrinsic) {
DCHECK_EQ(is_default_conflict, IsDefaultConflicting());
DCHECK_EQ(is_compilable, IsCompilable());
DCHECK_EQ(must_count_locks, MustCountLocks());
+ if (kIsDebugBuild) {
+ if (IsHiddenIntrinsic(intrinsic)) {
+ // Special case some of our intrinsics because the access flags clash
+ // with the intrinsics ordinal.
+ DCHECK_EQ(HiddenApiAccessFlags::kWhitelist, GetHiddenApiAccessFlags());
+ } else {
+ DCHECK_EQ(hidden_api_list, GetHiddenApiAccessFlags());
+ }
+ }
} else {
SetAccessFlags(new_value);
}
@@ -459,7 +486,15 @@ inline void ArtMethod::UpdateEntrypoints(const Visitor& visitor, PointerSize poi
}
inline CodeItemInstructionAccessor ArtMethod::DexInstructions() {
- return CodeItemInstructionAccessor(this);
+ return CodeItemInstructionAccessor(*GetDexFile(), GetCodeItem());
+}
+
+inline CodeItemDataAccessor ArtMethod::DexInstructionData() {
+ return CodeItemDataAccessor(*GetDexFile(), GetCodeItem());
+}
+
+inline CodeItemDebugInfoAccessor ArtMethod::DexInstructionDebugInfo() {
+ return CodeItemDebugInfoAccessor(*GetDexFile(), GetCodeItem(), GetDexMethodIndex());
}
} // namespace art
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index 44a5dde485..efdf5991ec 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -272,7 +272,7 @@ uint32_t ArtMethod::FindCatchBlock(Handle<mirror::Class> exception_type,
// Default to handler not found.
uint32_t found_dex_pc = dex::kDexNoIndex;
// Iterate over the catch handlers associated with dex_pc.
- CodeItemDataAccessor accessor(this);
+ CodeItemDataAccessor accessor(DexInstructionData());
for (CatchHandlerIterator it(accessor, dex_pc); it.HasNext(); it.Next()) {
dex::TypeIndex iter_type_idx = it.GetHandlerTypeIndex();
// Catch all case
@@ -562,14 +562,14 @@ bool ArtMethod::EqualParameters(Handle<mirror::ObjectArray<mirror::Class>> param
return true;
}
-const uint8_t* ArtMethod::GetQuickenedInfo() {
+ArrayRef<const uint8_t> ArtMethod::GetQuickenedInfo() {
const DexFile& dex_file = GetDeclaringClass()->GetDexFile();
const OatFile::OatDexFile* oat_dex_file = dex_file.GetOatDexFile();
if (oat_dex_file == nullptr || (oat_dex_file->GetOatFile() == nullptr)) {
- return nullptr;
+ return ArrayRef<const uint8_t>();
}
- return oat_dex_file->GetOatFile()->GetVdexFile()->GetQuickenedInfoOf(
- dex_file, GetCodeItemOffset());
+ return oat_dex_file->GetOatFile()->GetVdexFile()->GetQuickenedInfoOf(dex_file,
+ GetDexMethodIndex());
}
const OatQuickMethodHeader* ArtMethod::GetOatQuickMethodHeader(uintptr_t pc) {
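With the ArrayRef return type above, callers test for emptiness instead of a null pointer and get the length alongside the data. A minimal hypothetical helper (sketch only, assuming art_method-inl.h is included):

inline bool HasQuickenedInfo(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) {
  // The old 'GetQuickenedInfo() != nullptr' check becomes an emptiness check; the size no
  // longer has to be re-derived from the vdex file.
  return !method->GetQuickenedInfo().empty();
}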
diff --git a/runtime/art_method.h b/runtime/art_method.h
index c4a586ed92..cec2ec4df2 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -21,6 +21,7 @@
#include <android-base/logging.h>
+#include "base/array_ref.h"
#include "base/bit_utils.h"
#include "base/casts.h"
#include "base/enums.h"
@@ -30,8 +31,8 @@
#include "dex/code_item_accessors.h"
#include "dex/dex_file.h"
#include "dex/dex_instruction_iterator.h"
+#include "dex/modifiers.h"
#include "gc_root.h"
-#include "modifiers.h"
#include "obj_ptr.h"
#include "offsets.h"
#include "primitive.h"
@@ -335,6 +336,10 @@ class ArtMethod FINAL {
AddAccessFlags(kAccMustCountLocks);
}
+ HiddenApiAccessFlags::ApiList GetHiddenApiAccessFlags() {
+ return HiddenApiAccessFlags::DecodeFromRuntime(GetAccessFlags());
+ }
+
// Returns true if this method could be overridden by a default method.
bool IsOverridableByDefaultMethod() REQUIRES_SHARED(Locks::mutator_lock_);
@@ -662,7 +667,11 @@ class ArtMethod FINAL {
return hotness_count_;
}
- const uint8_t* GetQuickenedInfo() REQUIRES_SHARED(Locks::mutator_lock_);
+ static MemberOffset HotnessCountOffset() {
+ return MemberOffset(OFFSETOF_MEMBER(ArtMethod, hotness_count_));
+ }
+
+ ArrayRef<const uint8_t> GetQuickenedInfo() REQUIRES_SHARED(Locks::mutator_lock_);
// Returns the method header for the compiled code containing 'pc'. Note that runtime
// methods will return null for this method, as they are not oat based.
@@ -722,6 +731,14 @@ class ArtMethod FINAL {
ALWAYS_INLINE CodeItemInstructionAccessor DexInstructions()
REQUIRES_SHARED(Locks::mutator_lock_);
+ // Returns the dex code item data section of the DexFile for the art method.
+ ALWAYS_INLINE CodeItemDataAccessor DexInstructionData()
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ // Returns the dex code item debug info section of the DexFile for the art method.
+ ALWAYS_INLINE CodeItemDebugInfoAccessor DexInstructionDebugInfo()
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
protected:
// Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
// The class we are a part of.
@@ -748,7 +765,7 @@ class ArtMethod FINAL {
// ifTable.
uint16_t method_index_;
- // The hotness we measure for this method. Managed by the interpreter. Not atomic, as we allow
+ // The hotness we measure for this method. Not atomic, as we allow
// missing increments: if the method is hot, we will see it eventually.
uint16_t hotness_count_;
@@ -841,6 +858,9 @@ class ArtMethod FINAL {
} while (!access_flags_.compare_exchange_weak(old_access_flags, new_access_flags));
}
+ // Returns true if the given intrinsic is considered hidden.
+ bool IsHiddenIntrinsic(uint32_t ordinal);
+
DISALLOW_COPY_AND_ASSIGN(ArtMethod); // Need to use CopyFrom to deal with 32 vs 64 bits.
};
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index 3cf2b93690..2f7d6ab98f 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -73,7 +73,7 @@ ADD_TEST_EQ(THREAD_LOCAL_OBJECTS_OFFSET,
// Offset of field Thread::tlsPtr_.mterp_current_ibase.
#define THREAD_CURRENT_IBASE_OFFSET \
- (THREAD_LOCAL_OBJECTS_OFFSET + __SIZEOF_SIZE_T__ + (1 + 161) * __SIZEOF_POINTER__)
+ (THREAD_LOCAL_OBJECTS_OFFSET + __SIZEOF_SIZE_T__ + (1 + 162) * __SIZEOF_POINTER__)
ADD_TEST_EQ(THREAD_CURRENT_IBASE_OFFSET,
art::Thread::MterpCurrentIBaseOffset<POINTER_SIZE>().Int32Value())
// Offset of field Thread::tlsPtr_.mterp_default_ibase.
diff --git a/runtime/base/arena_allocator.cc b/runtime/base/arena_allocator.cc
index cc413c5ab9..e87f631c2e 100644
--- a/runtime/base/arena_allocator.cc
+++ b/runtime/base/arena_allocator.cc
@@ -95,6 +95,7 @@ const char* const ArenaAllocatorStatsImpl<kCount>::kAllocNames[] = {
"CHA ",
"Scheduler ",
"Profile ",
+ "SBCloner ",
};
template <bool kCount>
diff --git a/runtime/base/arena_allocator.h b/runtime/base/arena_allocator.h
index 9e03658aef..beaba67fe0 100644
--- a/runtime/base/arena_allocator.h
+++ b/runtime/base/arena_allocator.h
@@ -101,6 +101,7 @@ enum ArenaAllocKind {
kArenaAllocCHA,
kArenaAllocScheduler,
kArenaAllocProfile,
+ kArenaAllocSuperblockCloner,
kNumArenaAllocKinds
};
diff --git a/runtime/base/bit_string.h b/runtime/base/bit_string.h
index bfbe8eaf71..7d9fb70de7 100644
--- a/runtime/base/bit_string.h
+++ b/runtime/base/bit_string.h
@@ -114,13 +114,13 @@ inline std::ostream& operator<<(std::ostream& os, const BitStringChar& bc) {
/**
* BitString
*
- * lsb (least significant bit) msb
- * +------------+------------+------------+-----+------------+
- * | | | | | |
- * | Char0 | Char1 | Char2 | ... | CharN |
- * | | | | | |
- * +------------+------------+------------+-----+------------+
- * <- len[0] -> <- len[1] -> <- len[2] -> ... <- len[N] ->
+ * MSB (most significant bit) LSB
+ * +------------+-----+------------+------------+------------+
+ * | | | | | |
+ * | CharN | ... | Char2 | Char1 | Char0 |
+ * | | | | | |
+ * +------------+-----+------------+------------+------------+
+ * <- len[N] -> ... <- len[2] -> <- len[1] -> <- len[0] ->
*
* Stores up to "N+1" characters in a subset of a machine word. Each character has a different
* bitlength, as defined by len[pos]. This BitString can be nested inside of a BitStruct
@@ -145,7 +145,7 @@ struct BitString {
// As this is meant to be used only with "SubtypeCheckInfo",
// the bitlengths and the maximum string length is tuned by maximizing the coverage of "Assigned"
// bitstrings for instance-of and check-cast targets during Optimizing compilation.
- static constexpr size_t kBitSizeAtPosition[] = {12, 3, 8}; // len[] from header docs.
+ static constexpr size_t kBitSizeAtPosition[] = {12, 4, 11}; // len[] from header docs.
static constexpr size_t kCapacity = arraysize(kBitSizeAtPosition); // MaxBitstringLen above.
// How many bits are needed to represent BitString[0..position)?
@@ -165,8 +165,7 @@ struct BitString {
// (e.g. to use with BitField{Insert,Extract,Clear}.)
static constexpr size_t GetLsbForPosition(size_t position) {
DCHECK_GE(kCapacity, position);
- constexpr size_t kMaximumBitLength = GetBitLengthTotalAtPosition(kCapacity);
- return kMaximumBitLength - GetBitLengthTotalAtPosition(position + 1u);
+ return GetBitLengthTotalAtPosition(position);
}
// How many bits are needed for a BitStringChar at the position?
@@ -183,9 +182,7 @@ struct BitString {
BitStringChar operator[](size_t idx) const {
DCHECK_LT(idx, kCapacity);
- StorageType data =
- BitFieldExtract(storage_,
- GetLsbForPosition(idx), kBitSizeAtPosition[idx]);
+ StorageType data = BitFieldExtract(storage_, GetLsbForPosition(idx), kBitSizeAtPosition[idx]);
return BitStringChar(data, kBitSizeAtPosition[idx]);
}
@@ -259,17 +256,10 @@ struct BitString {
DCHECK_GE(kCapacity, end);
BitString copy = *this;
- size_t bit_size = 0;
- for (size_t idx = end; idx < kCapacity; ++idx) {
- bit_size += kBitSizeAtPosition[idx];
- }
- // TODO: precompute above table.
-
- if (bit_size > 0) {
- StorageType data =
- BitFieldClear(copy.storage_,
- GetLsbForPosition(kCapacity),
- bit_size);
+ if (end < kCapacity) {
+ size_t lsb = GetLsbForPosition(end);
+ size_t bit_size = GetLsbForPosition(kCapacity) - lsb;
+ StorageType data = BitFieldClear(copy.storage_, lsb, bit_size);
copy.storage_ = data;
}
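Under the new LSB-first layout with the tuned lengths {12, 4, 11}, the per-character offsets can be checked by hand. A standalone sketch (not part of the patch) that recomputes what GetLsbForPosition() now returns:

#include <cstddef>

constexpr size_t kBitSizeAtPosition[] = {12, 4, 11};  // mirrors bit_string.h above

constexpr size_t LsbForPosition(size_t position) {
  size_t lsb = 0;
  for (size_t i = 0; i < position; ++i) {
    lsb += kBitSizeAtPosition[i];  // characters below 'position' now occupy the low bits
  }
  return lsb;
}

static_assert(LsbForPosition(0) == 0u, "Char0 starts at the least significant bit");
static_assert(LsbForPosition(1) == 12u, "Char1 sits directly above Char0");
static_assert(LsbForPosition(2) == 16u, "Char2 sits above Char0 and Char1");
static_assert(LsbForPosition(3) == 27u, "27 bits in total for all three characters");

Truncate(end) then simply clears the bit range [LsbForPosition(end), LsbForPosition(kCapacity)), which is what the simplified BitFieldClear call above does.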
diff --git a/runtime/base/bit_string_test.cc b/runtime/base/bit_string_test.cc
index 96aa154ef3..23274e3f2f 100644
--- a/runtime/base/bit_string_test.cc
+++ b/runtime/base/bit_string_test.cc
@@ -65,7 +65,7 @@ size_t AsUint(const T& value) {
return uint_value;
}
-// Make max bitstring, e.g. BitString[4095,7,255] for {12,3,8}
+// Make max bitstring, e.g. BitString[4095,15,2047] for {12,4,11}
template <size_t kCount = BitString::kCapacity>
BitString MakeBitStringMax() {
BitString bs{};
@@ -87,15 +87,14 @@ BitString SetBitStringCharAt(BitString bit_string, size_t i, size_t val) {
#define EXPECT_BITSTRING_STR(expected_str, actual_value) \
EXPECT_STREQ((expected_str), Stringify((actual_value)).c_str())
+// TODO: Consider removing this test; it largely replicates the logic in GetLsbForPosition().
TEST(InstanceOfBitString, GetLsbForPosition) {
ASSERT_LE(3u, BitString::kCapacity);
// Test will fail if kCapacity is not at least 3. Update it.
- EXPECT_EQ(0u, BitString::GetLsbForPosition(BitString::kCapacity - 1u));
- EXPECT_EQ(BitString::kBitSizeAtPosition[BitString::kCapacity - 1u],
- BitString::GetLsbForPosition(BitString::kCapacity - 2u));
- EXPECT_EQ(BitString::kBitSizeAtPosition[BitString::kCapacity - 1u] +
- BitString::kBitSizeAtPosition[BitString::kCapacity - 2u],
- BitString::GetLsbForPosition(BitString::kCapacity - 3u));
+ EXPECT_EQ(0u, BitString::GetLsbForPosition(0u));
+ EXPECT_EQ(BitString::kBitSizeAtPosition[0u], BitString::GetLsbForPosition(1u));
+ EXPECT_EQ(BitString::kBitSizeAtPosition[0u] + BitString::kBitSizeAtPosition[1u],
+ BitString::GetLsbForPosition(2u));
}
TEST(InstanceOfBitString, ToString) {
@@ -126,8 +125,8 @@ TEST(InstanceOfBitString, ReadWrite) {
// Each maximal value should be tested here for each position.
uint32_t max_bitstring_ints[] = {
MaxInt<uint32_t>(12),
- MaxInt<uint32_t>(3),
- MaxInt<uint32_t>(8),
+ MaxInt<uint32_t>(4),
+ MaxInt<uint32_t>(11),
};
// Update tests if changing the tuning values above.
@@ -151,14 +150,13 @@ constexpr auto MaxForPos() {
}
TEST(InstanceOfBitString, MemoryRepresentation) {
- // Verify that the lower positions are stored in more significant bits.
+ // Verify that the lower positions are stored in less significant bits.
BitString bs = MakeBitString({MaxForPos<0>(), MaxForPos<1>()});
BitString::StorageType as_int = static_cast<BitString::StorageType>(bs);
- // Below tests assumes the capacity is 3. Update if it this changes.
- ASSERT_EQ(3u, BitString::kCapacity);
- EXPECT_EQ(MaxForPos<0>() << (BitString::kBitSizeAtPosition[2] + BitString::kBitSizeAtPosition[1]) |
- (MaxForPos<1>() << BitString::kBitSizeAtPosition[2]),
+ // Below tests assume the capacity is at least 3.
+ ASSERT_LE(3u, BitString::kCapacity);
+ EXPECT_EQ((MaxForPos<0>() << 0) | (MaxForPos<1>() << BitString::kBitSizeAtPosition[0]),
as_int);
}
diff --git a/runtime/base/bit_utils.h b/runtime/base/bit_utils.h
index 34cddbff6a..d2a99f1a39 100644
--- a/runtime/base/bit_utils.h
+++ b/runtime/base/bit_utils.h
@@ -46,10 +46,14 @@ template<typename T>
constexpr int CLZ(T x) {
static_assert(std::is_integral<T>::value, "T must be integral");
static_assert(std::is_unsigned<T>::value, "T must be unsigned");
- static_assert(sizeof(T) <= sizeof(long long), // NOLINT [runtime/int] [4]
- "T too large, must be smaller than long long");
+ static_assert(std::numeric_limits<T>::radix == 2, "Unexpected radix!");
+ static_assert(sizeof(T) == sizeof(uint64_t) || sizeof(T) <= sizeof(uint32_t),
+ "Unsupported sizeof(T)");
DCHECK_NE(x, 0u);
- return (sizeof(T) == sizeof(uint32_t)) ? __builtin_clz(x) : __builtin_clzll(x);
+ constexpr bool is_64_bit = (sizeof(T) == sizeof(uint64_t));
+ constexpr size_t adjustment =
+ is_64_bit ? 0u : std::numeric_limits<uint32_t>::digits - std::numeric_limits<T>::digits;
+ return is_64_bit ? __builtin_clzll(x) : __builtin_clz(x) - adjustment;
}
// Similar to CLZ except that on zero input it returns bitwidth and supports signed integers.
@@ -65,10 +69,10 @@ constexpr int CTZ(T x) {
static_assert(std::is_integral<T>::value, "T must be integral");
// It is not unreasonable to ask for trailing zeros in a negative number. As such, do not check
// that T is an unsigned type.
- static_assert(sizeof(T) <= sizeof(long long), // NOLINT [runtime/int] [4]
- "T too large, must be smaller than long long");
+ static_assert(sizeof(T) == sizeof(uint64_t) || sizeof(T) <= sizeof(uint32_t),
+ "Unsupported sizeof(T)");
DCHECK_NE(x, static_cast<T>(0));
- return (sizeof(T) == sizeof(uint32_t)) ? __builtin_ctz(x) : __builtin_ctzll(x);
+ return (sizeof(T) == sizeof(uint64_t)) ? __builtin_ctzll(x) : __builtin_ctz(x);
}
// Similar to CTZ except that on zero input it returns bitwidth and supports signed integers.
diff --git a/runtime/base/file_utils.cc b/runtime/base/file_utils.cc
index 63b4ac56d0..58990f344b 100644
--- a/runtime/base/file_utils.cc
+++ b/runtime/base/file_utils.cc
@@ -50,10 +50,10 @@
#include "dex/dex_file-inl.h"
#include "dex/dex_file_loader.h"
#include "dex/dex_instruction.h"
+#include "dex/utf-inl.h"
#include "oat_quick_method_header.h"
#include "os.h"
#include "scoped_thread_state_change-inl.h"
-#include "utf-inl.h"
#if defined(__APPLE__)
#include <crt_externs.h>
@@ -353,4 +353,9 @@ int MadviseLargestPageAlignedRegion(const uint8_t* begin, const uint8_t* end, in
return 0;
}
+bool LocationIsOnSystem(const char* location) {
+ UniqueCPtr<const char[]> path(realpath(location, nullptr));
+ return path != nullptr && android::base::StartsWith(path.get(), GetAndroidRoot().c_str());
+}
+
} // namespace art
diff --git a/runtime/base/file_utils.h b/runtime/base/file_utils.h
index e4555ad3cb..cac0950d9c 100644
--- a/runtime/base/file_utils.h
+++ b/runtime/base/file_utils.h
@@ -82,6 +82,9 @@ int64_t GetFileSizeBytes(const std::string& filename);
// Madvise the largest page aligned region within begin and end.
int MadviseLargestPageAlignedRegion(const uint8_t* begin, const uint8_t* end, int advice);
+// Return whether the location is on system (i.e. android root).
+bool LocationIsOnSystem(const char* location);
+
} // namespace art
#endif // ART_RUNTIME_BASE_FILE_UTILS_H_
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 9f17ad051c..a4c32dd814 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -74,6 +74,7 @@ Uninterruptible Roles::uninterruptible_;
ReaderWriterMutex* Locks::jni_globals_lock_ = nullptr;
Mutex* Locks::jni_weak_globals_lock_ = nullptr;
ReaderWriterMutex* Locks::dex_lock_ = nullptr;
+Mutex* Locks::native_debug_interface_lock_ = nullptr;
std::vector<BaseMutex*> Locks::expected_mutexes_on_weak_ref_access_;
Atomic<const BaseMutex*> Locks::expected_mutexes_on_weak_ref_access_guard_;
@@ -1073,6 +1074,7 @@ void Locks::Init() {
DCHECK(unexpected_signal_lock_ != nullptr);
DCHECK(user_code_suspension_lock_ != nullptr);
DCHECK(dex_lock_ != nullptr);
+ DCHECK(native_debug_interface_lock_ != nullptr);
} else {
// Create global locks in level order from highest lock level to lowest.
LockLevel current_lock_level = kInstrumentEntrypointsLock;
@@ -1228,6 +1230,10 @@ void Locks::Init() {
DCHECK(unexpected_signal_lock_ == nullptr);
unexpected_signal_lock_ = new Mutex("unexpected signal lock", current_lock_level, true);
+ UPDATE_CURRENT_LOCK_LEVEL(kNativeDebugInterfaceLock);
+ DCHECK(native_debug_interface_lock_ == nullptr);
+ native_debug_interface_lock_ = new Mutex("Native debug interface lock", current_lock_level);
+
UPDATE_CURRENT_LOCK_LEVEL(kLoggingLock);
DCHECK(logging_lock_ == nullptr);
logging_lock_ = new Mutex("logging lock", current_lock_level, true);
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 7077298ca9..bf27b7f17c 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -58,10 +58,12 @@ class Thread;
// [1] http://www.drdobbs.com/parallel/use-lock-hierarchies-to-avoid-deadlock/204801163
enum LockLevel {
kLoggingLock = 0,
+ kNativeDebugInterfaceLock,
kSwapMutexesLock,
kUnexpectedSignalLock,
kThreadSuspendCountLock,
kAbortLock,
+ kSignalHandlingLock,
kJdwpAdbStateLock,
kJdwpSocketLock,
kRegionSpaceRegionLock,
@@ -101,6 +103,7 @@ enum LockLevel {
kAllocatedThreadIdsLock,
kMonitorPoolLock,
kClassLinkerClassesLock, // TODO rename.
+ kDexToDexCompilerLock,
kJitCodeCacheLock,
kCHALock,
kSubtypeCheckLock,
@@ -744,8 +747,11 @@ class Locks {
// One unexpected signal at a time lock.
static Mutex* unexpected_signal_lock_ ACQUIRED_AFTER(thread_suspend_count_lock_);
+ // Guards the magic global variables used by native tools (e.g. libunwind).
+ static Mutex* native_debug_interface_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);
+
// Have an exclusive logging thread.
- static Mutex* logging_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);
+ static Mutex* logging_lock_ ACQUIRED_AFTER(native_debug_interface_lock_);
// List of mutexes that we expect a thread may hold when accessing weak refs. This is used to
// avoid a deadlock in the empty checkpoint while weak ref access is disabled (b/34964016). If we
diff --git a/runtime/check_reference_map_visitor.h b/runtime/check_reference_map_visitor.h
index 0c29e257a1..e2ad7fd83f 100644
--- a/runtime/check_reference_map_visitor.h
+++ b/runtime/check_reference_map_visitor.h
@@ -67,7 +67,7 @@ class CheckReferenceMapVisitor : public StackVisitor {
CodeInfo code_info = GetCurrentOatQuickMethodHeader()->GetOptimizedCodeInfo();
CodeInfoEncoding encoding = code_info.ExtractEncoding();
StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
- CodeItemDataAccessor accessor(m);
+ CodeItemDataAccessor accessor(m->DexInstructionData());
uint16_t number_of_dex_registers = accessor.RegistersSize();
DexRegisterMap dex_register_map =
code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers);
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 877654247c..800427d6ab 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -53,6 +53,7 @@
#include "dex/dex_file-inl.h"
#include "dex/dex_file_exception_helpers.h"
#include "dex/dex_file_loader.h"
+#include "dex/utf.h"
#include "entrypoints/entrypoint_utils.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "experimental_flags.h"
@@ -72,6 +73,7 @@
#include "intern_table.h"
#include "interpreter/interpreter.h"
#include "java_vm_ext.h"
+#include "jit/debugger_interface.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "jit/profile_compilation_info.h"
@@ -115,7 +117,6 @@
#include "thread-inl.h"
#include "thread_list.h"
#include "trace.h"
-#include "utf.h"
#include "utils.h"
#include "utils/dex_cache_arrays_layout-inl.h"
#include "verifier/method_verifier.h"
@@ -3286,7 +3287,15 @@ void ClassLinker::LoadField(const ClassDataItemIterator& it,
const uint32_t field_idx = it.GetMemberIndex();
dst->SetDexFieldIndex(field_idx);
dst->SetDeclaringClass(klass.Get());
- dst->SetAccessFlags(it.GetFieldAccessFlags());
+
+ // Get access flags from the DexFile. If this is a boot class path class,
+ // also set its runtime hidden API access flags.
+ uint32_t access_flags = it.GetFieldAccessFlags();
+ if (klass->IsBootStrapClassLoaded()) {
+ access_flags =
+ HiddenApiAccessFlags::EncodeForRuntime(access_flags, it.DecodeHiddenAccessFlags());
+ }
+ dst->SetAccessFlags(access_flags);
}
void ClassLinker::LoadMethod(const DexFile& dex_file,
@@ -3302,8 +3311,15 @@ void ClassLinker::LoadMethod(const DexFile& dex_file,
dst->SetDeclaringClass(klass.Get());
dst->SetCodeItemOffset(it.GetMethodCodeItemOffset());
+ // Get access flags from the DexFile. If this is a boot class path class,
+ // also set its runtime hidden API access flags.
uint32_t access_flags = it.GetMethodAccessFlags();
+ if (klass->IsBootStrapClassLoaded()) {
+ access_flags =
+ HiddenApiAccessFlags::EncodeForRuntime(access_flags, it.DecodeHiddenAccessFlags());
+ }
+
if (UNLIKELY(strcmp("finalize", method_name) == 0)) {
// Set finalizable flag on declaring class.
if (strcmp("V", dex_file.GetShorty(method_id.proto_idx_)) == 0) {
@@ -3417,6 +3433,7 @@ void ClassLinker::RegisterDexFileLocked(const DexFile& dex_file,
data.weak_root = dex_cache_jweak;
data.dex_file = dex_cache->GetDexFile();
data.class_table = ClassTableForClassLoader(class_loader);
+ RegisterDexFileForNative(self, data.dex_file->Begin());
DCHECK(data.class_table != nullptr);
// Make sure to hold the dex cache live in the class table. This case happens for the boot class
// path dex caches without an image.
@@ -4237,17 +4254,16 @@ bool ClassLinker::VerifyClassUsingOatFile(const DexFile& dex_file,
ClassStatus& oat_file_class_status) {
// If we're compiling, we can only verify the class using the oat file if
// we are not compiling the image or if the class we're verifying is not part of
- // the app. In other words, we will only check for preverification of bootclasspath
- // classes.
+ // the compilation unit (app - dependencies). We will let the compiler callback
+ // tell us about the latter.
if (Runtime::Current()->IsAotCompiler()) {
+ CompilerCallbacks* callbacks = Runtime::Current()->GetCompilerCallbacks();
// Are we compiling the bootclasspath?
- if (Runtime::Current()->GetCompilerCallbacks()->IsBootImage()) {
+ if (callbacks->IsBootImage()) {
return false;
}
// We are compiling an app (not the image).
-
- // Is this an app class? (I.e. not a bootclasspath class)
- if (klass->GetClassLoader() != nullptr) {
+ if (!callbacks->CanUseOatStatusForVerification(klass.Ptr())) {
return false;
}
}
@@ -4315,7 +4331,7 @@ void ClassLinker::ResolveClassExceptionHandlerTypes(Handle<mirror::Class> klass)
void ClassLinker::ResolveMethodExceptionHandlerTypes(ArtMethod* method) {
// similar to DexVerifier::ScanTryCatchBlocks and dex2oat's ResolveExceptionsForMethod.
- CodeItemDataAccessor accessor(method);
+ CodeItemDataAccessor accessor(method->DexInstructionData());
if (!accessor.HasCodeItem()) {
return; // native or abstract method
}
@@ -8354,7 +8370,6 @@ mirror::MethodHandle* ClassLinker::ResolveMethodHandleForField(
mirror::MethodHandle* ClassLinker::ResolveMethodHandleForMethod(
Thread* self,
- const DexFile* const dex_file,
const DexFile::MethodHandleItem& method_handle,
ArtMethod* referrer) {
DexFile::MethodHandleType handle_type =
@@ -8478,19 +8493,20 @@ mirror::MethodHandle* ClassLinker::ResolveMethodHandleForMethod(
return nullptr;
}
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(referrer->GetDexCache()));
- Handle<mirror::ClassLoader> class_loader(hs.NewHandle(referrer->GetClassLoader()));
int32_t index = 0;
-
if (receiver_count != 0) {
// Insert receiver
method_params->Set(index++, target_method->GetDeclaringClass());
}
-
- DexFileParameterIterator it(*dex_file, target_method->GetPrototype());
+ DexFileParameterIterator it(*target_method->GetDexFile(), target_method->GetPrototype());
+ Handle<mirror::DexCache> target_method_dex_cache(hs.NewHandle(target_method->GetDexCache()));
+ Handle<mirror::ClassLoader> target_method_class_loader(hs.NewHandle(target_method->GetClassLoader()));
while (it.HasNext()) {
+ DCHECK_LT(index, num_params);
const dex::TypeIndex type_idx = it.GetTypeIdx();
- ObjPtr<mirror::Class> klass = ResolveType(type_idx, dex_cache, class_loader);
+ ObjPtr<mirror::Class> klass = ResolveType(type_idx,
+ target_method_dex_cache,
+ target_method_class_loader);
if (nullptr == klass) {
DCHECK(self->IsExceptionPending());
return nullptr;
@@ -8540,7 +8556,7 @@ ObjPtr<mirror::MethodHandle> ClassLinker::ResolveMethodHandle(Thread* self,
case DexFile::MethodHandleType::kInvokeConstructor:
case DexFile::MethodHandleType::kInvokeDirect:
case DexFile::MethodHandleType::kInvokeInterface:
- return ResolveMethodHandleForMethod(self, dex_file, method_handle, referrer);
+ return ResolveMethodHandleForMethod(self, method_handle, referrer);
}
}
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 3e3425f5ac..16fa1ce801 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -979,7 +979,6 @@ class ClassLinker {
REQUIRES_SHARED(Locks::mutator_lock_);
mirror::MethodHandle* ResolveMethodHandleForMethod(Thread* self,
- const DexFile* const dex_file,
const DexFile::MethodHandleItem& method_handle,
ArtMethod* referrer)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/class_loader_context.cc b/runtime/class_loader_context.cc
index 3ec5335a80..e646520f3d 100644
--- a/runtime/class_loader_context.cc
+++ b/runtime/class_loader_context.cc
@@ -21,6 +21,7 @@
#include "base/stl_util.h"
#include "class_linker.h"
#include "class_loader_utils.h"
+#include "dex/art_dex_file_loader.h"
#include "dex/dex_file.h"
#include "dex/dex_file_loader.h"
#include "handle_scope-inl.h"
@@ -203,6 +204,7 @@ bool ClassLoaderContext::OpenDexFiles(InstructionSet isa, const std::string& cla
// We may get resource-only apks which we cannot load.
// TODO(calin): Refine the dex opening interface to be able to tell if an archive contains
// no dex files. So that we can distinguish the real failures...
+ const ArtDexFileLoader dex_file_loader;
for (ClassLoaderInfo& info : class_loader_chain_) {
size_t opened_dex_files_index = info.opened_dex_files.size();
for (const std::string& cp_elem : info.classpath) {
@@ -215,12 +217,12 @@ bool ClassLoaderContext::OpenDexFiles(InstructionSet isa, const std::string& cla
std::string error_msg;
// When opening the dex files from the context we expect their checksum to match their
// contents. So pass true to verify_checksum.
- if (!DexFileLoader::Open(location.c_str(),
- location.c_str(),
- Runtime::Current()->IsVerificationEnabled(),
- /*verify_checksum*/ true,
- &error_msg,
- &info.opened_dex_files)) {
+ if (!dex_file_loader.Open(location.c_str(),
+ location.c_str(),
+ Runtime::Current()->IsVerificationEnabled(),
+ /*verify_checksum*/ true,
+ &error_msg,
+ &info.opened_dex_files)) {
// If we fail to open the dex file because it's been stripped, try to open the dex file
// from its corresponding oat file.
// This could happen when we need to recompile a pre-build whose dex code has been stripped.
diff --git a/runtime/class_loader_context_test.cc b/runtime/class_loader_context_test.cc
index bc726354a8..4689ae4c3f 100644
--- a/runtime/class_loader_context_test.cc
+++ b/runtime/class_loader_context_test.cc
@@ -278,14 +278,17 @@ TEST_F(ClassLoaderContextTest, OpenValidDexFiles) {
VerifyOpenDexFiles(context.get(), 1, &all_dex_files1);
}
-static std::string CreateRelativeString(const std::string& in, const char* cwd) {
+// Creates a relative path from cwd to 'in'. Returns false if it cannot be done.
+// TODO We should somehow support this in all situations. b/72042237.
+static bool CreateRelativeString(const std::string& in, const char* cwd, std::string* out) {
int cwd_len = strlen(cwd);
if (!android::base::StartsWith(in, cwd) || (cwd_len < 1)) {
- LOG(FATAL) << in << " " << cwd;
+ return false;
}
bool contains_trailing_slash = (cwd[cwd_len - 1] == '/');
int start_position = cwd_len + (contains_trailing_slash ? 0 : 1);
- return in.substr(start_position);
+ *out = in.substr(start_position);
+ return true;
}
TEST_F(ClassLoaderContextTest, OpenValidDexFilesRelative) {
@@ -293,9 +296,17 @@ TEST_F(ClassLoaderContextTest, OpenValidDexFilesRelative) {
if (getcwd(cwd_buf, arraysize(cwd_buf)) == nullptr) {
PLOG(FATAL) << "Could not get working directory";
}
- std::string multidex_name = CreateRelativeString(GetTestDexFileName("MultiDex"), cwd_buf);
- std::string myclass_dex_name = CreateRelativeString(GetTestDexFileName("MyClass"), cwd_buf);
- std::string dex_name = CreateRelativeString(GetTestDexFileName("Main"), cwd_buf);
+ std::string multidex_name;
+ std::string myclass_dex_name;
+ std::string dex_name;
+ if (!CreateRelativeString(GetTestDexFileName("MultiDex"), cwd_buf, &multidex_name) ||
+ !CreateRelativeString(GetTestDexFileName("MyClass"), cwd_buf, &myclass_dex_name) ||
+ !CreateRelativeString(GetTestDexFileName("Main"), cwd_buf, &dex_name)) {
+ LOG(ERROR) << "Test OpenValidDexFilesRelative cannot be run because target dex files have no "
+ << "relative path.";
+ SUCCEED();
+ return;
+ }
std::unique_ptr<ClassLoaderContext> context =
@@ -321,10 +332,17 @@ TEST_F(ClassLoaderContextTest, OpenValidDexFilesClasspathDir) {
if (getcwd(cwd_buf, arraysize(cwd_buf)) == nullptr) {
PLOG(FATAL) << "Could not get working directory";
}
- std::string multidex_name = CreateRelativeString(GetTestDexFileName("MultiDex"), cwd_buf);
- std::string myclass_dex_name = CreateRelativeString(GetTestDexFileName("MyClass"), cwd_buf);
- std::string dex_name = CreateRelativeString(GetTestDexFileName("Main"), cwd_buf);
-
+ std::string multidex_name;
+ std::string myclass_dex_name;
+ std::string dex_name;
+ if (!CreateRelativeString(GetTestDexFileName("MultiDex"), cwd_buf, &multidex_name) ||
+ !CreateRelativeString(GetTestDexFileName("MyClass"), cwd_buf, &myclass_dex_name) ||
+ !CreateRelativeString(GetTestDexFileName("Main"), cwd_buf, &dex_name)) {
+ LOG(ERROR) << "Test OpenValidDexFilesClasspathDir cannot be run because target dex files have "
+ << "no relative path.";
+ SUCCEED();
+ return;
+ }
std::unique_ptr<ClassLoaderContext> context =
ClassLoaderContext::Create(
"PCL[" + multidex_name + ":" + myclass_dex_name + "];" +
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index 96d660fd64..39dbebfdf2 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -35,6 +35,7 @@
#include "base/unix_file/fd_file.h"
#include "class_linker.h"
#include "compiler_callbacks.h"
+#include "dex/art_dex_file_loader.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_file_loader.h"
#include "gc/heap.h"
@@ -375,7 +376,8 @@ std::unique_ptr<const DexFile> CommonRuntimeTestImpl::LoadExpectSingleDexFile(
std::string error_msg;
MemMap::Init();
static constexpr bool kVerifyChecksum = true;
- if (!DexFileLoader::Open(
+ const ArtDexFileLoader dex_file_loader;
+ if (!dex_file_loader.Open(
location, location, /* verify */ true, kVerifyChecksum, &error_msg, &dex_files)) {
LOG(FATAL) << "Could not open .dex file '" << location << "': " << error_msg << "\n";
UNREACHABLE();
@@ -574,12 +576,13 @@ std::vector<std::unique_ptr<const DexFile>> CommonRuntimeTestImpl::OpenTestDexFi
std::string filename = GetTestDexFileName(name);
static constexpr bool kVerifyChecksum = true;
std::string error_msg;
+ const ArtDexFileLoader dex_file_loader;
std::vector<std::unique_ptr<const DexFile>> dex_files;
- bool success = DexFileLoader::Open(filename.c_str(),
- filename.c_str(),
- /* verify */ true,
- kVerifyChecksum,
- &error_msg, &dex_files);
+ bool success = dex_file_loader.Open(filename.c_str(),
+ filename.c_str(),
+ /* verify */ true,
+ kVerifyChecksum,
+ &error_msg, &dex_files);
CHECK(success) << "Failed to open '" << filename << "': " << error_msg;
for (auto& dex_file : dex_files) {
CHECK_EQ(PROT_READ, dex_file->GetPermissions());
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index 1c73240eea..0aed70a330 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -311,6 +311,12 @@ class CheckJniAbortCatcher {
printf("WARNING: TEST DISABLED FOR COMPACT DEX\n"); \
return; \
}
+
+#define TEST_DISABLED_FOR_HEAP_POISONING() \
+ if (kPoisonHeapReferences) { \
+ printf("WARNING: TEST DISABLED FOR HEAP POISONING\n"); \
+ return; \
+ }
} // namespace art
#endif // ART_RUNTIME_COMMON_RUNTIME_TEST_H_
diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc
index 92d86519dc..19e7f7686d 100644
--- a/runtime/common_throws.cc
+++ b/runtime/common_throws.cc
@@ -26,7 +26,7 @@
#include "class_linker-inl.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_instruction-inl.h"
-#include "invoke_type.h"
+#include "dex/invoke_type.h"
#include "mirror/class-inl.h"
#include "mirror/method_type.h"
#include "mirror/object-inl.h"
@@ -551,7 +551,7 @@ static bool IsValidImplicitCheck(uintptr_t addr, const Instruction& instr)
void ThrowNullPointerExceptionFromDexPC(bool check_address, uintptr_t addr) {
uint32_t throw_dex_pc;
ArtMethod* method = Thread::Current()->GetCurrentMethod(&throw_dex_pc);
- CodeItemInstructionAccessor accessor(method);
+ CodeItemInstructionAccessor accessor(method->DexInstructions());
CHECK_LT(throw_dex_pc, accessor.InsnsSizeInCodeUnits());
const Instruction& instr = accessor.InstructionAt(throw_dex_pc);
if (check_address && !IsValidImplicitCheck(addr, instr)) {
diff --git a/runtime/compiler_callbacks.h b/runtime/compiler_callbacks.h
index 4560bca922..8395966404 100644
--- a/runtime/compiler_callbacks.h
+++ b/runtime/compiler_callbacks.h
@@ -25,6 +25,12 @@ namespace art {
class CompilerDriver;
+namespace mirror {
+
+class Class;
+
+} // namespace mirror
+
namespace verifier {
class MethodVerifier;
@@ -68,6 +74,11 @@ class CompilerCallbacks {
virtual void UpdateClassState(ClassReference ref ATTRIBUTE_UNUSED,
ClassStatus state ATTRIBUTE_UNUSED) {}
+ virtual bool CanUseOatStatusForVerification(mirror::Class* klass ATTRIBUTE_UNUSED)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return false;
+ }
+
protected:
explicit CompilerCallbacks(CallbackMode mode) : mode_(mode) { }
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 842cd7330c..61ad725b79 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -37,6 +37,7 @@
#include "dex/dex_file_annotations.h"
#include "dex/dex_file_types.h"
#include "dex/dex_instruction.h"
+#include "dex/utf.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/allocation_record.h"
@@ -66,7 +67,6 @@
#include "scoped_thread_state_change-inl.h"
#include "stack.h"
#include "thread_list.h"
-#include "utf.h"
#include "well_known_classes.h"
namespace art {
@@ -1533,7 +1533,7 @@ static uint32_t MangleAccessFlags(uint32_t accessFlags) {
*/
static uint16_t MangleSlot(uint16_t slot, ArtMethod* m)
REQUIRES_SHARED(Locks::mutator_lock_) {
- CodeItemDataAccessor accessor(m);
+ CodeItemDataAccessor accessor(m->DexInstructionData());
if (!accessor.HasCodeItem()) {
// We should not get here for a method without code (native, proxy or abstract). Log it and
// return the slot as is since all registers are arguments.
@@ -1564,7 +1564,7 @@ static size_t GetMethodNumArgRegistersIncludingThis(ArtMethod* method)
*/
static uint16_t DemangleSlot(uint16_t slot, ArtMethod* m, JDWP::JdwpError* error)
REQUIRES_SHARED(Locks::mutator_lock_) {
- CodeItemDataAccessor accessor(m);
+ CodeItemDataAccessor accessor(m->DexInstructionData());
if (!accessor.HasCodeItem()) {
// We should not get here for a method without code (native, proxy or abstract). Log it and
// return the slot as is since all registers are arguments.
@@ -1675,7 +1675,7 @@ void Dbg::OutputLineTable(JDWP::RefTypeId, JDWP::MethodId method_id, JDWP::Expan
}
};
ArtMethod* m = FromMethodId(method_id);
- CodeItemDebugInfoAccessor accessor(m);
+ CodeItemDebugInfoAccessor accessor(m->DexInstructionDebugInfo());
uint64_t start, end;
if (!accessor.HasCodeItem()) {
DCHECK(m->IsNative() || m->IsProxyMethod());
@@ -1741,7 +1741,7 @@ void Dbg::OutputVariableTable(JDWP::RefTypeId, JDWP::MethodId method_id, bool wi
}
};
ArtMethod* m = FromMethodId(method_id);
- CodeItemDebugInfoAccessor accessor(m);
+ CodeItemDebugInfoAccessor accessor(m->DexInstructionDebugInfo());
// arg_count considers doubles and longs to take 2 units.
// variable_count considers everything to take 1 unit.
@@ -1791,7 +1791,7 @@ JDWP::JdwpError Dbg::GetBytecodes(JDWP::RefTypeId, JDWP::MethodId method_id,
if (m == nullptr) {
return JDWP::ERR_INVALID_METHODID;
}
- CodeItemDataAccessor accessor(m);
+ CodeItemDataAccessor accessor(m->DexInstructionData());
size_t byte_count = accessor.InsnsSizeInCodeUnits() * 2;
const uint8_t* begin = reinterpret_cast<const uint8_t*>(accessor.Insns());
const uint8_t* end = begin + byte_count;
@@ -3908,7 +3908,7 @@ JDWP::JdwpError Dbg::ConfigureStep(JDWP::ObjectId thread_id, JDWP::JdwpStepSize
// Note: if the thread is not running Java code (pure native thread), there is no "current"
// method on the stack (and no line number either).
if (m != nullptr && !m->IsNative()) {
- CodeItemDebugInfoAccessor accessor(m);
+ CodeItemDebugInfoAccessor accessor(m->DexInstructionDebugInfo());
DebugCallbackContext context(single_step_control, line_number, accessor.InsnsSizeInCodeUnits());
m->GetDexFile()->DecodeDebugPositionInfo(accessor.DebugInfoOffset(),
DebugCallbackContext::Callback,
diff --git a/runtime/dex/art_dex_file_loader.cc b/runtime/dex/art_dex_file_loader.cc
new file mode 100644
index 0000000000..08cf30d5bf
--- /dev/null
+++ b/runtime/dex/art_dex_file_loader.cc
@@ -0,0 +1,480 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "art_dex_file_loader.h"
+
+#include <sys/mman.h> // For the PROT_* and MAP_* constants.
+#include <sys/stat.h>
+
+#include "android-base/stringprintf.h"
+
+#include "base/file_magic.h"
+#include "base/stl_util.h"
+#include "base/systrace.h"
+#include "base/unix_file/fd_file.h"
+#include "compact_dex_file.h"
+#include "dex_file.h"
+#include "dex_file_verifier.h"
+#include "standard_dex_file.h"
+#include "zip_archive.h"
+
+namespace art {
+
+namespace {
+
+class MemMapContainer : public DexFileContainer {
+ public:
+ explicit MemMapContainer(std::unique_ptr<MemMap>&& mem_map) : mem_map_(std::move(mem_map)) { }
+ virtual ~MemMapContainer() OVERRIDE { }
+
+ int GetPermissions() OVERRIDE {
+ if (mem_map_.get() == nullptr) {
+ return 0;
+ } else {
+ return mem_map_->GetProtect();
+ }
+ }
+
+ bool IsReadOnly() OVERRIDE {
+ return GetPermissions() == PROT_READ;
+ }
+
+ bool EnableWrite() OVERRIDE {
+ CHECK(IsReadOnly());
+ if (mem_map_.get() == nullptr) {
+ return false;
+ } else {
+ return mem_map_->Protect(PROT_READ | PROT_WRITE);
+ }
+ }
+
+ bool DisableWrite() OVERRIDE {
+ CHECK(!IsReadOnly());
+ if (mem_map_.get() == nullptr) {
+ return false;
+ } else {
+ return mem_map_->Protect(PROT_READ);
+ }
+ }
+
+ private:
+ std::unique_ptr<MemMap> mem_map_;
+ DISALLOW_COPY_AND_ASSIGN(MemMapContainer);
+};
+
+} // namespace
+
+using android::base::StringPrintf;
+
+static constexpr OatDexFile* kNoOatDexFile = nullptr;
+
+
+bool ArtDexFileLoader::GetMultiDexChecksums(const char* filename,
+ std::vector<uint32_t>* checksums,
+ std::string* error_msg,
+ int zip_fd) const {
+ CHECK(checksums != nullptr);
+ uint32_t magic;
+
+ File fd;
+ if (zip_fd != -1) {
+ if (ReadMagicAndReset(zip_fd, &magic, error_msg)) {
+ fd = File(zip_fd, false /* check_usage */);
+ }
+ } else {
+ fd = OpenAndReadMagic(filename, &magic, error_msg);
+ }
+ if (fd.Fd() == -1) {
+ DCHECK(!error_msg->empty());
+ return false;
+ }
+ if (IsZipMagic(magic)) {
+ std::unique_ptr<ZipArchive> zip_archive(
+ ZipArchive::OpenFromFd(fd.Release(), filename, error_msg));
+ if (zip_archive.get() == nullptr) {
+ *error_msg = StringPrintf("Failed to open zip archive '%s' (error msg: %s)", filename,
+ error_msg->c_str());
+ return false;
+ }
+
+ uint32_t i = 0;
+ std::string zip_entry_name = GetMultiDexClassesDexName(i++);
+ std::unique_ptr<ZipEntry> zip_entry(zip_archive->Find(zip_entry_name.c_str(), error_msg));
+ if (zip_entry.get() == nullptr) {
+ *error_msg = StringPrintf("Zip archive '%s' doesn't contain %s (error msg: %s)", filename,
+ zip_entry_name.c_str(), error_msg->c_str());
+ return false;
+ }
+
+ do {
+ checksums->push_back(zip_entry->GetCrc32());
+ zip_entry_name = GetMultiDexClassesDexName(i++);
+ zip_entry.reset(zip_archive->Find(zip_entry_name.c_str(), error_msg));
+ } while (zip_entry.get() != nullptr);
+ return true;
+ }
+ if (IsMagicValid(magic)) {
+ std::unique_ptr<const DexFile> dex_file(OpenFile(fd.Release(),
+ filename,
+ /* verify */ false,
+ /* verify_checksum */ false,
+ /* mmap_shared */ false,
+ error_msg));
+ if (dex_file == nullptr) {
+ return false;
+ }
+ checksums->push_back(dex_file->GetHeader().checksum_);
+ return true;
+ }
+ *error_msg = StringPrintf("Expected valid zip or dex file: '%s'", filename);
+ return false;
+}
+
+std::unique_ptr<const DexFile> ArtDexFileLoader::Open(const uint8_t* base,
+ size_t size,
+ const std::string& location,
+ uint32_t location_checksum,
+ const OatDexFile* oat_dex_file,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg) const {
+ ScopedTrace trace(std::string("Open dex file from RAM ") + location);
+ return OpenCommon(base,
+ size,
+ /*data_base*/ nullptr,
+ /*data_size*/ 0u,
+ location,
+ location_checksum,
+ oat_dex_file,
+ verify,
+ verify_checksum,
+ error_msg,
+ /*container*/ nullptr,
+ /*verify_result*/ nullptr);
+}
+
+std::unique_ptr<const DexFile> ArtDexFileLoader::Open(const std::string& location,
+ uint32_t location_checksum,
+ std::unique_ptr<MemMap> map,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg) const {
+ ScopedTrace trace(std::string("Open dex file from mapped-memory ") + location);
+ CHECK(map.get() != nullptr);
+
+ if (map->Size() < sizeof(DexFile::Header)) {
+ *error_msg = StringPrintf(
+ "DexFile: failed to open dex file '%s' that is too short to have a header",
+ location.c_str());
+ return nullptr;
+ }
+
+ std::unique_ptr<DexFile> dex_file = OpenCommon(map->Begin(),
+ map->Size(),
+ /*data_base*/ nullptr,
+ /*data_size*/ 0u,
+ location,
+ location_checksum,
+ kNoOatDexFile,
+ verify,
+ verify_checksum,
+ error_msg,
+ new MemMapContainer(std::move(map)),
+ /*verify_result*/ nullptr);
+ return dex_file;
+}
+
+bool ArtDexFileLoader::Open(const char* filename,
+ const std::string& location,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg,
+ std::vector<std::unique_ptr<const DexFile>>* dex_files) const {
+ ScopedTrace trace(std::string("Open dex file ") + std::string(location));
+ DCHECK(dex_files != nullptr) << "DexFile::Open: out-param is nullptr";
+ uint32_t magic;
+ File fd = OpenAndReadMagic(filename, &magic, error_msg);
+ if (fd.Fd() == -1) {
+ DCHECK(!error_msg->empty());
+ return false;
+ }
+ if (IsZipMagic(magic)) {
+ return OpenZip(fd.Release(), location, verify, verify_checksum, error_msg, dex_files);
+ }
+ if (IsMagicValid(magic)) {
+ std::unique_ptr<const DexFile> dex_file(OpenFile(fd.Release(),
+ location,
+ verify,
+ verify_checksum,
+ /* mmap_shared */ false,
+ error_msg));
+ if (dex_file.get() != nullptr) {
+ dex_files->push_back(std::move(dex_file));
+ return true;
+ } else {
+ return false;
+ }
+ }
+ *error_msg = StringPrintf("Expected valid zip or dex file: '%s'", filename);
+ return false;
+}
+
+std::unique_ptr<const DexFile> ArtDexFileLoader::OpenDex(int fd,
+ const std::string& location,
+ bool verify,
+ bool verify_checksum,
+ bool mmap_shared,
+ std::string* error_msg) const {
+ ScopedTrace trace("Open dex file " + std::string(location));
+ return OpenFile(fd, location, verify, verify_checksum, mmap_shared, error_msg);
+}
+
+bool ArtDexFileLoader::OpenZip(int fd,
+ const std::string& location,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg,
+ std::vector<std::unique_ptr<const DexFile>>* dex_files) const {
+ ScopedTrace trace("Dex file open Zip " + std::string(location));
+ DCHECK(dex_files != nullptr) << "DexFile::OpenZip: out-param is nullptr";
+ std::unique_ptr<ZipArchive> zip_archive(ZipArchive::OpenFromFd(fd, location.c_str(), error_msg));
+ if (zip_archive.get() == nullptr) {
+ DCHECK(!error_msg->empty());
+ return false;
+ }
+ return OpenAllDexFilesFromZip(
+ *zip_archive, location, verify, verify_checksum, error_msg, dex_files);
+}
+
+std::unique_ptr<const DexFile> ArtDexFileLoader::OpenFile(int fd,
+ const std::string& location,
+ bool verify,
+ bool verify_checksum,
+ bool mmap_shared,
+ std::string* error_msg) const {
+ ScopedTrace trace(std::string("Open dex file ") + std::string(location));
+ CHECK(!location.empty());
+ std::unique_ptr<MemMap> map;
+ {
+ File delayed_close(fd, /* check_usage */ false);
+ struct stat sbuf;
+ memset(&sbuf, 0, sizeof(sbuf));
+ if (fstat(fd, &sbuf) == -1) {
+ *error_msg = StringPrintf("DexFile: fstat '%s' failed: %s", location.c_str(),
+ strerror(errno));
+ return nullptr;
+ }
+ if (S_ISDIR(sbuf.st_mode)) {
+ *error_msg = StringPrintf("Attempt to mmap directory '%s'", location.c_str());
+ return nullptr;
+ }
+ size_t length = sbuf.st_size;
+ map.reset(MemMap::MapFile(length,
+ PROT_READ,
+ mmap_shared ? MAP_SHARED : MAP_PRIVATE,
+ fd,
+ 0,
+ /*low_4gb*/false,
+ location.c_str(),
+ error_msg));
+ if (map == nullptr) {
+ DCHECK(!error_msg->empty());
+ return nullptr;
+ }
+ }
+
+ if (map->Size() < sizeof(DexFile::Header)) {
+ *error_msg = StringPrintf(
+ "DexFile: failed to open dex file '%s' that is too short to have a header",
+ location.c_str());
+ return nullptr;
+ }
+
+ const DexFile::Header* dex_header = reinterpret_cast<const DexFile::Header*>(map->Begin());
+
+ std::unique_ptr<DexFile> dex_file = OpenCommon(map->Begin(),
+ map->Size(),
+ /*data_base*/ nullptr,
+ /*data_size*/ 0u,
+ location,
+ dex_header->checksum_,
+ kNoOatDexFile,
+ verify,
+ verify_checksum,
+ error_msg,
+ new MemMapContainer(std::move(map)),
+ /*verify_result*/ nullptr);
+
+ return dex_file;
+}
+
+std::unique_ptr<const DexFile> ArtDexFileLoader::OpenOneDexFileFromZip(
+ const ZipArchive& zip_archive,
+ const char* entry_name,
+ const std::string& location,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg,
+ ZipOpenErrorCode* error_code) const {
+ ScopedTrace trace("Dex file open from Zip Archive " + std::string(location));
+ CHECK(!location.empty());
+ std::unique_ptr<ZipEntry> zip_entry(zip_archive.Find(entry_name, error_msg));
+ if (zip_entry == nullptr) {
+ *error_code = ZipOpenErrorCode::kEntryNotFound;
+ return nullptr;
+ }
+ if (zip_entry->GetUncompressedLength() == 0) {
+ *error_msg = StringPrintf("Dex file '%s' has zero length", location.c_str());
+ *error_code = ZipOpenErrorCode::kDexFileError;
+ return nullptr;
+ }
+
+ std::unique_ptr<MemMap> map;
+ if (zip_entry->IsUncompressed()) {
+ if (!zip_entry->IsAlignedTo(alignof(DexFile::Header))) {
+ // Do not mmap unaligned ZIP entries because
+ // doing so would fail dex verification which requires 4 byte alignment.
+ LOG(WARNING) << "Can't mmap dex file " << location << "!" << entry_name << " directly; "
+ << "please zipalign to " << alignof(DexFile::Header) << " bytes. "
+ << "Falling back to extracting file.";
+ } else {
+ // Map uncompressed files within zip as file-backed to avoid a dirty copy.
+ map.reset(zip_entry->MapDirectlyFromFile(location.c_str(), /*out*/error_msg));
+ if (map == nullptr) {
+ LOG(WARNING) << "Can't mmap dex file " << location << "!" << entry_name << " directly; "
+ << "is your ZIP file corrupted? Falling back to extraction.";
+ // Try again with Extraction which still has a chance of recovery.
+ }
+ }
+ }
+
+ if (map == nullptr) {
+ // Default path for compressed ZIP entries,
+ // and fallback for stored ZIP entries.
+ map.reset(zip_entry->ExtractToMemMap(location.c_str(), entry_name, error_msg));
+ }
+
+ if (map == nullptr) {
+ *error_msg = StringPrintf("Failed to extract '%s' from '%s': %s", entry_name, location.c_str(),
+ error_msg->c_str());
+ *error_code = ZipOpenErrorCode::kExtractToMemoryError;
+ return nullptr;
+ }
+ VerifyResult verify_result;
+ std::unique_ptr<DexFile> dex_file = OpenCommon(map->Begin(),
+ map->Size(),
+ /*data_base*/ nullptr,
+ /*data_size*/ 0u,
+ location,
+ zip_entry->GetCrc32(),
+ kNoOatDexFile,
+ verify,
+ verify_checksum,
+ error_msg,
+ new MemMapContainer(std::move(map)),
+ &verify_result);
+ if (dex_file == nullptr) {
+ if (verify_result == VerifyResult::kVerifyNotAttempted) {
+ *error_code = ZipOpenErrorCode::kDexFileError;
+ } else {
+ *error_code = ZipOpenErrorCode::kVerifyError;
+ }
+ return nullptr;
+ }
+ if (!dex_file->DisableWrite()) {
+ *error_msg = StringPrintf("Failed to make dex file '%s' read only", location.c_str());
+ *error_code = ZipOpenErrorCode::kMakeReadOnlyError;
+ return nullptr;
+ }
+ CHECK(dex_file->IsReadOnly()) << location;
+ if (verify_result != VerifyResult::kVerifySucceeded) {
+ *error_code = ZipOpenErrorCode::kVerifyError;
+ return nullptr;
+ }
+ *error_code = ZipOpenErrorCode::kNoError;
+ return dex_file;
+}
+
+// Technically we do not have a limitation with respect to the number of dex files that can be in a
+// multidex APK. However, it's bad practice, as each dex file requires its own tables for symbols
+// (types, classes, methods, ...) and dex caches. So warn the user that we open a zip with what
+// seems an excessive number.
+static constexpr size_t kWarnOnManyDexFilesThreshold = 100;
+
+bool ArtDexFileLoader::OpenAllDexFilesFromZip(
+ const ZipArchive& zip_archive,
+ const std::string& location,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg,
+ std::vector<std::unique_ptr<const DexFile>>* dex_files) const {
+ ScopedTrace trace("Dex file open from Zip " + std::string(location));
+ DCHECK(dex_files != nullptr) << "DexFile::OpenFromZip: out-param is nullptr";
+ ZipOpenErrorCode error_code;
+ std::unique_ptr<const DexFile> dex_file(OpenOneDexFileFromZip(zip_archive,
+ kClassesDex,
+ location,
+ verify,
+ verify_checksum,
+ error_msg,
+ &error_code));
+ if (dex_file.get() == nullptr) {
+ return false;
+ } else {
+ // Had at least classes.dex.
+ dex_files->push_back(std::move(dex_file));
+
+ // Now try some more.
+
+ // We could try to avoid std::string allocations by working on a char array directly. As we
+ // do not expect a lot of iterations, this seems too involved and brittle.
+
+ for (size_t i = 1; ; ++i) {
+ std::string name = GetMultiDexClassesDexName(i);
+ std::string fake_location = GetMultiDexLocation(i, location.c_str());
+ std::unique_ptr<const DexFile> next_dex_file(OpenOneDexFileFromZip(zip_archive,
+ name.c_str(),
+ fake_location,
+ verify,
+ verify_checksum,
+ error_msg,
+ &error_code));
+ if (next_dex_file.get() == nullptr) {
+ if (error_code != ZipOpenErrorCode::kEntryNotFound) {
+ LOG(WARNING) << "Zip open failed: " << *error_msg;
+ }
+ break;
+ } else {
+ dex_files->push_back(std::move(next_dex_file));
+ }
+
+ if (i == kWarnOnManyDexFilesThreshold) {
+ LOG(WARNING) << location << " has in excess of " << kWarnOnManyDexFilesThreshold
+ << " dex files. Please consider coalescing and shrinking the number to "
+ " avoid runtime overhead.";
+ }
+
+ if (i == std::numeric_limits<size_t>::max()) {
+ LOG(ERROR) << "Overflow in number of dex files!";
+ break;
+ }
+ }
+
+ return true;
+ }
+}
+
+} // namespace art
diff --git a/runtime/dex/art_dex_file_loader.h b/runtime/dex/art_dex_file_loader.h
new file mode 100644
index 0000000000..b31d1e94e0
--- /dev/null
+++ b/runtime/dex/art_dex_file_loader.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_DEX_ART_DEX_FILE_LOADER_H_
+#define ART_RUNTIME_DEX_ART_DEX_FILE_LOADER_H_
+
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "dex_file_loader.h"
+#include "base/macros.h"
+
+namespace art {
+
+class DexFile;
+class DexFileContainer;
+class MemMap;
+class OatDexFile;
+class ZipArchive;
+
+// Class that is used to open dex files and deal with corresponding multidex and location logic.
+class ArtDexFileLoader : public DexFileLoader {
+ public:
+ virtual ~ArtDexFileLoader() { }
+
+ // Returns the checksums of a file for comparison with GetLocationChecksum().
+ // For .dex files, this is the single header checksum.
+ // For zip files, this is the zip entry CRC32 checksum for classes.dex and
+ // each additional multidex entry classes2.dex, classes3.dex, etc.
+ // If a valid zip_fd is provided, the file content will be read directly from
+ // the descriptor and `filename` will be used as an alias for error logging. If
+ // zip_fd is -1, the method will try to open the `filename` and read the
+ // content from it.
+ // Return true if the checksums could be found, false otherwise.
+ bool GetMultiDexChecksums(const char* filename,
+ std::vector<uint32_t>* checksums,
+ std::string* error_msg,
+ int zip_fd = -1) const OVERRIDE;
+
+ // Opens .dex file, backed by existing memory
+ std::unique_ptr<const DexFile> Open(const uint8_t* base,
+ size_t size,
+ const std::string& location,
+ uint32_t location_checksum,
+ const OatDexFile* oat_dex_file,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg) const OVERRIDE;
+
+ // Opens .dex file that has been memory-mapped by the caller.
+ std::unique_ptr<const DexFile> Open(const std::string& location,
+ uint32_t location_checksum,
+ std::unique_ptr<MemMap> mem_map,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg) const;
+
+ // Opens all .dex files found in the file, guessing the container format based on file extension.
+ bool Open(const char* filename,
+ const std::string& location,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg,
+ std::vector<std::unique_ptr<const DexFile>>* dex_files) const;
+
+ // Open a single dex file from an fd. This function closes the fd.
+ std::unique_ptr<const DexFile> OpenDex(int fd,
+ const std::string& location,
+ bool verify,
+ bool verify_checksum,
+ bool mmap_shared,
+ std::string* error_msg) const;
+
+ // Opens dex files from within a .jar, .zip, or .apk file
+ bool OpenZip(int fd,
+ const std::string& location,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg,
+ std::vector<std::unique_ptr<const DexFile>>* dex_files) const;
+
+ private:
+ std::unique_ptr<const DexFile> OpenFile(int fd,
+ const std::string& location,
+ bool verify,
+ bool verify_checksum,
+ bool mmap_shared,
+ std::string* error_msg) const;
+
+ // Open all classesXXX.dex files from a zip archive.
+ bool OpenAllDexFilesFromZip(const ZipArchive& zip_archive,
+ const std::string& location,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg,
+ std::vector<std::unique_ptr<const DexFile>>* dex_files) const;
+
+ // Opens .dex file from the entry_name in a zip archive. error_code is undefined when a
+ // non-null dex file is returned.
+ std::unique_ptr<const DexFile> OpenOneDexFileFromZip(const ZipArchive& zip_archive,
+ const char* entry_name,
+ const std::string& location,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg,
+ ZipOpenErrorCode* error_code) const;
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_DEX_ART_DEX_FILE_LOADER_H_
diff --git a/runtime/dex/code_item_accessors-inl.h b/runtime/dex/code_item_accessors-inl.h
index 2792dc0663..9c39935d3b 100644
--- a/runtime/dex/code_item_accessors-inl.h
+++ b/runtime/dex/code_item_accessors-inl.h
@@ -17,31 +17,186 @@
#ifndef ART_RUNTIME_DEX_CODE_ITEM_ACCESSORS_INL_H_
#define ART_RUNTIME_DEX_CODE_ITEM_ACCESSORS_INL_H_
-#include "code_item_accessors-no_art-inl.h"
+#include "code_item_accessors.h"
-#include "art_method-inl.h"
#include "compact_dex_file.h"
#include "dex_file-inl.h"
-#include "oat_file.h"
#include "standard_dex_file.h"
+// The no ART version is used by binaries that don't include the whole runtime.
namespace art {
-inline CodeItemInstructionAccessor::CodeItemInstructionAccessor(ArtMethod* method)
- : CodeItemInstructionAccessor(*method->GetDexFile(), method->GetCodeItem()) {}
+inline void CodeItemInstructionAccessor::Init(uint32_t insns_size_in_code_units,
+ const uint16_t* insns) {
+ insns_size_in_code_units_ = insns_size_in_code_units;
+ insns_ = insns;
+}
+
+inline void CodeItemInstructionAccessor::Init(const CompactDexFile::CodeItem& code_item) {
+ uint32_t insns_size_in_code_units;
+ code_item.DecodeFields</*kDecodeOnlyInstructionCount*/ true>(
+ &insns_size_in_code_units,
+ /*registers_size*/ nullptr,
+ /*ins_size*/ nullptr,
+ /*outs_size*/ nullptr,
+ /*tries_size*/ nullptr);
+ Init(insns_size_in_code_units, code_item.insns_);
+}
-inline CodeItemDataAccessor::CodeItemDataAccessor(ArtMethod* method)
- : CodeItemDataAccessor(*method->GetDexFile(), method->GetCodeItem()) {}
+inline void CodeItemInstructionAccessor::Init(const StandardDexFile::CodeItem& code_item) {
+ Init(code_item.insns_size_in_code_units_, code_item.insns_);
+}
+
+inline void CodeItemInstructionAccessor::Init(const DexFile& dex_file,
+ const DexFile::CodeItem* code_item) {
+ if (code_item != nullptr) {
+ DCHECK(dex_file.IsInDataSection(code_item));
+ if (dex_file.IsCompactDexFile()) {
+ Init(down_cast<const CompactDexFile::CodeItem&>(*code_item));
+ } else {
+ DCHECK(dex_file.IsStandardDexFile());
+ Init(down_cast<const StandardDexFile::CodeItem&>(*code_item));
+ }
+ }
+}
+
+inline CodeItemInstructionAccessor::CodeItemInstructionAccessor(
+ const DexFile& dex_file,
+ const DexFile::CodeItem* code_item) {
+ Init(dex_file, code_item);
+}
+
+inline DexInstructionIterator CodeItemInstructionAccessor::begin() const {
+ return DexInstructionIterator(insns_, 0u);
+}
-inline CodeItemDebugInfoAccessor::CodeItemDebugInfoAccessor(ArtMethod* method)
- : CodeItemDebugInfoAccessor(*method->GetDexFile(), method->GetCodeItem()) {}
+inline DexInstructionIterator CodeItemInstructionAccessor::end() const {
+ return DexInstructionIterator(insns_, insns_size_in_code_units_);
+}
+
+inline IterationRange<DexInstructionIterator> CodeItemInstructionAccessor::InstructionsFrom(
+ uint32_t start_dex_pc) const {
+ DCHECK_LT(start_dex_pc, InsnsSizeInCodeUnits());
+ return {
+ DexInstructionIterator(insns_, start_dex_pc),
+ DexInstructionIterator(insns_, insns_size_in_code_units_) };
+}
+
+inline void CodeItemDataAccessor::Init(const CompactDexFile::CodeItem& code_item) {
+ uint32_t insns_size_in_code_units;
+ code_item.DecodeFields</*kDecodeOnlyInstructionCount*/ false>(&insns_size_in_code_units,
+ &registers_size_,
+ &ins_size_,
+ &outs_size_,
+ &tries_size_);
+ CodeItemInstructionAccessor::Init(insns_size_in_code_units, code_item.insns_);
+}
+
+inline void CodeItemDataAccessor::Init(const StandardDexFile::CodeItem& code_item) {
+ CodeItemInstructionAccessor::Init(code_item);
+ registers_size_ = code_item.registers_size_;
+ ins_size_ = code_item.ins_size_;
+ outs_size_ = code_item.outs_size_;
+ tries_size_ = code_item.tries_size_;
+}
+
+inline void CodeItemDataAccessor::Init(const DexFile& dex_file,
+ const DexFile::CodeItem* code_item) {
+ if (code_item != nullptr) {
+ if (dex_file.IsCompactDexFile()) {
+ CodeItemDataAccessor::Init(down_cast<const CompactDexFile::CodeItem&>(*code_item));
+ } else {
+ DCHECK(dex_file.IsStandardDexFile());
+ CodeItemDataAccessor::Init(down_cast<const StandardDexFile::CodeItem&>(*code_item));
+ }
+ }
+}
-inline CodeItemDebugInfoAccessor::CodeItemDebugInfoAccessor(const DexFile& dex_file,
- const DexFile::CodeItem* code_item) {
+inline CodeItemDataAccessor::CodeItemDataAccessor(const DexFile& dex_file,
+ const DexFile::CodeItem* code_item) {
+ Init(dex_file, code_item);
+}
+
+inline IterationRange<const DexFile::TryItem*> CodeItemDataAccessor::TryItems() const {
+ const DexFile::TryItem* try_items = DexFile::GetTryItems(end(), 0u);
+ return {
+ try_items,
+ try_items + TriesSize() };
+}
+
+inline const uint8_t* CodeItemDataAccessor::GetCatchHandlerData(size_t offset) const {
+ return DexFile::GetCatchHandlerData(end(), TriesSize(), offset);
+}
+
+inline const DexFile::TryItem* CodeItemDataAccessor::FindTryItem(uint32_t try_dex_pc) const {
+ IterationRange<const DexFile::TryItem*> try_items(TryItems());
+ int32_t index = DexFile::FindTryItem(try_items.begin(),
+ try_items.end() - try_items.begin(),
+ try_dex_pc);
+ return index != -1 ? &try_items.begin()[index] : nullptr;
+}
+
+inline const void* CodeItemDataAccessor::CodeItemDataEnd() const {
+ const uint8_t* handler_data = GetCatchHandlerData();
+
+ if (TriesSize() == 0 || handler_data == nullptr) {
+ return &end().Inst();
+ }
+ // Get the start of the handler data.
+ const uint32_t handlers_size = DecodeUnsignedLeb128(&handler_data);
+ // Manually read each handler.
+ for (uint32_t i = 0; i < handlers_size; ++i) {
+ int32_t uleb128_count = DecodeSignedLeb128(&handler_data) * 2;
+ if (uleb128_count <= 0) {
+ uleb128_count = -uleb128_count + 1;
+ }
+ for (int32_t j = 0; j < uleb128_count; ++j) {
+ DecodeUnsignedLeb128(&handler_data);
+ }
+ }
+ return reinterpret_cast<const void*>(handler_data);
+}
+
+inline void CodeItemDebugInfoAccessor::Init(const DexFile& dex_file,
+ const DexFile::CodeItem* code_item,
+ uint32_t dex_method_index) {
if (code_item == nullptr) {
return;
}
- Init(dex_file, code_item, OatFile::GetDebugInfoOffset(dex_file, code_item->debug_info_off_));
+ dex_file_ = &dex_file;
+ if (dex_file.IsCompactDexFile()) {
+ Init(down_cast<const CompactDexFile::CodeItem&>(*code_item), dex_method_index);
+ } else {
+ DCHECK(dex_file.IsStandardDexFile());
+ Init(down_cast<const StandardDexFile::CodeItem&>(*code_item));
+ }
+}
+
+inline void CodeItemDebugInfoAccessor::Init(const CompactDexFile::CodeItem& code_item,
+ uint32_t dex_method_index) {
+ debug_info_offset_ = down_cast<const CompactDexFile*>(dex_file_)->GetDebugInfoOffset(
+ dex_method_index);
+ CodeItemDataAccessor::Init(code_item);
+}
+
+inline void CodeItemDebugInfoAccessor::Init(const StandardDexFile::CodeItem& code_item) {
+ debug_info_offset_ = code_item.debug_info_off_;
+ CodeItemDataAccessor::Init(code_item);
+}
+
+template<typename NewLocalCallback>
+inline bool CodeItemDebugInfoAccessor::DecodeDebugLocalInfo(bool is_static,
+ uint32_t method_idx,
+ NewLocalCallback new_local,
+ void* context) const {
+ return dex_file_->DecodeDebugLocalInfo(RegistersSize(),
+ InsSize(),
+ InsnsSizeInCodeUnits(),
+ DebugInfoOffset(),
+ is_static,
+ method_idx,
+ new_local,
+ context);
}
} // namespace art
diff --git a/runtime/dex/code_item_accessors-no_art-inl.h b/runtime/dex/code_item_accessors-no_art-inl.h
index baea856e71..8082be3818 100644
--- a/runtime/dex/code_item_accessors-no_art-inl.h
+++ b/runtime/dex/code_item_accessors-no_art-inl.h
@@ -17,169 +17,7 @@
#ifndef ART_RUNTIME_DEX_CODE_ITEM_ACCESSORS_NO_ART_INL_H_
#define ART_RUNTIME_DEX_CODE_ITEM_ACCESSORS_NO_ART_INL_H_
-#include "code_item_accessors.h"
-
-#include "compact_dex_file.h"
-#include "dex_file-inl.h"
-#include "standard_dex_file.h"
-
-// The no ART version is used by binaries that don't include the whole runtime.
-namespace art {
-
-inline void CodeItemInstructionAccessor::Init(const CompactDexFile::CodeItem& code_item) {
- insns_size_in_code_units_ = code_item.insns_size_in_code_units_;
- insns_ = code_item.insns_;
-}
-
-inline void CodeItemInstructionAccessor::Init(const StandardDexFile::CodeItem& code_item) {
- insns_size_in_code_units_ = code_item.insns_size_in_code_units_;
- insns_ = code_item.insns_;
-}
-
-inline void CodeItemInstructionAccessor::Init(const DexFile& dex_file,
- const DexFile::CodeItem* code_item) {
- if (code_item != nullptr) {
- DCHECK(dex_file.HasAddress(code_item));
- if (dex_file.IsCompactDexFile()) {
- Init(down_cast<const CompactDexFile::CodeItem&>(*code_item));
- } else {
- DCHECK(dex_file.IsStandardDexFile());
- Init(down_cast<const StandardDexFile::CodeItem&>(*code_item));
- }
- }
-}
-
-inline CodeItemInstructionAccessor::CodeItemInstructionAccessor(
- const DexFile& dex_file,
- const DexFile::CodeItem* code_item) {
- Init(dex_file, code_item);
-}
-
-inline DexInstructionIterator CodeItemInstructionAccessor::begin() const {
- return DexInstructionIterator(insns_, 0u);
-}
-
-inline DexInstructionIterator CodeItemInstructionAccessor::end() const {
- return DexInstructionIterator(insns_, insns_size_in_code_units_);
-}
-
-inline IterationRange<DexInstructionIterator> CodeItemInstructionAccessor::InstructionsFrom(
- uint32_t start_dex_pc) const {
- DCHECK_LT(start_dex_pc, InsnsSizeInCodeUnits());
- return {
- DexInstructionIterator(insns_, start_dex_pc),
- DexInstructionIterator(insns_, insns_size_in_code_units_) };
-}
-
-inline void CodeItemDataAccessor::Init(const CompactDexFile::CodeItem& code_item) {
- CodeItemInstructionAccessor::Init(code_item);
- registers_size_ = code_item.registers_size_;
- ins_size_ = code_item.ins_size_;
- outs_size_ = code_item.outs_size_;
- tries_size_ = code_item.tries_size_;
-}
-
-inline void CodeItemDataAccessor::Init(const StandardDexFile::CodeItem& code_item) {
- CodeItemInstructionAccessor::Init(code_item);
- registers_size_ = code_item.registers_size_;
- ins_size_ = code_item.ins_size_;
- outs_size_ = code_item.outs_size_;
- tries_size_ = code_item.tries_size_;
-}
-
-inline void CodeItemDataAccessor::Init(const DexFile& dex_file,
- const DexFile::CodeItem* code_item) {
- if (code_item != nullptr) {
- if (dex_file.IsCompactDexFile()) {
- CodeItemDataAccessor::Init(down_cast<const CompactDexFile::CodeItem&>(*code_item));
- } else {
- DCHECK(dex_file.IsStandardDexFile());
- CodeItemDataAccessor::Init(down_cast<const StandardDexFile::CodeItem&>(*code_item));
- }
- }
-}
-
-inline CodeItemDataAccessor::CodeItemDataAccessor(const DexFile& dex_file,
- const DexFile::CodeItem* code_item) {
- Init(dex_file, code_item);
-}
-
-inline IterationRange<const DexFile::TryItem*> CodeItemDataAccessor::TryItems() const {
- const DexFile::TryItem* try_items = DexFile::GetTryItems(end(), 0u);
- return {
- try_items,
- try_items + TriesSize() };
-}
-
-inline const uint8_t* CodeItemDataAccessor::GetCatchHandlerData(size_t offset) const {
- return DexFile::GetCatchHandlerData(end(), TriesSize(), offset);
-}
-
-inline const DexFile::TryItem* CodeItemDataAccessor::FindTryItem(uint32_t try_dex_pc) const {
- IterationRange<const DexFile::TryItem*> try_items(TryItems());
- int32_t index = DexFile::FindTryItem(try_items.begin(),
- try_items.end() - try_items.begin(),
- try_dex_pc);
- return index != -1 ? &try_items.begin()[index] : nullptr;
-}
-
-inline const void* CodeItemDataAccessor::CodeItemDataEnd() const {
- const uint8_t* handler_data = GetCatchHandlerData();
-
- if (TriesSize() == 0 || handler_data == nullptr) {
- return &end().Inst();
- }
- // Get the start of the handler data.
- const uint32_t handlers_size = DecodeUnsignedLeb128(&handler_data);
- // Manually read each handler.
- for (uint32_t i = 0; i < handlers_size; ++i) {
- int32_t uleb128_count = DecodeSignedLeb128(&handler_data) * 2;
- if (uleb128_count <= 0) {
- uleb128_count = -uleb128_count + 1;
- }
- for (int32_t j = 0; j < uleb128_count; ++j) {
- DecodeUnsignedLeb128(&handler_data);
- }
- }
- return reinterpret_cast<const void*>(handler_data);
-}
-
-inline void CodeItemDebugInfoAccessor::Init(const DexFile& dex_file,
- const DexFile::CodeItem* code_item,
- uint32_t debug_info_offset) {
- dex_file_ = &dex_file;
- debug_info_offset_ = debug_info_offset;
- if (dex_file.IsCompactDexFile()) {
- Init(down_cast<const CompactDexFile::CodeItem&>(*code_item));
- } else {
- DCHECK(dex_file.IsStandardDexFile());
- Init(down_cast<const StandardDexFile::CodeItem&>(*code_item));
- }
-}
-
-inline void CodeItemDebugInfoAccessor::Init(const CompactDexFile::CodeItem& code_item) {
- CodeItemDataAccessor::Init(code_item);
-}
-
-inline void CodeItemDebugInfoAccessor::Init(const StandardDexFile::CodeItem& code_item) {
- CodeItemDataAccessor::Init(code_item);
-}
-
-template<typename NewLocalCallback>
-inline bool CodeItemDebugInfoAccessor::DecodeDebugLocalInfo(bool is_static,
- uint32_t method_idx,
- NewLocalCallback new_local,
- void* context) const {
- return dex_file_->DecodeDebugLocalInfo(RegistersSize(),
- InsSize(),
- InsnsSizeInCodeUnits(),
- DebugInfoOffset(),
- is_static,
- method_idx,
- new_local,
- context);
-}
-
-} // namespace art
+// TODO: delete this file once system/core is updated.
+#include "code_item_accessors-inl.h"
#endif // ART_RUNTIME_DEX_CODE_ITEM_ACCESSORS_NO_ART_INL_H_
diff --git a/runtime/dex/code_item_accessors.h b/runtime/dex/code_item_accessors.h
index b5a6957548..beb78f6e4f 100644
--- a/runtime/dex/code_item_accessors.h
+++ b/runtime/dex/code_item_accessors.h
@@ -66,6 +66,7 @@ class CodeItemInstructionAccessor {
protected:
CodeItemInstructionAccessor() = default;
+ ALWAYS_INLINE void Init(uint32_t insns_size_in_code_units, const uint16_t* insns);
ALWAYS_INLINE void Init(const CompactDexFile::CodeItem& code_item);
ALWAYS_INLINE void Init(const StandardDexFile::CodeItem& code_item);
ALWAYS_INLINE void Init(const DexFile& dex_file, const DexFile::CodeItem* code_item);
@@ -84,8 +85,6 @@ class CodeItemDataAccessor : public CodeItemInstructionAccessor {
public:
ALWAYS_INLINE CodeItemDataAccessor(const DexFile& dex_file, const DexFile::CodeItem* code_item);
- ALWAYS_INLINE explicit CodeItemDataAccessor(ArtMethod* method);
-
uint16_t RegistersSize() const {
return registers_size_;
}
@@ -131,20 +130,16 @@ class CodeItemDebugInfoAccessor : public CodeItemDataAccessor {
public:
CodeItemDebugInfoAccessor() = default;
- // Handles null code items, but not null dex files.
- ALWAYS_INLINE CodeItemDebugInfoAccessor(const DexFile& dex_file,
- const DexFile::CodeItem* code_item);
-
// Initialize with an existing offset.
ALWAYS_INLINE CodeItemDebugInfoAccessor(const DexFile& dex_file,
const DexFile::CodeItem* code_item,
- uint32_t debug_info_offset) {
- Init(dex_file, code_item, debug_info_offset);
+ uint32_t dex_method_index) {
+ Init(dex_file, code_item, dex_method_index);
}
ALWAYS_INLINE void Init(const DexFile& dex_file,
const DexFile::CodeItem* code_item,
- uint32_t debug_info_offset);
+ uint32_t dex_method_index);
ALWAYS_INLINE explicit CodeItemDebugInfoAccessor(ArtMethod* method);
@@ -159,7 +154,7 @@ class CodeItemDebugInfoAccessor : public CodeItemDataAccessor {
void* context) const;
protected:
- ALWAYS_INLINE void Init(const CompactDexFile::CodeItem& code_item);
+ ALWAYS_INLINE void Init(const CompactDexFile::CodeItem& code_item, uint32_t dex_method_index);
ALWAYS_INLINE void Init(const StandardDexFile::CodeItem& code_item);
private:
diff --git a/runtime/dex/code_item_accessors_test.cc b/runtime/dex/code_item_accessors_test.cc
index b29d10b113..1bd12a6f09 100644
--- a/runtime/dex/code_item_accessors_test.cc
+++ b/runtime/dex/code_item_accessors_test.cc
@@ -16,9 +16,11 @@
#include "code_item_accessors-inl.h"
+#include <sys/mman.h>
#include <memory>
#include "common_runtime_test.h"
+#include "art_dex_file_loader.h"
#include "dex_file_loader.h"
#include "mem_map.h"
@@ -38,19 +40,23 @@ std::unique_ptr<const DexFile> CreateFakeDex(bool compact_dex) {
&error_msg));
CHECK(map != nullptr) << error_msg;
if (compact_dex) {
- CompactDexFile::WriteMagic(map->Begin());
- CompactDexFile::WriteCurrentVersion(map->Begin());
+ CompactDexFile::Header* header =
+ const_cast<CompactDexFile::Header*>(CompactDexFile::Header::At(map->Begin()));
+ CompactDexFile::WriteMagic(header->magic_);
+ CompactDexFile::WriteCurrentVersion(header->magic_);
+ header->data_off_ = 0;
+ header->data_size_ = map->Size();
} else {
StandardDexFile::WriteMagic(map->Begin());
StandardDexFile::WriteCurrentVersion(map->Begin());
}
- std::unique_ptr<const DexFile> dex(
- DexFileLoader::Open("location",
- /*location_checksum*/ 123,
- std::move(map),
- /*verify*/false,
- /*verify_checksum*/false,
- &error_msg));
+ const ArtDexFileLoader dex_file_loader;
+ std::unique_ptr<const DexFile> dex(dex_file_loader.Open("location",
+ /*location_checksum*/ 123,
+ std::move(map),
+ /*verify*/false,
+ /*verify_checksum*/false,
+ &error_msg));
CHECK(dex != nullptr) << error_msg;
return dex;
}
@@ -61,8 +67,8 @@ TEST(CodeItemAccessorsTest, TestDexInstructionsAccessor) {
ASSERT_TRUE(standard_dex != nullptr);
std::unique_ptr<const DexFile> compact_dex(CreateFakeDex(/*compact_dex*/true));
ASSERT_TRUE(compact_dex != nullptr);
- static constexpr uint16_t kRegisterSize = 1;
- static constexpr uint16_t kInsSize = 2;
+ static constexpr uint16_t kRegisterSize = 2;
+ static constexpr uint16_t kInsSize = 1;
static constexpr uint16_t kOutsSize = 3;
static constexpr uint16_t kTriesSize = 4;
// debug_info_off_ is not accessible from the helpers yet.
@@ -96,12 +102,16 @@ TEST(CodeItemAccessorsTest, TestDexInstructionsAccessor) {
verify_code_item(standard_dex.get(), dex_code_item, dex_code_item->insns_);
CompactDexFile::CodeItem* cdex_code_item =
- reinterpret_cast<CompactDexFile::CodeItem*>(const_cast<uint8_t*>(compact_dex->Begin()));
- cdex_code_item->registers_size_ = kRegisterSize;
- cdex_code_item->ins_size_ = kInsSize;
- cdex_code_item->outs_size_ = kOutsSize;
- cdex_code_item->tries_size_ = kTriesSize;
- cdex_code_item->insns_size_in_code_units_ = kInsnsSizeInCodeUnits;
+ reinterpret_cast<CompactDexFile::CodeItem*>(const_cast<uint8_t*>(compact_dex->Begin() +
+ CompactDexFile::CodeItem::kMaxPreHeaderSize * sizeof(uint16_t)));
+ std::vector<uint16_t> preheader;
+ cdex_code_item->Create(kRegisterSize,
+ kInsSize,
+ kOutsSize,
+ kTriesSize,
+ kInsnsSizeInCodeUnits,
+ cdex_code_item->GetPreHeader());
+
verify_code_item(compact_dex.get(), cdex_code_item, cdex_code_item->insns_);
}
diff --git a/runtime/dex/compact_dex_debug_info.cc b/runtime/dex/compact_dex_debug_info.cc
new file mode 100644
index 0000000000..19495ca92c
--- /dev/null
+++ b/runtime/dex/compact_dex_debug_info.cc
@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "compact_dex_debug_info.h"
+
+#include "compact_dex_utils.h"
+#include "leb128.h"
+
+namespace art {
+
+constexpr size_t CompactDexDebugInfoOffsetTable::kElementsPerIndex;
+
+CompactDexDebugInfoOffsetTable::Accessor::Accessor(const uint8_t* data_begin,
+ uint32_t debug_info_base,
+ uint32_t debug_info_table_offset)
+ : table_(reinterpret_cast<const uint32_t*>(data_begin + debug_info_table_offset)),
+ debug_info_base_(debug_info_base),
+ data_begin_(data_begin) {}
+
+uint32_t CompactDexDebugInfoOffsetTable::Accessor::GetDebugInfoOffset(uint32_t method_idx) const {
+ const uint32_t offset = table_[method_idx / kElementsPerIndex];
+ const size_t bit_index = method_idx % kElementsPerIndex;
+
+ const uint8_t* block = data_begin_ + offset;
+ uint16_t bit_mask = *block;
+ ++block;
+ bit_mask = (bit_mask << kBitsPerByte) | *block;
+ ++block;
+ if ((bit_mask & (1 << bit_index)) == 0) {
+ // If the bit is not set, the debug info offset for this method is 0.
+ return 0u;
+ }
+ // Trim off the bits above the index we want and count how many bits are set. This is how many
+ // lebs we need to decode.
+ size_t count = POPCOUNT(static_cast<uintptr_t>(bit_mask) << (kBitsPerIntPtrT - 1 - bit_index));
+ DCHECK_GT(count, 0u);
+ uint32_t current_offset = debug_info_base_;
+ do {
+ current_offset += DecodeUnsignedLeb128(&block);
+ --count;
+ } while (count > 0);
+ return current_offset;
+}
+
+void CompactDexDebugInfoOffsetTable::Build(const std::vector<uint32_t>& debug_info_offsets,
+ std::vector<uint8_t>* out_data,
+ uint32_t* out_min_offset,
+ uint32_t* out_table_offset) {
+ DCHECK(out_data != nullptr);
+ DCHECK(out_data->empty());
+ // Calculate the base offset and return it.
+ *out_min_offset = std::numeric_limits<uint32_t>::max();
+ for (const uint32_t offset : debug_info_offsets) {
+ if (offset != 0u) {
+ *out_min_offset = std::min(*out_min_offset, offset);
+ }
+ }
+ // Write the leb blocks and store the important offsets (one per kElementsPerIndex elements).
+ size_t block_start = 0;
+
+ std::vector<uint32_t> offset_table;
+
+ // Write data first then the table.
+ while (block_start < debug_info_offsets.size()) {
+ // Write the offset of the block for each block.
+ offset_table.push_back(out_data->size());
+
+ // Block size of up to kElementsPerIndex
+ const size_t block_size = std::min(debug_info_offsets.size() - block_start, kElementsPerIndex);
+
+ // Calculate the bit mask since we need to write it first.
+ uint16_t bit_mask = 0u;
+ for (size_t i = 0; i < block_size; ++i) {
+ if (debug_info_offsets[block_start + i] != 0u) {
+ bit_mask |= 1 << i;
+ }
+ }
+ // Write bit mask.
+ out_data->push_back(static_cast<uint8_t>(bit_mask >> kBitsPerByte));
+ out_data->push_back(static_cast<uint8_t>(bit_mask));
+
+ // Write debug info offsets relative to the current offset.
+ uint32_t current_offset = *out_min_offset;
+ for (size_t i = 0; i < block_size; ++i) {
+ const uint32_t debug_info_offset = debug_info_offsets[block_start + i];
+ if (debug_info_offset != 0u) {
+ uint32_t delta = debug_info_offset - current_offset;
+ EncodeUnsignedLeb128(out_data, delta);
+ current_offset = debug_info_offset;
+ }
+ }
+
+ block_start += block_size;
+ }
+
+ // Write the offset table.
+ AlignmentPadVector(out_data, alignof(uint32_t));
+ *out_table_offset = out_data->size();
+ out_data->insert(out_data->end(),
+ reinterpret_cast<const uint8_t*>(&offset_table[0]),
+ reinterpret_cast<const uint8_t*>(&offset_table[0] + offset_table.size()));
+}
+
+} // namespace art
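For reference (not part of the patch), here is a minimal standalone sketch of how a lookup against the table built above proceeds, assuming the same 16-entry blocks and the big-endian bitmask written by Build(); the ULEB128 decoder is a local stand-in for DecodeUnsignedLeb128:

  #include <cstdint>

  // Minimal ULEB128 decoder; advances *data past the encoded value.
  static uint32_t ReadUleb128(const uint8_t** data) {
    uint32_t result = 0;
    int shift = 0;
    uint8_t byte;
    do {
      byte = *(*data)++;
      result |= static_cast<uint32_t>(byte & 0x7f) << shift;
      shift += 7;
    } while ((byte & 0x80) != 0);
    return result;
  }

  uint32_t LookupDebugInfoOffset(const uint8_t* data_begin,   // start of the data section
                                 const uint32_t* table,       // block offsets, one per 16 methods
                                 uint32_t debug_info_base,    // minimum non-zero offset
                                 uint32_t method_idx) {
    const uint8_t* block = data_begin + table[method_idx / 16];
    const uint32_t bit = method_idx % 16;
    // The bitmask is stored high byte first; one bit per method in the block.
    const uint16_t mask = static_cast<uint16_t>((block[0] << 8) | block[1]);
    block += 2;
    if ((mask & (1u << bit)) == 0) {
      return 0;  // No debug info for this method.
    }
    // One leb delta is stored per set bit; accumulate deltas up to and including our bit.
    uint32_t offset = debug_info_base;
    for (uint32_t i = 0; i <= bit; ++i) {
      if ((mask & (1u << i)) != 0) {
        offset += ReadUleb128(&block);
      }
    }
    return offset;
  }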
diff --git a/runtime/dex/compact_dex_debug_info.h b/runtime/dex/compact_dex_debug_info.h
new file mode 100644
index 0000000000..1aff75879e
--- /dev/null
+++ b/runtime/dex/compact_dex_debug_info.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_DEX_COMPACT_DEX_DEBUG_INFO_H_
+#define ART_RUNTIME_DEX_COMPACT_DEX_DEBUG_INFO_H_
+
+#include <cstdint>
+#include <vector>
+
+namespace art {
+
+// Debug offset table for compact dex; it aims to minimize size while still providing reasonable
+// speed (10-20ns average time per lookup on host).
+class CompactDexDebugInfoOffsetTable {
+ public:
+ // This value is coupled with the leb chunk bitmask; that logic must also be adjusted when
+ // this constant is changed.
+ static constexpr size_t kElementsPerIndex = 16;
+
+ // Leb block format:
+ // [uint16_t] 16 bit mask for what method ids actually have a debug info offset for the chunk.
+ // [lebs] Up to 16 lebs encoded using leb128, one leb per set bit. Each leb specifies how the offset
+ // changes compared to the previous index.
+
+ class Accessor {
+ public:
+ Accessor(const uint8_t* data_begin,
+ uint32_t debug_info_base,
+ uint32_t debug_info_table_offset);
+
+ // Return the debug info offset for a method index (or 0 if it doesn't have one).
+ uint32_t GetDebugInfoOffset(uint32_t method_idx) const;
+
+ private:
+ const uint32_t* const table_;
+ const uint32_t debug_info_base_;
+ const uint8_t* const data_begin_;
+ };
+
+ // Returned offsets are all relative to debug_info_offsets.
+ static void Build(const std::vector<uint32_t>& debug_info_offsets,
+ std::vector<uint8_t>* out_data,
+ uint32_t* out_min_offset,
+ uint32_t* out_table_offset);
+
+ // 32 bit aligned for the offset table.
+ static constexpr size_t kAlignment = sizeof(uint32_t);
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_DEX_COMPACT_DEX_DEBUG_INFO_H_
diff --git a/runtime/dex/compact_dex_debug_info_test.cc b/runtime/dex/compact_dex_debug_info_test.cc
new file mode 100644
index 0000000000..02b95e68d7
--- /dev/null
+++ b/runtime/dex/compact_dex_debug_info_test.cc
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vector>
+#include <sys/mman.h>
+
+#include "base/logging.h"
+#include "dex/compact_dex_debug_info.h"
+#include "gtest/gtest.h"
+#include "mem_map.h"
+
+namespace art {
+
+TEST(CompactDexDebugInfoTest, TestBuildAndAccess) {
+ MemMap::Init();
+
+ const size_t kDebugInfoMinOffset = 1234567;
+ std::vector<uint32_t> offsets = {
+ 0, 17, 2, 3, 11, 0, 0, 0, 0, 1, 0, 1552, 100, 122, 44, 1234567, 0, 0,
+ std::numeric_limits<uint32_t>::max() - kDebugInfoMinOffset, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12,
+ };
+ // Add some large offset since the debug info section will never be that close to the beginning
+ // of the file.
+ for (uint32_t& offset : offsets) {
+ if (offset != 0u) {
+ offset += kDebugInfoMinOffset;
+ }
+ }
+
+ std::vector<uint8_t> data;
+ uint32_t base_offset = 0;
+ uint32_t table_offset = 0;
+ CompactDexDebugInfoOffsetTable::Build(offsets,
+ /*out*/ &data,
+ /*out*/ &base_offset,
+ /*out*/ &table_offset);
+ EXPECT_GE(base_offset, kDebugInfoMinOffset);
+ EXPECT_LT(table_offset, data.size());
+ ASSERT_GT(data.size(), 0u);
+ const size_t before_size = offsets.size() * sizeof(offsets.front());
+ EXPECT_LT(data.size(), before_size);
+
+ // Note that the accessor requires the data to be aligned. Use memmap to accomplish this.
+ std::string error_msg;
+ // Leave some extra room since we don't copy the table at the start (for testing).
+ constexpr size_t kExtraOffset = 4 * 128;
+ std::unique_ptr<MemMap> fake_dex(MemMap::MapAnonymous("fake dex",
+ nullptr,
+ data.size() + kExtraOffset,
+ PROT_READ | PROT_WRITE,
+ /*low_4gb*/ false,
+ /*reuse*/ false,
+ &error_msg));
+ ASSERT_TRUE(fake_dex != nullptr) << error_msg;
+ std::copy(data.begin(), data.end(), fake_dex->Begin() + kExtraOffset);
+
+ CompactDexDebugInfoOffsetTable::Accessor accessor(fake_dex->Begin() + kExtraOffset,
+ base_offset,
+ table_offset);
+ for (size_t i = 0; i < offsets.size(); ++i) {
+ EXPECT_EQ(offsets[i], accessor.GetDebugInfoOffset(i));
+ }
+
+ // Sort to try to produce a smaller table. This works because the leb deltas are smaller
+ // when the offsets are in increasing order.
+ std::sort(offsets.begin(), offsets.end());
+ std::vector<uint8_t> sorted_data;
+ CompactDexDebugInfoOffsetTable::Build(offsets,
+ /*out*/ &sorted_data,
+ /*out*/ &base_offset,
+ /*out*/ &table_offset);
+ EXPECT_LT(sorted_data.size(), data.size());
+ {
+ ScopedLogSeverity sls(LogSeverity::INFO);
+ LOG(INFO) << "raw size " << before_size
+ << " table size " << data.size()
+ << " sorted table size " << sorted_data.size();
+ }
+}
+
+} // namespace art
diff --git a/runtime/dex/compact_dex_file.cc b/runtime/dex/compact_dex_file.cc
index 2d1ee0420e..ce289d4d7b 100644
--- a/runtime/dex/compact_dex_file.cc
+++ b/runtime/dex/compact_dex_file.cc
@@ -16,7 +16,7 @@
#include "compact_dex_file.h"
-#include "code_item_accessors-no_art-inl.h"
+#include "code_item_accessors-inl.h"
#include "dex_file-inl.h"
#include "leb128.h"
@@ -56,11 +56,53 @@ bool CompactDexFile::SupportsDefaultMethods() const {
}
uint32_t CompactDexFile::GetCodeItemSize(const DexFile::CodeItem& item) const {
- // TODO: Clean up this temporary code duplication with StandardDexFile. Eventually the
- // implementations will differ.
- DCHECK(HasAddress(&item));
+ DCHECK(IsInDataSection(&item));
return reinterpret_cast<uintptr_t>(CodeItemDataAccessor(*this, &item).CodeItemDataEnd()) -
reinterpret_cast<uintptr_t>(&item);
}
+
+uint32_t CompactDexFile::CalculateChecksum(const uint8_t* base_begin,
+ size_t base_size,
+ const uint8_t* data_begin,
+ size_t data_size) {
+ Header temp_header(*Header::At(base_begin));
+ // Zero out fields that are not included in the sum.
+ temp_header.checksum_ = 0u;
+ temp_header.data_off_ = 0u;
+ temp_header.data_size_ = 0u;
+ uint32_t checksum = ChecksumMemoryRange(reinterpret_cast<const uint8_t*>(&temp_header),
+ sizeof(temp_header));
+ // Exclude the header since we already computed its checksum.
+ checksum = (checksum * 31) ^ ChecksumMemoryRange(base_begin + sizeof(temp_header),
+ base_size - sizeof(temp_header));
+ checksum = (checksum * 31) ^ ChecksumMemoryRange(data_begin, data_size);
+ return checksum;
+}
+
+uint32_t CompactDexFile::CalculateChecksum() const {
+ return CalculateChecksum(Begin(), Size(), DataBegin(), DataSize());
+}
+
+CompactDexFile::CompactDexFile(const uint8_t* base,
+ size_t size,
+ const uint8_t* data_begin,
+ size_t data_size,
+ const std::string& location,
+ uint32_t location_checksum,
+ const OatDexFile* oat_dex_file,
+ DexFileContainer* container)
+ : DexFile(base,
+ size,
+ data_begin,
+ data_size,
+ location,
+ location_checksum,
+ oat_dex_file,
+ container,
+ /*is_compact_dex*/ true),
+ debug_info_offsets_(DataBegin() + GetHeader().debug_info_offsets_pos_,
+ GetHeader().debug_info_base_,
+ GetHeader().debug_info_offsets_table_offset_) {}
+
} // namespace art
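As an aside (not part of the patch), the checksum layering above can be illustrated with plain zlib: the header is summed with its checksum_, data_off_ and data_size_ fields zeroed, then the remainder of the main section and the data section are folded in.

  #include <zlib.h>
  #include <cstddef>
  #include <cstdint>

  static uint32_t Adler(const uint8_t* begin, size_t size) {
    return adler32(adler32(0L, Z_NULL, 0), begin, size);
  }

  // header: header bytes with checksum_, data_off_ and data_size_ already zeroed out.
  uint32_t CombineChecksums(const uint8_t* header, size_t header_size,
                            const uint8_t* rest, size_t rest_size,
                            const uint8_t* data, size_t data_size) {
    uint32_t checksum = Adler(header, header_size);
    checksum = (checksum * 31) ^ Adler(rest, rest_size);   // rest of the main section
    checksum = (checksum * 31) ^ Adler(data, data_size);   // shared data section
    return checksum;
  }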
diff --git a/runtime/dex/compact_dex_file.h b/runtime/dex/compact_dex_file.h
index 280c6f70cc..31aeb27872 100644
--- a/runtime/dex/compact_dex_file.h
+++ b/runtime/dex/compact_dex_file.h
@@ -19,6 +19,7 @@
#include "base/casts.h"
#include "dex_file.h"
+#include "dex/compact_dex_debug_info.h"
namespace art {
@@ -34,20 +35,199 @@ class CompactDexFile : public DexFile {
class Header : public DexFile::Header {
public:
+ static const Header* At(const void* at) {
+ return reinterpret_cast<const Header*>(at);
+ }
+
uint32_t GetFeatureFlags() const {
return feature_flags_;
}
+ uint32_t GetDataOffset() const {
+ return data_off_;
+ }
+
+ uint32_t GetDataSize() const {
+ return data_size_;
+ }
+
private:
uint32_t feature_flags_ = 0u;
+ // Position in the compact dex file where the debug info table data starts.
+ uint32_t debug_info_offsets_pos_ = 0u;
+
+ // Offset into the debug info table data where the lookup table is.
+ uint32_t debug_info_offsets_table_offset_ = 0u;
+
+ // Base offset of where debug info starts in the dex file.
+ uint32_t debug_info_base_ = 0u;
+
+ friend class CompactDexFile;
friend class CompactDexWriter;
};
+ // Like the standard code item except without a debug info offset. Each code item may have a
+ // preheader to encode large methods. In 99% of cases, the preheader is not used. This enables
+ // smaller size with a good fast path case in the accessors.
struct CodeItem : public DexFile::CodeItem {
+ static constexpr size_t kAlignment = sizeof(uint16_t);
+ // Max preheader size in uint16_ts.
+ static constexpr size_t kMaxPreHeaderSize = 6;
+
private:
- // TODO: Insert compact dex specific fields here.
+ CodeItem() = default;
+
+ static constexpr size_t kRegistersSizeShift = 12;
+ static constexpr size_t kInsSizeShift = 8;
+ static constexpr size_t kOutsSizeShift = 4;
+ static constexpr size_t kTriesSizeSizeShift = 0;
+ static constexpr uint16_t kFlagPreHeaderRegisterSize = 0x1 << 0;
+ static constexpr uint16_t kFlagPreHeaderInsSize = 0x1 << 1;
+ static constexpr uint16_t kFlagPreHeaderOutsSize = 0x1 << 2;
+ static constexpr uint16_t kFlagPreHeaderTriesSize = 0x1 << 3;
+ static constexpr uint16_t kFlagPreHeaderInsnsSize = 0x1 << 4;
+ static constexpr size_t kInsnsSizeShift = 5;
+ static constexpr size_t kInsnsSizeBits = sizeof(uint16_t) * kBitsPerByte - kInsnsSizeShift;
+
+ // Combined preheader flags for fast testing if we need to go slow path.
+ static constexpr uint16_t kFlagPreHeaderCombined =
+ kFlagPreHeaderRegisterSize |
+ kFlagPreHeaderInsSize |
+ kFlagPreHeaderOutsSize |
+ kFlagPreHeaderTriesSize |
+ kFlagPreHeaderInsnsSize;
+
+ // Create a code item and associated preheader if required based on field values.
+ // Returns the start of the preheader. The preheader buffer must be at least as large as
+ // kMaxPreHeaderSize.
+ uint16_t* Create(uint16_t registers_size,
+ uint16_t ins_size,
+ uint16_t outs_size,
+ uint16_t tries_size,
+ uint32_t insns_size_in_code_units,
+ uint16_t* out_preheader) {
+ // Dex verification ensures that registers_size >= ins_size, so we can subtract ins_size from
+ // registers_size to reduce how often we need to use the preheader.
+ DCHECK_GE(registers_size, ins_size);
+ registers_size -= ins_size;
+ fields_ = (registers_size & 0xF) << kRegistersSizeShift;
+ fields_ |= (ins_size & 0xF) << kInsSizeShift;
+ fields_ |= (outs_size & 0xF) << kOutsSizeShift;
+ fields_ |= (tries_size & 0xF) << kTriesSizeSizeShift;
+ registers_size &= ~0xF;
+ ins_size &= ~0xF;
+ outs_size &= ~0xF;
+ tries_size &= ~0xF;
+ insns_count_and_flags_ = 0;
+ const size_t masked_count = insns_size_in_code_units & ((1 << kInsnsSizeBits) - 1);
+ insns_count_and_flags_ |= masked_count << kInsnsSizeShift;
+ insns_size_in_code_units -= masked_count;
+
+ // Since the preheader case is rare (1% of code items), use a suboptimally large but fast
+ // decoding format.
+ if (insns_size_in_code_units != 0) {
+ insns_count_and_flags_ |= kFlagPreHeaderInsnsSize;
+ --out_preheader;
+ *out_preheader = static_cast<uint16_t>(insns_size_in_code_units);
+ --out_preheader;
+ *out_preheader = static_cast<uint16_t>(insns_size_in_code_units >> 16);
+ }
+ auto preheader_encode = [&](uint16_t size, uint16_t flag) {
+ if (size != 0) {
+ insns_count_and_flags_ |= flag;
+ --out_preheader;
+ *out_preheader = size;
+ }
+ };
+ preheader_encode(registers_size, kFlagPreHeaderRegisterSize);
+ preheader_encode(ins_size, kFlagPreHeaderInsSize);
+ preheader_encode(outs_size, kFlagPreHeaderOutsSize);
+ preheader_encode(tries_size, kFlagPreHeaderTriesSize);
+ return out_preheader;
+ }
+
+ ALWAYS_INLINE bool HasPreHeader(uint16_t flag) const {
+ return (insns_count_and_flags_ & flag) != 0;
+ }
+
+ // Return true if the code item has any preheaders.
+ ALWAYS_INLINE static bool HasAnyPreHeader(uint16_t insns_count_and_flags) {
+ return (insns_count_and_flags & kFlagPreHeaderCombined) != 0;
+ }
+
+ ALWAYS_INLINE uint16_t* GetPreHeader() {
+ return reinterpret_cast<uint16_t*>(this);
+ }
+
+ ALWAYS_INLINE const uint16_t* GetPreHeader() const {
+ return reinterpret_cast<const uint16_t*>(this);
+ }
+
+ // Decode fields and read the preheader if necessary. If kDecodeOnlyInstructionCount is
+ // specified then only the instruction count is decoded.
+ template <bool kDecodeOnlyInstructionCount>
+ ALWAYS_INLINE void DecodeFields(uint32_t* insns_count,
+ uint16_t* registers_size,
+ uint16_t* ins_size,
+ uint16_t* outs_size,
+ uint16_t* tries_size) const {
+ *insns_count = insns_count_and_flags_ >> kInsnsSizeShift;
+ if (!kDecodeOnlyInstructionCount) {
+ const uint16_t fields = fields_;
+ *registers_size = (fields >> kRegistersSizeShift) & 0xF;
+ *ins_size = (fields >> kInsSizeShift) & 0xF;
+ *outs_size = (fields >> kOutsSizeShift) & 0xF;
+ *tries_size = (fields >> kTriesSizeSizeShift) & 0xF;
+ }
+ if (UNLIKELY(HasAnyPreHeader(insns_count_and_flags_))) {
+ const uint16_t* preheader = GetPreHeader();
+ if (HasPreHeader(kFlagPreHeaderInsnsSize)) {
+ --preheader;
+ *insns_count += static_cast<uint32_t>(*preheader);
+ --preheader;
+ *insns_count += static_cast<uint32_t>(*preheader) << 16;
+ }
+ if (!kDecodeOnlyInstructionCount) {
+ if (HasPreHeader(kFlagPreHeaderRegisterSize)) {
+ --preheader;
+ *registers_size += preheader[0];
+ }
+ if (HasPreHeader(kFlagPreHeaderInsSize)) {
+ --preheader;
+ *ins_size += preheader[0];
+ }
+ if (HasPreHeader(kFlagPreHeaderOutsSize)) {
+ --preheader;
+ *outs_size += preheader[0];
+ }
+ if (HasPreHeader(kFlagPreHeaderTriesSize)) {
+ --preheader;
+ *tries_size += preheader[0];
+ }
+ }
+ }
+ if (!kDecodeOnlyInstructionCount) {
+ *registers_size += *ins_size;
+ }
+ }
+
+ // Packed code item data, 4 bits each: [registers_size, ins_size, outs_size, tries_size]
+ uint16_t fields_;
+
+ // 5 flag bits indicating which of the fields required preheader extension, 11 bits for the
+ // number of instruction code units.
+ uint16_t insns_count_and_flags_;
+
+ uint16_t insns_[1]; // actual array of bytecode.
+
+ ART_FRIEND_TEST(CodeItemAccessorsTest, TestDexInstructionsAccessor);
+ ART_FRIEND_TEST(CompactDexFileTest, CodeItemFields);
+ friend class CodeItemDataAccessor;
+ friend class CodeItemDebugInfoAccessor;
+ friend class CodeItemInstructionAccessor;
friend class CompactDexFile;
+ friend class CompactDexWriter;
DISALLOW_COPY_AND_ASSIGN(CodeItem);
};
@@ -65,6 +245,12 @@ class CompactDexFile : public DexFile {
static bool IsVersionValid(const uint8_t* magic);
virtual bool IsVersionValid() const OVERRIDE;
+ // TODO This is completely a guess. We really need to do better. b/72402467
+ // We ask for 64 megabytes which should be big enough for any realistic dex file.
+ virtual size_t GetDequickenedSize() const OVERRIDE {
+ return 64 * MB;
+ }
+
const Header& GetHeader() const {
return down_cast<const Header&>(DexFile::GetHeader());
}
@@ -73,25 +259,30 @@ class CompactDexFile : public DexFile {
uint32_t GetCodeItemSize(const DexFile::CodeItem& item) const OVERRIDE;
+ uint32_t GetDebugInfoOffset(uint32_t dex_method_index) const {
+ return debug_info_offsets_.GetDebugInfoOffset(dex_method_index);
+ }
+
+ static uint32_t CalculateChecksum(const uint8_t* base_begin,
+ size_t base_size,
+ const uint8_t* data_begin,
+ size_t data_size);
+ virtual uint32_t CalculateChecksum() const OVERRIDE;
+
private:
- // Not supported yet.
CompactDexFile(const uint8_t* base,
size_t size,
+ const uint8_t* data_begin,
+ size_t data_size,
const std::string& location,
uint32_t location_checksum,
const OatDexFile* oat_dex_file,
- DexFileContainer* container)
- : DexFile(base,
- size,
- location,
- location_checksum,
- oat_dex_file,
- container,
- /*is_compact_dex*/ true) {}
+ DexFileContainer* container);
+
+ CompactDexDebugInfoOffsetTable::Accessor debug_info_offsets_;
friend class DexFile;
friend class DexFileLoader;
-
DISALLOW_COPY_AND_ASSIGN(CompactDexFile);
};
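As a rough worked example of the packing above (illustrative only, not from the patch): with registers_size = 20, ins_size = 3, outs_size = 2, tries_size = 0 and insns_size_in_code_units = 70, Create() first stores registers_size - ins_size = 17. The low nibbles (1, 3, 2, 0) land in fields_, the count 70 fits in the 11 instruction-count bits of insns_count_and_flags_, and the leftover 16 from the register count is written to the preheader with kFlagPreHeaderRegisterSize set. DecodeFields() reverses this and finally adds ins_size back onto registers_size, recovering 20.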
diff --git a/runtime/dex/compact_dex_file_test.cc b/runtime/dex/compact_dex_file_test.cc
index d665dc994b..517c5873ed 100644
--- a/runtime/dex/compact_dex_file_test.cc
+++ b/runtime/dex/compact_dex_file_test.cc
@@ -14,15 +14,14 @@
* limitations under the License.
*/
-#include "common_runtime_test.h"
+
#include "compact_dex_file.h"
#include "dex_file_loader.h"
+#include "gtest/gtest.h"
namespace art {
-class CompactDexFileTest : public CommonRuntimeTest {};
-
-TEST_F(CompactDexFileTest, MagicAndVersion) {
+TEST(CompactDexFileTest, MagicAndVersion) {
// Test permutations of valid/invalid headers.
for (size_t i = 0; i < 2; ++i) {
for (size_t j = 0; j < 2; ++j) {
@@ -45,4 +44,58 @@ TEST_F(CompactDexFileTest, MagicAndVersion) {
}
}
+TEST(CompactDexFileTest, CodeItemFields) {
+ auto test_and_write = [&] (uint16_t registers_size,
+ uint16_t ins_size,
+ uint16_t outs_size,
+ uint16_t tries_size,
+ uint32_t insns_size_in_code_units) {
+ ASSERT_GE(registers_size, ins_size);
+ uint16_t buffer[sizeof(CompactDexFile::CodeItem) +
+ CompactDexFile::CodeItem::kMaxPreHeaderSize] = {};
+ CompactDexFile::CodeItem* code_item = reinterpret_cast<CompactDexFile::CodeItem*>(
+ &buffer[CompactDexFile::CodeItem::kMaxPreHeaderSize]);
+ const uint16_t* preheader_ptr = code_item->Create(registers_size,
+ ins_size,
+ outs_size,
+ tries_size,
+ insns_size_in_code_units,
+ code_item->GetPreHeader());
+ ASSERT_GT(preheader_ptr, buffer);
+
+ uint16_t out_registers_size;
+ uint16_t out_ins_size;
+ uint16_t out_outs_size;
+ uint16_t out_tries_size;
+ uint32_t out_insns_size_in_code_units;
+ code_item->DecodeFields</*kDecodeOnlyInstructionCount*/false>(&out_insns_size_in_code_units,
+ &out_registers_size,
+ &out_ins_size,
+ &out_outs_size,
+ &out_tries_size);
+ ASSERT_EQ(registers_size, out_registers_size);
+ ASSERT_EQ(ins_size, out_ins_size);
+ ASSERT_EQ(outs_size, out_outs_size);
+ ASSERT_EQ(tries_size, out_tries_size);
+ ASSERT_EQ(insns_size_in_code_units, out_insns_size_in_code_units);
+
+ ++out_insns_size_in_code_units; // Force value to change.
+ code_item->DecodeFields</*kDecodeOnlyInstructionCount*/true>(&out_insns_size_in_code_units,
+ /*registers_size*/ nullptr,
+ /*ins_size*/ nullptr,
+ /*outs_size*/ nullptr,
+ /*tries_size*/ nullptr);
+ ASSERT_EQ(insns_size_in_code_units, out_insns_size_in_code_units);
+ };
+ static constexpr uint32_t kMax32 = std::numeric_limits<uint32_t>::max();
+ static constexpr uint16_t kMax16 = std::numeric_limits<uint16_t>::max();
+ test_and_write(0, 0, 0, 0, 0);
+ test_and_write(kMax16, kMax16, kMax16, kMax16, kMax32);
+ test_and_write(kMax16 - 1, kMax16 - 2, kMax16 - 3, kMax16 - 4, kMax32 - 5);
+ test_and_write(kMax16 - 4, kMax16 - 5, kMax16 - 3, kMax16 - 2, kMax32 - 1);
+ test_and_write(5, 4, 3, 2, 1);
+ test_and_write(5, 0, 3, 2, 1);
+ test_and_write(kMax16, 0, kMax16 / 2, 1234, kMax32 / 4);
+}
+
} // namespace art
diff --git a/runtime/dex/compact_dex_utils.h b/runtime/dex/compact_dex_utils.h
new file mode 100644
index 0000000000..1c7e9514fd
--- /dev/null
+++ b/runtime/dex/compact_dex_utils.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_DEX_COMPACT_DEX_UTILS_H_
+#define ART_RUNTIME_DEX_COMPACT_DEX_UTILS_H_
+
+#include <vector>
+
+#include "base/bit_utils.h"
+
+namespace art {
+
+// Add padding to the end of the array until the size is aligned.
+template <typename T, template<typename> class Allocator>
+static inline void AlignmentPadVector(std::vector<T, Allocator<T>>* dest,
+ size_t alignment) {
+ while (!IsAlignedParam(dest->size(), alignment)) {
+ dest->push_back(T());
+ }
+}
+
+} // namespace art
+
+#endif // ART_RUNTIME_DEX_COMPACT_DEX_UTILS_H_
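A minimal usage sketch for the helper above (illustrative only):

  std::vector<uint8_t> buf = {1, 2, 3};
  AlignmentPadVector(&buf, alignof(uint32_t));  // appends one zero byte; buf.size() is now 4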
diff --git a/runtime/dex/descriptors_names.cc b/runtime/dex/descriptors_names.cc
new file mode 100644
index 0000000000..8124e7256f
--- /dev/null
+++ b/runtime/dex/descriptors_names.cc
@@ -0,0 +1,426 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "descriptors_names.h"
+
+#include "android-base/stringprintf.h"
+#include "android-base/strings.h"
+
+#include "dex/utf-inl.h"
+
+namespace art {
+
+using android::base::StringAppendF;
+using android::base::StringPrintf;
+
+void AppendPrettyDescriptor(const char* descriptor, std::string* result) {
+ // Count the number of '['s to get the dimensionality.
+ const char* c = descriptor;
+ size_t dim = 0;
+ while (*c == '[') {
+ dim++;
+ c++;
+ }
+
+ // Reference or primitive?
+ if (*c == 'L') {
+ // "[[La/b/C;" -> "a.b.C[][]".
+ c++; // Skip the 'L'.
+ } else {
+ // "[[B" -> "byte[][]".
+ // To make life easier, we make primitives look like unqualified
+ // reference types.
+ switch (*c) {
+ case 'B': c = "byte;"; break;
+ case 'C': c = "char;"; break;
+ case 'D': c = "double;"; break;
+ case 'F': c = "float;"; break;
+ case 'I': c = "int;"; break;
+ case 'J': c = "long;"; break;
+ case 'S': c = "short;"; break;
+ case 'Z': c = "boolean;"; break;
+ case 'V': c = "void;"; break; // Used when decoding return types.
+ default: result->append(descriptor); return;
+ }
+ }
+
+ // At this point, 'c' is a string of the form "fully/qualified/Type;"
+ // or "primitive;". Rewrite the type with '.' instead of '/':
+ const char* p = c;
+ while (*p != ';') {
+ char ch = *p++;
+ if (ch == '/') {
+ ch = '.';
+ }
+ result->push_back(ch);
+ }
+ // ...and replace the semicolon with 'dim' "[]" pairs:
+ for (size_t i = 0; i < dim; ++i) {
+ result->append("[]");
+ }
+}
+
+std::string PrettyDescriptor(const char* descriptor) {
+ std::string result;
+ AppendPrettyDescriptor(descriptor, &result);
+ return result;
+}
+
+std::string GetJniShortName(const std::string& class_descriptor, const std::string& method) {
+ // Remove the leading 'L' and trailing ';'...
+ std::string class_name(class_descriptor);
+ CHECK_EQ(class_name[0], 'L') << class_name;
+ CHECK_EQ(class_name[class_name.size() - 1], ';') << class_name;
+ class_name.erase(0, 1);
+ class_name.erase(class_name.size() - 1, 1);
+
+ std::string short_name;
+ short_name += "Java_";
+ short_name += MangleForJni(class_name);
+ short_name += "_";
+ short_name += MangleForJni(method);
+ return short_name;
+}
+
+// See http://java.sun.com/j2se/1.5.0/docs/guide/jni/spec/design.html#wp615 for the full rules.
+std::string MangleForJni(const std::string& s) {
+ std::string result;
+ size_t char_count = CountModifiedUtf8Chars(s.c_str());
+ const char* cp = &s[0];
+ for (size_t i = 0; i < char_count; ++i) {
+ uint32_t ch = GetUtf16FromUtf8(&cp);
+ if ((ch >= 'A' && ch <= 'Z') || (ch >= 'a' && ch <= 'z') || (ch >= '0' && ch <= '9')) {
+ result.push_back(ch);
+ } else if (ch == '.' || ch == '/') {
+ result += "_";
+ } else if (ch == '_') {
+ result += "_1";
+ } else if (ch == ';') {
+ result += "_2";
+ } else if (ch == '[') {
+ result += "_3";
+ } else {
+ const uint16_t leading = GetLeadingUtf16Char(ch);
+ const uint32_t trailing = GetTrailingUtf16Char(ch);
+
+ StringAppendF(&result, "_0%04x", leading);
+ if (trailing != 0) {
+ StringAppendF(&result, "_0%04x", trailing);
+ }
+ }
+ }
+ return result;
+}
+
+std::string DotToDescriptor(const char* class_name) {
+ std::string descriptor(class_name);
+ std::replace(descriptor.begin(), descriptor.end(), '.', '/');
+ if (descriptor.length() > 0 && descriptor[0] != '[') {
+ descriptor = "L" + descriptor + ";";
+ }
+ return descriptor;
+}
+
+std::string DescriptorToDot(const char* descriptor) {
+ size_t length = strlen(descriptor);
+ if (length > 1) {
+ if (descriptor[0] == 'L' && descriptor[length - 1] == ';') {
+ // Descriptors have the leading 'L' and trailing ';' stripped.
+ std::string result(descriptor + 1, length - 2);
+ std::replace(result.begin(), result.end(), '/', '.');
+ return result;
+ } else {
+ // For arrays the 'L' and ';' remain intact.
+ std::string result(descriptor);
+ std::replace(result.begin(), result.end(), '/', '.');
+ return result;
+ }
+ }
+ // Do nothing for non-class/array descriptors.
+ return descriptor;
+}
+
+std::string DescriptorToName(const char* descriptor) {
+ size_t length = strlen(descriptor);
+ if (descriptor[0] == 'L' && descriptor[length - 1] == ';') {
+ std::string result(descriptor + 1, length - 2);
+ return result;
+ }
+ return descriptor;
+}
+
+// Helper for IsValidPartOfMemberNameUtf8(), a bit vector indicating valid low ascii.
+static uint32_t DEX_MEMBER_VALID_LOW_ASCII[4] = {
+ 0x00000000, // 00..1f low control characters; nothing valid
+ 0x03ff2010, // 20..3f digits and symbols; valid: '0'..'9', '$', '-'
+ 0x87fffffe, // 40..5f uppercase etc.; valid: 'A'..'Z', '_'
+ 0x07fffffe // 60..7f lowercase etc.; valid: 'a'..'z'
+};
+
+// Helper for IsValidPartOfMemberNameUtf8(); do not call directly.
+static bool IsValidPartOfMemberNameUtf8Slow(const char** pUtf8Ptr) {
+ /*
+ * It's a multibyte encoded character. Decode it and analyze. We
+ * accept anything that isn't (a) an improperly encoded low value,
+ * (b) an improper surrogate pair, (c) an encoded '\0', (d) a high
+ * control character, or (e) a high space, layout, or special
+ * character (U+00a0, U+2000..U+200f, U+2028..U+202f,
+ * U+fff0..U+ffff). This is all specified in the dex format
+ * document.
+ */
+
+ const uint32_t pair = GetUtf16FromUtf8(pUtf8Ptr);
+ const uint16_t leading = GetLeadingUtf16Char(pair);
+
+ // We have a surrogate pair resulting from a valid 4 byte UTF sequence.
+ // No further checks are necessary because 4 byte sequences span code
+ // points [U+10000, U+1FFFFF], which are valid codepoints in a dex
+ // identifier. Furthermore, GetUtf16FromUtf8 guarantees that each of
+ // the surrogate halves are valid and well formed in this instance.
+ if (GetTrailingUtf16Char(pair) != 0) {
+ return true;
+ }
+
+
+ // We've encountered a one, two or three byte UTF-8 sequence. The
+ // three byte UTF-8 sequence could be one half of a surrogate pair.
+ switch (leading >> 8) {
+ case 0x00:
+ // It's only valid if it's above the ISO-8859-1 high space (0xa0).
+ return (leading > 0x00a0);
+ case 0xd8:
+ case 0xd9:
+ case 0xda:
+ case 0xdb:
+ {
+ // We found a three byte sequence encoding one half of a surrogate.
+ // Look for the other half.
+ const uint32_t pair2 = GetUtf16FromUtf8(pUtf8Ptr);
+ const uint16_t trailing = GetLeadingUtf16Char(pair2);
+
+ return (GetTrailingUtf16Char(pair2) == 0) && (0xdc00 <= trailing && trailing <= 0xdfff);
+ }
+ case 0xdc:
+ case 0xdd:
+ case 0xde:
+ case 0xdf:
+ // It's a trailing surrogate, which is not valid at this point.
+ return false;
+ case 0x20:
+ case 0xff:
+ // It's in the range that has spaces, controls, and specials.
+ switch (leading & 0xfff8) {
+ case 0x2000:
+ case 0x2008:
+ case 0x2028:
+ case 0xfff0:
+ case 0xfff8:
+ return false;
+ }
+ return true;
+ default:
+ return true;
+ }
+
+ UNREACHABLE();
+}
+
+/* Return whether the pointed-at modified-UTF-8 encoded character is
+ * valid as part of a member name, updating the pointer to point past
+ * the consumed character. This will consume two encoded UTF-16 code
+ * points if the character is encoded as a surrogate pair. Also, if
+ * this function returns false, then the given pointer may only have
+ * been partially advanced.
+ */
+static bool IsValidPartOfMemberNameUtf8(const char** pUtf8Ptr) {
+ uint8_t c = (uint8_t) **pUtf8Ptr;
+ if (LIKELY(c <= 0x7f)) {
+ // It's low-ascii, so check the table.
+ uint32_t wordIdx = c >> 5;
+ uint32_t bitIdx = c & 0x1f;
+ (*pUtf8Ptr)++;
+ return (DEX_MEMBER_VALID_LOW_ASCII[wordIdx] & (1 << bitIdx)) != 0;
+ }
+
+ // It's a multibyte encoded character. Call a non-inline function
+ // for the heavy lifting.
+ return IsValidPartOfMemberNameUtf8Slow(pUtf8Ptr);
+}
+
+bool IsValidMemberName(const char* s) {
+ bool angle_name = false;
+
+ switch (*s) {
+ case '\0':
+ // The empty string is not a valid name.
+ return false;
+ case '<':
+ angle_name = true;
+ s++;
+ break;
+ }
+
+ while (true) {
+ switch (*s) {
+ case '\0':
+ return !angle_name;
+ case '>':
+ return angle_name && s[1] == '\0';
+ }
+
+ if (!IsValidPartOfMemberNameUtf8(&s)) {
+ return false;
+ }
+ }
+}
+
+enum ClassNameType { kName, kDescriptor };
+template<ClassNameType kType, char kSeparator>
+static bool IsValidClassName(const char* s) {
+ int arrayCount = 0;
+ while (*s == '[') {
+ arrayCount++;
+ s++;
+ }
+
+ if (arrayCount > 255) {
+ // Arrays may have no more than 255 dimensions.
+ return false;
+ }
+
+ ClassNameType type = kType;
+ if (type != kDescriptor && arrayCount != 0) {
+ /*
+ * If we're looking at an array of some sort, then it doesn't
+ * matter if what is being asked for is a class name; the
+ * format looks the same as a type descriptor in that case, so
+ * treat it as such.
+ */
+ type = kDescriptor;
+ }
+
+ if (type == kDescriptor) {
+ /*
+ * We are looking for a descriptor. Either validate it as a
+ * single-character primitive type, or continue on to check the
+ * embedded class name (bracketed by "L" and ";").
+ */
+ switch (*(s++)) {
+ case 'B':
+ case 'C':
+ case 'D':
+ case 'F':
+ case 'I':
+ case 'J':
+ case 'S':
+ case 'Z':
+ // These are all single-character descriptors for primitive types.
+ return (*s == '\0');
+ case 'V':
+ // Non-array void is valid, but you can't have an array of void.
+ return (arrayCount == 0) && (*s == '\0');
+ case 'L':
+ // Class name: Break out and continue below.
+ break;
+ default:
+ // Oddball descriptor character.
+ return false;
+ }
+ }
+
+ /*
+ * We just consumed the 'L' that introduces a class name as part
+ * of a type descriptor, or we are looking for an unadorned class
+ * name.
+ */
+
+ bool sepOrFirst = true; // first character or just encountered a separator.
+ for (;;) {
+ uint8_t c = (uint8_t) *s;
+ switch (c) {
+ case '\0':
+ /*
+ * Premature end for a type descriptor, but valid for
+ * a class name as long as we haven't encountered an
+ * empty component (including the degenerate case of
+ * the empty string "").
+ */
+ return (type == kName) && !sepOrFirst;
+ case ';':
+ /*
+ * Invalid character for a class name, but the
+ * legitimate end of a type descriptor. In the latter
+ * case, make sure that this is the end of the string
+ * and that it doesn't end with an empty component
+ * (including the degenerate case of "L;").
+ */
+ return (type == kDescriptor) && !sepOrFirst && (s[1] == '\0');
+ case '/':
+ case '.':
+ if (c != kSeparator) {
+ // The wrong separator character.
+ return false;
+ }
+ if (sepOrFirst) {
+ // Separator at start or two separators in a row.
+ return false;
+ }
+ sepOrFirst = true;
+ s++;
+ break;
+ default:
+ if (!IsValidPartOfMemberNameUtf8(&s)) {
+ return false;
+ }
+ sepOrFirst = false;
+ break;
+ }
+ }
+}
+
+bool IsValidBinaryClassName(const char* s) {
+ return IsValidClassName<kName, '.'>(s);
+}
+
+bool IsValidJniClassName(const char* s) {
+ return IsValidClassName<kName, '/'>(s);
+}
+
+bool IsValidDescriptor(const char* s) {
+ return IsValidClassName<kDescriptor, '/'>(s);
+}
+
+void Split(const std::string& s, char separator, std::vector<std::string>* result) {
+ const char* p = s.data();
+ const char* end = p + s.size();
+ while (p != end) {
+ if (*p == separator) {
+ ++p;
+ } else {
+ const char* start = p;
+ while (++p != end && *p != separator) {
+ // Skip to the next occurrence of the separator.
+ }
+ result->push_back(std::string(start, p - start));
+ }
+ }
+}
+
+std::string PrettyDescriptor(Primitive::Type type) {
+ return PrettyDescriptor(Primitive::Descriptor(type));
+}
+
+} // namespace art
diff --git a/runtime/dex/descriptors_names.h b/runtime/dex/descriptors_names.h
new file mode 100644
index 0000000000..22e9573556
--- /dev/null
+++ b/runtime/dex/descriptors_names.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_DEX_DESCRIPTORS_NAMES_H_
+#define ART_RUNTIME_DEX_DESCRIPTORS_NAMES_H_
+
+#include <string>
+
+#include "primitive.h"
+
+namespace art {
+
+// Used to implement PrettyClass, PrettyField, PrettyMethod, and PrettyTypeOf,
+// one of which is probably more useful to you.
+// Returns a human-readable equivalent of 'descriptor'. So "I" would be "int",
+// "[[I" would be "int[][]", "[Ljava/lang/String;" would be
+// "java.lang.String[]", and so forth.
+void AppendPrettyDescriptor(const char* descriptor, std::string* result);
+std::string PrettyDescriptor(const char* descriptor);
+std::string PrettyDescriptor(Primitive::Type type);
+
+// Performs JNI name mangling as described in section 11.3 "Linking Native Methods"
+// of the JNI spec.
+std::string MangleForJni(const std::string& s);
+
+std::string GetJniShortName(const std::string& class_name, const std::string& method_name);
+
+// Turn "java.lang.String" into "Ljava/lang/String;".
+std::string DotToDescriptor(const char* class_name);
+
+// Turn "Ljava/lang/String;" into "java.lang.String" using the conventions of
+// java.lang.Class.getName().
+std::string DescriptorToDot(const char* descriptor);
+
+// Turn "Ljava/lang/String;" into "java/lang/String" using the opposite conventions of
+// java.lang.Class.getName().
+std::string DescriptorToName(const char* descriptor);
+
+// Tests for whether 's' is a valid class name in the three common forms:
+bool IsValidBinaryClassName(const char* s); // "java.lang.String"
+bool IsValidJniClassName(const char* s); // "java/lang/String"
+bool IsValidDescriptor(const char* s); // "Ljava/lang/String;"
+
+// Returns whether the given string is a valid field or method name,
+// additionally allowing names that begin with '<' and end with '>'.
+bool IsValidMemberName(const char* s);
+
+} // namespace art
+
+#endif // ART_RUNTIME_DEX_DESCRIPTORS_NAMES_H_
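Illustrative expectations for these helpers, based on the conventions documented above (not part of the patch):

  PrettyDescriptor("[[Ljava/lang/String;")  // "java.lang.String[][]"
  DotToDescriptor("java.lang.String")       // "Ljava/lang/String;"
  DescriptorToDot("Ljava/lang/String;")     // "java.lang.String"
  DescriptorToName("Ljava/lang/String;")    // "java/lang/String"
  MangleForJni("java/lang/String")          // "java_lang_String"
  IsValidDescriptor("Ljava/lang/String;")   // true
  IsValidMemberName("<init>")               // true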
diff --git a/runtime/dex/dex_file-inl.h b/runtime/dex/dex_file-inl.h
index 9b56328a71..aa53daac35 100644
--- a/runtime/dex/dex_file-inl.h
+++ b/runtime/dex/dex_file-inl.h
@@ -29,14 +29,14 @@
namespace art {
inline int32_t DexFile::GetStringLength(const StringId& string_id) const {
- const uint8_t* ptr = begin_ + string_id.string_data_off_;
+ const uint8_t* ptr = DataBegin() + string_id.string_data_off_;
return DecodeUnsignedLeb128(&ptr);
}
inline const char* DexFile::GetStringDataAndUtf16Length(const StringId& string_id,
uint32_t* utf16_length) const {
DCHECK(utf16_length != nullptr) << GetLocation();
- const uint8_t* ptr = begin_ + string_id.string_data_off_;
+ const uint8_t* ptr = DataBegin() + string_id.string_data_off_;
*utf16_length = DecodeUnsignedLeb128(&ptr);
return reinterpret_cast<const char*>(ptr);
}
@@ -136,7 +136,7 @@ inline const char* DexFile::GetShorty(uint32_t proto_idx) const {
inline const DexFile::TryItem* DexFile::GetTryItems(const DexInstructionIterator& code_item_end,
uint32_t offset) {
return reinterpret_cast<const TryItem*>
- (RoundUp(reinterpret_cast<uintptr_t>(&code_item_end.Inst()), 4)) + offset;
+ (RoundUp(reinterpret_cast<uintptr_t>(&code_item_end.Inst()), TryItem::kAlignment)) + offset;
}
static inline bool DexFileStringEquals(const DexFile* df1, dex::StringIndex sidx1,
diff --git a/runtime/dex/dex_file.cc b/runtime/dex/dex_file.cc
index 16325b83f6..18eb903551 100644
--- a/runtime/dex/dex_file.cc
+++ b/runtime/dex/dex_file.cc
@@ -30,11 +30,11 @@
#include "base/enums.h"
#include "base/stl_util.h"
+#include "descriptors_names.h"
#include "dex_file-inl.h"
#include "leb128.h"
#include "standard_dex_file.h"
#include "utf-inl.h"
-#include "utils.h"
namespace art {
@@ -50,9 +50,12 @@ uint32_t DexFile::CalculateChecksum() const {
}
uint32_t DexFile::CalculateChecksum(const uint8_t* begin, size_t size) {
- const uint32_t non_sum = OFFSETOF_MEMBER(DexFile::Header, signature_);
- const uint8_t* non_sum_ptr = begin + non_sum;
- return adler32(adler32(0L, Z_NULL, 0), non_sum_ptr, size - non_sum);
+ const uint32_t non_sum_bytes = OFFSETOF_MEMBER(DexFile::Header, signature_);
+ return ChecksumMemoryRange(begin + non_sum_bytes, size - non_sum_bytes);
+}
+
+uint32_t DexFile::ChecksumMemoryRange(const uint8_t* begin, size_t size) {
+ return adler32(adler32(0L, Z_NULL, 0), begin, size);
}
int DexFile::GetPermissions() const {
@@ -77,6 +80,8 @@ bool DexFile::DisableWrite() const {
DexFile::DexFile(const uint8_t* base,
size_t size,
+ const uint8_t* data_begin,
+ size_t data_size,
const std::string& location,
uint32_t location_checksum,
const OatDexFile* oat_dex_file,
@@ -84,6 +89,8 @@ DexFile::DexFile(const uint8_t* base,
bool is_compact_dex)
: begin_(base),
size_(size),
+ data_begin_(data_begin),
+ data_size_(data_size),
location_(location),
location_checksum_(location_checksum),
header_(reinterpret_cast<const Header*>(base)),
@@ -149,15 +156,15 @@ bool DexFile::CheckMagicAndVersion(std::string* error_msg) const {
}
void DexFile::InitializeSectionsFromMapList() {
- const MapList* map_list = reinterpret_cast<const MapList*>(begin_ + header_->map_off_);
- if (header_->map_off_ == 0 || header_->map_off_ > size_) {
+ const MapList* map_list = reinterpret_cast<const MapList*>(DataBegin() + header_->map_off_);
+ if (header_->map_off_ == 0 || header_->map_off_ > DataSize()) {
// Bad offset. The dex file verifier runs after this method and will reject the file.
return;
}
const size_t count = map_list->size_;
size_t map_limit = header_->map_off_ + count * sizeof(MapItem);
- if (header_->map_off_ >= map_limit || map_limit > size_) {
+ if (header_->map_off_ >= map_limit || map_limit > DataSize()) {
// Overflow or out of bounds. The dex file verifier runs after
// this method and will reject the file as it is malformed.
return;
@@ -166,10 +173,10 @@ void DexFile::InitializeSectionsFromMapList() {
for (size_t i = 0; i < count; ++i) {
const MapItem& map_item = map_list->list_[i];
if (map_item.type_ == kDexTypeMethodHandleItem) {
- method_handles_ = reinterpret_cast<const MethodHandleItem*>(begin_ + map_item.offset_);
+ method_handles_ = reinterpret_cast<const MethodHandleItem*>(Begin() + map_item.offset_);
num_method_handles_ = map_item.size_;
} else if (map_item.type_ == kDexTypeCallSiteIdItem) {
- call_site_ids_ = reinterpret_cast<const CallSiteIdItem*>(begin_ + map_item.offset_);
+ call_site_ids_ = reinterpret_cast<const CallSiteIdItem*>(Begin() + map_item.offset_);
num_call_site_ids_ = map_item.size_;
}
}
diff --git a/runtime/dex/dex_file.h b/runtime/dex/dex_file.h
index c2a36ce01a..cf8c840b59 100644
--- a/runtime/dex/dex_file.h
+++ b/runtime/dex/dex_file.h
@@ -29,6 +29,7 @@
#include "dex_file_types.h"
#include "dex_instruction_iterator.h"
#include "globals.h"
+#include "hidden_api_access_flags.h"
#include "jni.h"
#include "modifiers.h"
@@ -137,9 +138,6 @@ class DexFile {
uint16_t unused_;
uint32_t size_;
uint32_t offset_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MapItem);
};
struct MapList {
@@ -301,53 +299,26 @@ class DexFile {
DISALLOW_COPY_AND_ASSIGN(CallSiteIdItem);
};
- // Raw code_item.
+ // Base code_item, compact dex and standard dex have different code item layouts.
struct CodeItem {
- // Used when quickening / unquickening.
- void SetDebugInfoOffset(uint32_t new_offset) {
- debug_info_off_ = new_offset;
- }
-
- uint32_t GetDebugInfoOffset() const {
- return debug_info_off_;
- }
-
protected:
- uint16_t registers_size_; // the number of registers used by this code
- // (locals + parameters)
- uint16_t ins_size_; // the number of words of incoming arguments to the method
- // that this code is for
- uint16_t outs_size_; // the number of words of outgoing argument space required
- // by this code for method invocation
- uint16_t tries_size_; // the number of try_items for this instance. If non-zero,
- // then these appear as the tries array just after the
- // insns in this instance.
- // Normally holds file offset to debug info stream. In case the method has been quickened
- // holds an offset in the Vdex file containing both the actual debug_info_off and the
- // quickening info offset.
- // Don't use this field directly, use OatFile::GetDebugInfoOffset in general ART code,
- // or DexFile::GetDebugInfoOffset in code that are not using a Runtime.
- uint32_t debug_info_off_;
-
- uint32_t insns_size_in_code_units_; // size of the insns array, in 2 byte code units
- uint16_t insns_[1]; // actual array of bytecode.
+ CodeItem() = default;
private:
- ART_FRIEND_TEST(CodeItemAccessorsTest, TestDexInstructionsAccessor);
- friend class CodeItemDataAccessor;
- friend class CodeItemDebugInfoAccessor;
- friend class CodeItemInstructionAccessor;
- friend class VdexFile; // TODO: Remove this one when it's cleaned up.
DISALLOW_COPY_AND_ASSIGN(CodeItem);
};
// Raw try_item.
struct TryItem {
+ static constexpr size_t kAlignment = sizeof(uint32_t);
+
uint32_t start_addr_;
uint16_t insn_count_;
uint16_t handler_off_;
private:
+ TryItem() = default;
+ friend class DexWriter;
DISALLOW_COPY_AND_ASSIGN(TryItem);
};
@@ -485,6 +456,13 @@ class DexFile {
// Returns true if the dex file supports default methods.
virtual bool SupportsDefaultMethods() const = 0;
+ // Returns the maximum size in bytes needed to store an equivalent dex file strictly conforming to
+ // the dex file specification. That is the size if we wanted to get rid of all the
+ // quickening/compact-dexing/etc.
+ //
+ // TODO This should really be an exact size! b/72402467
+ virtual size_t GetDequickenedSize() const = 0;
+
// Returns the number of string identifiers in the .dex file.
size_t NumStringIds() const {
DCHECK(header_ != nullptr) << GetLocation();
@@ -672,11 +650,7 @@ class DexFile {
const ClassDef* FindClassDef(dex::TypeIndex type_idx) const;
const TypeList* GetInterfacesList(const ClassDef& class_def) const {
- if (class_def.interfaces_off_ == 0) {
- return nullptr;
- }
- const uint8_t* addr = begin_ + class_def.interfaces_off_;
- return reinterpret_cast<const TypeList*>(addr);
+ return DataPointer<TypeList>(class_def.interfaces_off_);
}
uint32_t NumMethodHandles() const {
@@ -699,26 +673,13 @@ class DexFile {
// Returns a pointer to the raw memory mapped class_data_item
const uint8_t* GetClassData(const ClassDef& class_def) const {
- return (class_def.class_data_off_ == 0) ? nullptr : begin_ + class_def.class_data_off_;
+ return DataPointer<uint8_t>(class_def.class_data_off_);
}
- //
+ // Return the code item for a provided offset.
const CodeItem* GetCodeItem(const uint32_t code_off) const {
- DCHECK_LT(code_off, size_) << "Code item offset larger then maximum allowed offset";
- if (code_off == 0) {
- return nullptr; // native or abstract method
- }
- const uint8_t* addr = begin_ + code_off;
- return reinterpret_cast<const CodeItem*>(addr);
- }
-
- uint32_t GetDebugInfoOffset(const CodeItem* code_item) const {
- if (code_item == nullptr) {
- return 0;
- }
- CHECK(oat_dex_file_ == nullptr)
- << "Should only use GetDebugInfoOffset in a non runtime setup";
- return code_item->GetDebugInfoOffset();
+ // May be null for native or abstract methods.
+ return DataPointer<CodeItem>(code_off);
}
const char* GetReturnTypeDescriptor(const ProtoId& proto_id) const;
@@ -763,17 +724,15 @@ class DexFile {
const char* GetShorty(uint32_t proto_idx) const;
const TypeList* GetProtoParameters(const ProtoId& proto_id) const {
- return (proto_id.parameters_off_ == 0)
- ? nullptr
- : reinterpret_cast<const TypeList*>(begin_ + proto_id.parameters_off_);
+ return DataPointer<TypeList>(proto_id.parameters_off_);
}
const uint8_t* GetEncodedStaticFieldValuesArray(const ClassDef& class_def) const {
- return (class_def.static_values_off_ == 0) ? 0 : begin_ + class_def.static_values_off_;
+ return DataPointer<uint8_t>(class_def.static_values_off_);
}
const uint8_t* GetCallSiteEncodedValuesArray(const CallSiteIdItem& call_site_id) const {
- return begin_ + call_site_id.data_off_;
+ return DataBegin() + call_site_id.data_off_;
}
static const TryItem* GetTryItems(const DexInstructionIterator& code_item_end, uint32_t offset);
@@ -791,7 +750,9 @@ class DexFile {
// Check that the offset is in bounds.
// Note that although the specification says that 0 should be used if there
// is no debug information, some applications incorrectly use 0xFFFFFFFF.
- return (debug_info_off == 0 || debug_info_off >= size_) ? nullptr : begin_ + debug_info_off;
+ return (debug_info_off == 0 || debug_info_off >= data_size_)
+ ? nullptr
+ : DataBegin() + debug_info_off;
}
struct PositionInfo {
@@ -822,21 +783,17 @@ class DexFile {
static bool LineNumForPcCb(void* context, const PositionInfo& entry);
const AnnotationsDirectoryItem* GetAnnotationsDirectory(const ClassDef& class_def) const {
- return (class_def.annotations_off_ == 0)
- ? nullptr
- : reinterpret_cast<const AnnotationsDirectoryItem*>(begin_ + class_def.annotations_off_);
+ return DataPointer<AnnotationsDirectoryItem>(class_def.annotations_off_);
}
const AnnotationSetItem* GetClassAnnotationSet(const AnnotationsDirectoryItem* anno_dir) const {
- return (anno_dir->class_annotations_off_ == 0)
- ? nullptr
- : reinterpret_cast<const AnnotationSetItem*>(begin_ + anno_dir->class_annotations_off_);
+ return DataPointer<AnnotationSetItem>(anno_dir->class_annotations_off_);
}
const FieldAnnotationsItem* GetFieldAnnotations(const AnnotationsDirectoryItem* anno_dir) const {
return (anno_dir->fields_size_ == 0)
- ? nullptr
- : reinterpret_cast<const FieldAnnotationsItem*>(&anno_dir[1]);
+ ? nullptr
+ : reinterpret_cast<const FieldAnnotationsItem*>(&anno_dir[1]);
}
const MethodAnnotationsItem* GetMethodAnnotations(const AnnotationsDirectoryItem* anno_dir)
@@ -863,33 +820,21 @@ class DexFile {
}
const AnnotationSetItem* GetFieldAnnotationSetItem(const FieldAnnotationsItem& anno_item) const {
- uint32_t offset = anno_item.annotations_off_;
- return (offset == 0)
- ? nullptr
- : reinterpret_cast<const AnnotationSetItem*>(begin_ + offset);
+ return DataPointer<AnnotationSetItem>(anno_item.annotations_off_);
}
const AnnotationSetItem* GetMethodAnnotationSetItem(const MethodAnnotationsItem& anno_item)
const {
- uint32_t offset = anno_item.annotations_off_;
- return (offset == 0)
- ? nullptr
- : reinterpret_cast<const AnnotationSetItem*>(begin_ + offset);
+ return DataPointer<AnnotationSetItem>(anno_item.annotations_off_);
}
const AnnotationSetRefList* GetParameterAnnotationSetRefList(
const ParameterAnnotationsItem* anno_item) const {
- uint32_t offset = anno_item->annotations_off_;
- return (offset == 0)
- ? nullptr
- : reinterpret_cast<const AnnotationSetRefList*>(begin_ + offset);
+ return DataPointer<AnnotationSetRefList>(anno_item->annotations_off_);
}
ALWAYS_INLINE const AnnotationItem* GetAnnotationItemAtOffset(uint32_t offset) const {
- DCHECK_LE(offset, Size());
- return (offset == 0)
- ? nullptr
- : reinterpret_cast<const AnnotationItem*>(begin_ + offset);
+ return DataPointer<AnnotationItem>(offset);
}
const AnnotationItem* GetAnnotationItem(const AnnotationSetItem* set_item, uint32_t index) const {
@@ -898,10 +843,7 @@ class DexFile {
}
const AnnotationSetItem* GetSetRefItemItem(const AnnotationSetRefItem* anno_item) const {
- uint32_t offset = anno_item->annotations_off_;
- return (offset == 0)
- ? nullptr
- : reinterpret_cast<const AnnotationSetItem*>(begin_ + offset);
+ return DataPointer<AnnotationSetItem>(anno_item->annotations_off_);
}
// Debug info opcodes and constants
@@ -990,6 +932,20 @@ class DexFile {
return size_;
}
+ const uint8_t* DataBegin() const {
+ return data_begin_;
+ }
+
+ size_t DataSize() const {
+ return data_size_;
+ }
+
+ template <typename T>
+ const T* DataPointer(size_t offset) const {
+ DCHECK_LT(offset, DataSize()) << "Offset past end of data section";
+ return (offset != 0u) ? reinterpret_cast<const T*>(DataBegin() + offset) : nullptr;
+ }
+
const OatDexFile* GetOatDexFile() const {
return oat_dex_file_;
}
@@ -999,6 +955,11 @@ class DexFile {
oat_dex_file_ = oat_dex_file;
}
+ // Returns the raw map_list section, used to read MapItems and validate/set remaining offsets.
+ const DexFile::MapList* GetMapList() const {
+ return reinterpret_cast<const DexFile::MapList*>(DataBegin() + header_->map_off_);
+ }
+
// Utility methods for reading integral values from a buffer.
static int32_t ReadSignedInt(const uint8_t* ptr, int zwidth);
static uint32_t ReadUnsignedInt(const uint8_t* ptr, int zwidth, bool fill_on_right);
@@ -1006,8 +967,9 @@ class DexFile {
static uint64_t ReadUnsignedLong(const uint8_t* ptr, int zwidth, bool fill_on_right);
// Recalculates the checksum of the dex file. Does not use the current value in the header.
- uint32_t CalculateChecksum() const;
+ virtual uint32_t CalculateChecksum() const;
static uint32_t CalculateChecksum(const uint8_t* begin, size_t size);
+ static uint32_t ChecksumMemoryRange(const uint8_t* begin, size_t size);
// Returns a human-readable form of the method at an index.
std::string PrettyMethod(uint32_t method_idx, bool with_signature = true) const;
@@ -1026,16 +988,26 @@ class DexFile {
ALWAYS_INLINE const StandardDexFile* AsStandardDexFile() const;
ALWAYS_INLINE const CompactDexFile* AsCompactDexFile() const;
- bool HasAddress(const void* addr) const {
+ bool IsInMainSection(const void* addr) const {
return Begin() <= addr && addr < Begin() + Size();
}
+ bool IsInDataSection(const void* addr) const {
+ return DataBegin() <= addr && addr < DataBegin() + DataSize();
+ }
+
+ DexFileContainer* GetContainer() const {
+ return container_.get();
+ }
+
protected:
// First Dex format version supporting default methods.
static const uint32_t kDefaultMethodsVersion = 37;
DexFile(const uint8_t* base,
size_t size,
+ const uint8_t* data_begin,
+ size_t data_size,
const std::string& location,
uint32_t location_checksum,
const OatDexFile* oat_dex_file,
@@ -1057,6 +1029,12 @@ class DexFile {
// The size of the underlying memory allocation in bytes.
const size_t size_;
+ // The base address of the data section (same as Begin() for standard dex).
+ const uint8_t* const data_begin_;
+
+ // The size of the data section.
+ const size_t data_size_;
+
// Typically the dex file name when available, alternatively some identifying string.
//
// The ClassLinker will use this to match DexFiles the boot class
@@ -1290,10 +1268,16 @@ class ClassDataItemIterator {
}
}
uint32_t GetFieldAccessFlags() const {
- return GetRawMemberAccessFlags() & kAccValidFieldFlags;
+ return GetMemberAccessFlags() & kAccValidFieldFlags;
}
uint32_t GetMethodAccessFlags() const {
- return GetRawMemberAccessFlags() & kAccValidMethodFlags;
+ return GetMemberAccessFlags() & kAccValidMethodFlags;
+ }
+ uint32_t GetMemberAccessFlags() const {
+ return HiddenApiAccessFlags::RemoveFromDex(GetRawMemberAccessFlags());
+ }
+ HiddenApiAccessFlags::ApiList DecodeHiddenAccessFlags() const {
+ return HiddenApiAccessFlags::DecodeFromDex(GetRawMemberAccessFlags());
}
bool MemberIsNative() const {
return GetRawMemberAccessFlags() & kAccNative;
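Note on the dex_file.h hunks above: the repeated "offset 0 means absent" checks are now funneled through the new DataPointer<T> helper, and offsets are resolved against the data section rather than the file base (the two differ once a dex file carries a separate data section, as compact dex does). A minimal standalone sketch of the same idiom, using illustrative names rather than the actual ART types; unlike the patch, an out-of-range offset here yields null instead of tripping a DCHECK:

    #include <cstddef>
    #include <cstdint>

    // Sketch of the DataPointer<T> idiom: offsets are relative to the data
    // section, and an offset of zero means "no such item".
    class DataSection {
     public:
      DataSection(const uint8_t* begin, size_t size) : begin_(begin), size_(size) {}

      template <typename T>
      const T* Pointer(size_t offset) const {
        if (offset == 0u || offset >= size_) {
          return nullptr;
        }
        return reinterpret_cast<const T*>(begin_ + offset);
      }

     private:
      const uint8_t* const begin_;
      const size_t size_;
    };

Centralizing the check is also what makes the new IsInMainSection()/IsInDataSection() split meaningful: every item reached through DataPointer<T> is expected to live inside DataBegin()..DataBegin()+DataSize().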
diff --git a/runtime/dex/dex_file_annotations.cc b/runtime/dex/dex_file_annotations.cc
index 72b18fb420..e01890f541 100644
--- a/runtime/dex/dex_file_annotations.cc
+++ b/runtime/dex/dex_file_annotations.cc
@@ -1559,7 +1559,7 @@ int32_t GetLineNumFromPC(const DexFile* dex_file, ArtMethod* method, uint32_t re
return -2;
}
- CodeItemDebugInfoAccessor accessor(method);
+ CodeItemDebugInfoAccessor accessor(method->DexInstructionDebugInfo());
DCHECK(accessor.HasCodeItem()) << method->PrettyMethod() << " " << dex_file->GetLocation();
// A method with no line number info should return -1
diff --git a/runtime/dex/dex_file_exception_helpers.cc b/runtime/dex/dex_file_exception_helpers.cc
index ad56eb0a0b..8e597fd3dd 100644
--- a/runtime/dex/dex_file_exception_helpers.cc
+++ b/runtime/dex/dex_file_exception_helpers.cc
@@ -16,7 +16,7 @@
#include "dex_file_exception_helpers.h"
-#include "code_item_accessors-no_art-inl.h"
+#include "code_item_accessors-inl.h"
namespace art {
diff --git a/runtime/dex/dex_file_layout.cc b/runtime/dex/dex_file_layout.cc
index 1973440d55..312898d82f 100644
--- a/runtime/dex/dex_file_layout.cc
+++ b/runtime/dex/dex_file_layout.cc
@@ -19,8 +19,8 @@
#include <sys/mman.h>
#include "base/file_utils.h"
+#include "descriptors_names.h"
#include "dex_file.h"
-#include "utils.h"
namespace art {
diff --git a/runtime/dex/dex_file_layout.h b/runtime/dex/dex_file_layout.h
index a7b9051f24..793e3b5de7 100644
--- a/runtime/dex/dex_file_layout.h
+++ b/runtime/dex/dex_file_layout.h
@@ -83,7 +83,7 @@ class DexLayoutSection {
}
void CombineSection(uint32_t start_offset, uint32_t end_offset) {
- DCHECK_LT(start_offset, end_offset);
+ DCHECK_LE(start_offset, end_offset);
if (start_offset_ == end_offset_) {
start_offset_ = start_offset;
end_offset_ = end_offset;
diff --git a/runtime/dex/dex_file_loader.cc b/runtime/dex/dex_file_loader.cc
index fafd69889d..0f2758e372 100644
--- a/runtime/dex/dex_file_loader.cc
+++ b/runtime/dex/dex_file_loader.cc
@@ -16,71 +16,142 @@
#include "dex_file_loader.h"
-#include <sys/mman.h> // For the PROT_* and MAP_* constants.
-#include <sys/stat.h>
-
#include "android-base/stringprintf.h"
-#include "base/file_magic.h"
#include "base/stl_util.h"
-#include "base/systrace.h"
-#include "base/unix_file/fd_file.h"
#include "compact_dex_file.h"
#include "dex_file.h"
#include "dex_file_verifier.h"
#include "standard_dex_file.h"
-#include "zip_archive.h"
+#include "ziparchive/zip_archive.h"
+
+// system/core/zip_archive definitions.
+struct ZipEntry;
+typedef void* ZipArchiveHandle;
namespace art {
namespace {
-class MemMapContainer : public DexFileContainer {
+class VectorContainer : public DexFileContainer {
public:
- explicit MemMapContainer(std::unique_ptr<MemMap>&& mem_map) : mem_map_(std::move(mem_map)) { }
- virtual ~MemMapContainer() OVERRIDE { }
+ explicit VectorContainer(std::vector<uint8_t>&& vector) : vector_(std::move(vector)) { }
+ virtual ~VectorContainer() OVERRIDE { }
int GetPermissions() OVERRIDE {
- if (mem_map_.get() == nullptr) {
- return 0;
- } else {
- return mem_map_->GetProtect();
- }
+ return 0;
}
bool IsReadOnly() OVERRIDE {
- return GetPermissions() == PROT_READ;
+ return true;
}
bool EnableWrite() OVERRIDE {
- CHECK(IsReadOnly());
- if (mem_map_.get() == nullptr) {
- return false;
- } else {
- return mem_map_->Protect(PROT_READ | PROT_WRITE);
- }
+ return false;
}
bool DisableWrite() OVERRIDE {
- CHECK(!IsReadOnly());
- if (mem_map_.get() == nullptr) {
- return false;
- } else {
- return mem_map_->Protect(PROT_READ);
- }
+ return false;
}
private:
- std::unique_ptr<MemMap> mem_map_;
- DISALLOW_COPY_AND_ASSIGN(MemMapContainer);
+ std::vector<uint8_t> vector_;
+ DISALLOW_COPY_AND_ASSIGN(VectorContainer);
};
} // namespace
using android::base::StringPrintf;
-static constexpr OatDexFile* kNoOatDexFile = nullptr;
+class DexZipArchive;
+class DexZipEntry {
+ public:
+ // Extract this entry to memory.
+ // Returns an empty vector on failure and sets error_msg.
+ const std::vector<uint8_t> Extract(std::string* error_msg) {
+ std::vector<uint8_t> map(GetUncompressedLength());
+ if (map.size() == 0) {
+ DCHECK(!error_msg->empty());
+ return map;
+ }
+ const int32_t error = ExtractToMemory(handle_, zip_entry_, map.data(), map.size());
+ if (error) {
+ *error_msg = std::string(ErrorCodeString(error));
+ }
+ return map;
+ }
+
+ virtual ~DexZipEntry() {
+ delete zip_entry_;
+ }
+
+ uint32_t GetUncompressedLength() {
+ return zip_entry_->uncompressed_length;
+ }
+
+ uint32_t GetCrc32() {
+ return zip_entry_->crc32;
+ }
+
+ private:
+ DexZipEntry(ZipArchiveHandle handle,
+ ::ZipEntry* zip_entry,
+ const std::string& entry_name)
+ : handle_(handle), zip_entry_(zip_entry), entry_name_(entry_name) {}
+
+ ZipArchiveHandle handle_;
+ ::ZipEntry* const zip_entry_;
+ std::string const entry_name_;
+
+ friend class DexZipArchive;
+ DISALLOW_COPY_AND_ASSIGN(DexZipEntry);
+};
+
+class DexZipArchive {
+ public:
+ // Returns a new DexZipArchive instance on success, null on error.
+ static DexZipArchive* Open(const uint8_t* base, size_t size, std::string* error_msg) {
+ ZipArchiveHandle handle;
+ uint8_t* nonconst_base = const_cast<uint8_t*>(base);
+ const int32_t error = OpenArchiveFromMemory(nonconst_base, size, "ZipArchiveMemory", &handle);
+ if (error) {
+ *error_msg = std::string(ErrorCodeString(error));
+ CloseArchive(handle);
+ return nullptr;
+ }
+ return new DexZipArchive(handle);
+ }
+
+ DexZipEntry* Find(const char* name, std::string* error_msg) const {
+ DCHECK(name != nullptr);
+ // Resist the urge to delete the space. <: is a digraph sequence.
+ std::unique_ptr< ::ZipEntry> zip_entry(new ::ZipEntry);
+ const int32_t error = FindEntry(handle_, ZipString(name), zip_entry.get());
+ if (error) {
+ *error_msg = std::string(ErrorCodeString(error));
+ return nullptr;
+ }
+ return new DexZipEntry(handle_, zip_entry.release(), name);
+ }
+
+ ~DexZipArchive() {
+ CloseArchive(handle_);
+ }
+
+
+ private:
+ explicit DexZipArchive(ZipArchiveHandle handle) : handle_(handle) {}
+ ZipArchiveHandle handle_;
+
+ friend class DexZipEntry;
+ DISALLOW_COPY_AND_ASSIGN(DexZipArchive);
+};
+
+static bool IsZipMagic(uint32_t magic) {
+ return (('P' == ((magic >> 0) & 0xff)) &&
+ ('K' == ((magic >> 8) & 0xff)));
+}
bool DexFileLoader::IsMagicValid(uint32_t magic) {
return IsMagicValid(reinterpret_cast<uint8_t*>(&magic));
@@ -101,63 +172,6 @@ bool DexFileLoader::IsVersionAndMagicValid(const uint8_t* magic) {
return false;
}
-bool DexFileLoader::GetMultiDexChecksums(const char* filename,
- std::vector<uint32_t>* checksums,
- std::string* error_msg,
- int zip_fd) {
- CHECK(checksums != nullptr);
- uint32_t magic;
-
- File fd;
- if (zip_fd != -1) {
- if (ReadMagicAndReset(zip_fd, &magic, error_msg)) {
- fd = File(zip_fd, false /* check_usage */);
- }
- } else {
- fd = OpenAndReadMagic(filename, &magic, error_msg);
- }
- if (fd.Fd() == -1) {
- DCHECK(!error_msg->empty());
- return false;
- }
- if (IsZipMagic(magic)) {
- std::unique_ptr<ZipArchive> zip_archive(
- ZipArchive::OpenFromFd(fd.Release(), filename, error_msg));
- if (zip_archive.get() == nullptr) {
- *error_msg = StringPrintf("Failed to open zip archive '%s' (error msg: %s)", filename,
- error_msg->c_str());
- return false;
- }
-
- uint32_t i = 0;
- std::string zip_entry_name = GetMultiDexClassesDexName(i++);
- std::unique_ptr<ZipEntry> zip_entry(zip_archive->Find(zip_entry_name.c_str(), error_msg));
- if (zip_entry.get() == nullptr) {
- *error_msg = StringPrintf("Zip archive '%s' doesn't contain %s (error msg: %s)", filename,
- zip_entry_name.c_str(), error_msg->c_str());
- return false;
- }
-
- do {
- checksums->push_back(zip_entry->GetCrc32());
- zip_entry_name = GetMultiDexClassesDexName(i++);
- zip_entry.reset(zip_archive->Find(zip_entry_name.c_str(), error_msg));
- } while (zip_entry.get() != nullptr);
- return true;
- }
- if (IsMagicValid(magic)) {
- std::unique_ptr<const DexFile> dex_file(
- OpenFile(fd.Release(), filename, false, false, error_msg));
- if (dex_file == nullptr) {
- return false;
- }
- checksums->push_back(dex_file->GetHeader().checksum_);
- return true;
- }
- *error_msg = StringPrintf("Expected valid zip or dex file: '%s'", filename);
- return false;
-}
-
bool DexFileLoader::IsMultiDexLocation(const char* location) {
return strrchr(location, kMultiDexSeparator) != nullptr;
}
@@ -187,6 +201,17 @@ std::string DexFileLoader::GetDexCanonicalLocation(const char* dex_location) {
}
}
+// All of the implementations here should be independent of the runtime.
+// TODO: implement all the virtual methods.
+
+bool DexFileLoader::GetMultiDexChecksums(const char* filename ATTRIBUTE_UNUSED,
+ std::vector<uint32_t>* checksums ATTRIBUTE_UNUSED,
+ std::string* error_msg,
+ int zip_fd ATTRIBUTE_UNUSED) const {
+ *error_msg = "UNIMPLEMENTED";
+ return false;
+}
+
std::unique_ptr<const DexFile> DexFileLoader::Open(const uint8_t* base,
size_t size,
const std::string& location,
@@ -194,10 +219,11 @@ std::unique_ptr<const DexFile> DexFileLoader::Open(const uint8_t* base,
const OatDexFile* oat_dex_file,
bool verify,
bool verify_checksum,
- std::string* error_msg) {
- ScopedTrace trace(std::string("Open dex file from RAM ") + location);
+ std::string* error_msg) const {
return OpenCommon(base,
size,
+ /*data_base*/ nullptr,
+ /*data_size*/ 0,
location,
location_checksum,
oat_dex_file,
@@ -208,58 +234,64 @@ std::unique_ptr<const DexFile> DexFileLoader::Open(const uint8_t* base,
/*verify_result*/ nullptr);
}
-std::unique_ptr<const DexFile> DexFileLoader::Open(const std::string& location,
- uint32_t location_checksum,
- std::unique_ptr<MemMap> map,
- bool verify,
- bool verify_checksum,
- std::string* error_msg) {
- ScopedTrace trace(std::string("Open dex file from mapped-memory ") + location);
- CHECK(map.get() != nullptr);
-
- if (map->Size() < sizeof(DexFile::Header)) {
- *error_msg = StringPrintf(
- "DexFile: failed to open dex file '%s' that is too short to have a header",
- location.c_str());
- return nullptr;
- }
-
- std::unique_ptr<DexFile> dex_file = OpenCommon(map->Begin(),
- map->Size(),
- location,
- location_checksum,
- kNoOatDexFile,
- verify,
- verify_checksum,
- error_msg,
- new MemMapContainer(std::move(map)),
- /*verify_result*/ nullptr);
- return dex_file;
+std::unique_ptr<const DexFile> DexFileLoader::OpenWithDataSection(
+ const uint8_t* base,
+ size_t size,
+ const uint8_t* data_base,
+ size_t data_size,
+ const std::string& location,
+ uint32_t location_checksum,
+ const OatDexFile* oat_dex_file,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg) const {
+ return OpenCommon(base,
+ size,
+ data_base,
+ data_size,
+ location,
+ location_checksum,
+ oat_dex_file,
+ verify,
+ verify_checksum,
+ error_msg,
+ /*container*/ nullptr,
+ /*verify_result*/ nullptr);
}
-bool DexFileLoader::Open(const char* filename,
- const std::string& location,
- bool verify,
- bool verify_checksum,
- std::string* error_msg,
- std::vector<std::unique_ptr<const DexFile>>* dex_files) {
- ScopedTrace trace(std::string("Open dex file ") + std::string(location));
+bool DexFileLoader::OpenAll(
+ const uint8_t* base,
+ size_t size,
+ const std::string& location,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg,
+ std::vector<std::unique_ptr<const DexFile>>* dex_files) const {
DCHECK(dex_files != nullptr) << "DexFile::Open: out-param is nullptr";
- uint32_t magic;
- File fd = OpenAndReadMagic(filename, &magic, error_msg);
- if (fd.Fd() == -1) {
- DCHECK(!error_msg->empty());
- return false;
- }
+ uint32_t magic = *reinterpret_cast<const uint32_t*>(base);
if (IsZipMagic(magic)) {
- return OpenZip(fd.Release(), location, verify, verify_checksum, error_msg, dex_files);
+ std::unique_ptr<DexZipArchive> zip_archive(DexZipArchive::Open(base, size, error_msg));
+ if (zip_archive.get() == nullptr) {
+ DCHECK(!error_msg->empty());
+ return false;
+ }
+ return OpenAllDexFilesFromZip(*zip_archive.get(),
+ location,
+ verify,
+ verify_checksum,
+ error_msg,
+ dex_files);
}
if (IsMagicValid(magic)) {
- std::unique_ptr<const DexFile> dex_file(OpenFile(fd.Release(),
- location,
- verify,
- verify_checksum,
- error_msg));
+ const DexFile::Header* dex_header = reinterpret_cast<const DexFile::Header*>(base);
+ std::unique_ptr<const DexFile> dex_file(Open(base,
+ size,
+ location,
+ dex_header->checksum_,
+ /*oat_dex_file*/ nullptr,
+ verify,
+ verify_checksum,
+ error_msg));
if (dex_file.get() != nullptr) {
dex_files->push_back(std::move(dex_file));
return true;
@@ -267,106 +299,90 @@ bool DexFileLoader::Open(const char* filename,
return false;
}
}
- *error_msg = StringPrintf("Expected valid zip or dex file: '%s'", filename);
+ *error_msg = StringPrintf("Expected valid zip or dex file");
return false;
}
-std::unique_ptr<const DexFile> DexFileLoader::OpenDex(int fd,
- const std::string& location,
- bool verify,
- bool verify_checksum,
- std::string* error_msg) {
- ScopedTrace trace("Open dex file " + std::string(location));
- return OpenFile(fd, location, verify, verify_checksum, error_msg);
-}
-
-bool DexFileLoader::OpenZip(int fd,
- const std::string& location,
- bool verify,
- bool verify_checksum,
- std::string* error_msg,
- std::vector<std::unique_ptr<const DexFile>>* dex_files) {
- ScopedTrace trace("Dex file open Zip " + std::string(location));
- DCHECK(dex_files != nullptr) << "DexFile::OpenZip: out-param is nullptr";
- std::unique_ptr<ZipArchive> zip_archive(ZipArchive::OpenFromFd(fd, location.c_str(), error_msg));
- if (zip_archive.get() == nullptr) {
- DCHECK(!error_msg->empty());
- return false;
+std::unique_ptr<DexFile> DexFileLoader::OpenCommon(const uint8_t* base,
+ size_t size,
+ const uint8_t* data_base,
+ size_t data_size,
+ const std::string& location,
+ uint32_t location_checksum,
+ const OatDexFile* oat_dex_file,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg,
+ DexFileContainer* container,
+ VerifyResult* verify_result) {
+ if (verify_result != nullptr) {
+ *verify_result = VerifyResult::kVerifyNotAttempted;
}
- return OpenAllDexFilesFromZip(
- *zip_archive, location, verify, verify_checksum, error_msg, dex_files);
-}
-
-std::unique_ptr<const DexFile> DexFileLoader::OpenFile(int fd,
- const std::string& location,
- bool verify,
- bool verify_checksum,
- std::string* error_msg) {
- ScopedTrace trace(std::string("Open dex file ") + std::string(location));
- CHECK(!location.empty());
- std::unique_ptr<MemMap> map;
- {
- File delayed_close(fd, /* check_usage */ false);
- struct stat sbuf;
- memset(&sbuf, 0, sizeof(sbuf));
- if (fstat(fd, &sbuf) == -1) {
- *error_msg = StringPrintf("DexFile: fstat '%s' failed: %s", location.c_str(),
- strerror(errno));
- return nullptr;
- }
- if (S_ISDIR(sbuf.st_mode)) {
- *error_msg = StringPrintf("Attempt to mmap directory '%s'", location.c_str());
- return nullptr;
+ std::unique_ptr<DexFile> dex_file;
+ if (StandardDexFile::IsMagicValid(base)) {
+ if (data_size != 0) {
+ CHECK_EQ(base, data_base) << "Unsupported for standard dex";
}
- size_t length = sbuf.st_size;
- map.reset(MemMap::MapFile(length,
- PROT_READ,
- MAP_PRIVATE,
- fd,
- 0,
- /*low_4gb*/false,
- location.c_str(),
- error_msg));
- if (map == nullptr) {
- DCHECK(!error_msg->empty());
- return nullptr;
+ dex_file.reset(new StandardDexFile(base,
+ size,
+ location,
+ location_checksum,
+ oat_dex_file,
+ container));
+ } else if (CompactDexFile::IsMagicValid(base)) {
+ if (data_base == nullptr) {
+ // TODO: Is there a clean way to support both an explicit data section and reading the one
+ // from the header?
+ CHECK_EQ(data_size, 0u);
+ const CompactDexFile::Header* const header = CompactDexFile::Header::At(base);
+ data_base = base + header->data_off_;
+ data_size = header->data_size_;
}
+ dex_file.reset(new CompactDexFile(base,
+ size,
+ data_base,
+ data_size,
+ location,
+ location_checksum,
+ oat_dex_file,
+ container));
}
-
- if (map->Size() < sizeof(DexFile::Header)) {
- *error_msg = StringPrintf(
- "DexFile: failed to open dex file '%s' that is too short to have a header",
- location.c_str());
+ if (dex_file == nullptr) {
+ *error_msg = StringPrintf("Failed to open dex file '%s' from memory: %s", location.c_str(),
+ error_msg->c_str());
return nullptr;
}
-
- const DexFile::Header* dex_header = reinterpret_cast<const DexFile::Header*>(map->Begin());
-
- std::unique_ptr<DexFile> dex_file = OpenCommon(map->Begin(),
- map->Size(),
- location,
- dex_header->checksum_,
- kNoOatDexFile,
- verify,
- verify_checksum,
- error_msg,
- new MemMapContainer(std::move(map)),
- /*verify_result*/ nullptr);
-
+ if (!dex_file->Init(error_msg)) {
+ dex_file.reset();
+ return nullptr;
+ }
+ if (verify && !DexFileVerifier::Verify(dex_file.get(),
+ dex_file->Begin(),
+ dex_file->Size(),
+ location.c_str(),
+ verify_checksum,
+ error_msg)) {
+ if (verify_result != nullptr) {
+ *verify_result = VerifyResult::kVerifyFailed;
+ }
+ return nullptr;
+ }
+ if (verify_result != nullptr) {
+ *verify_result = VerifyResult::kVerifySucceeded;
+ }
return dex_file;
}
std::unique_ptr<const DexFile> DexFileLoader::OpenOneDexFileFromZip(
- const ZipArchive& zip_archive,
+ const DexZipArchive& zip_archive,
const char* entry_name,
const std::string& location,
bool verify,
bool verify_checksum,
std::string* error_msg,
- ZipOpenErrorCode* error_code) {
- ScopedTrace trace("Dex file open from Zip Archive " + std::string(location));
+ ZipOpenErrorCode* error_code) const {
CHECK(!location.empty());
- std::unique_ptr<ZipEntry> zip_entry(zip_archive.Find(entry_name, error_msg));
+ std::unique_ptr<DexZipEntry> zip_entry(zip_archive.Find(entry_name, error_msg));
if (zip_entry == nullptr) {
*error_code = ZipOpenErrorCode::kEntryNotFound;
return nullptr;
@@ -377,48 +393,26 @@ std::unique_ptr<const DexFile> DexFileLoader::OpenOneDexFileFromZip(
return nullptr;
}
- std::unique_ptr<MemMap> map;
- if (zip_entry->IsUncompressed()) {
- if (!zip_entry->IsAlignedTo(alignof(DexFile::Header))) {
- // Do not mmap unaligned ZIP entries because
- // doing so would fail dex verification which requires 4 byte alignment.
- LOG(WARNING) << "Can't mmap dex file " << location << "!" << entry_name << " directly; "
- << "please zipalign to " << alignof(DexFile::Header) << " bytes. "
- << "Falling back to extracting file.";
- } else {
- // Map uncompressed files within zip as file-backed to avoid a dirty copy.
- map.reset(zip_entry->MapDirectlyFromFile(location.c_str(), /*out*/error_msg));
- if (map == nullptr) {
- LOG(WARNING) << "Can't mmap dex file " << location << "!" << entry_name << " directly; "
- << "is your ZIP file corrupted? Falling back to extraction.";
- // Try again with Extraction which still has a chance of recovery.
- }
- }
- }
-
- if (map == nullptr) {
- // Default path for compressed ZIP entries,
- // and fallback for stored ZIP entries.
- map.reset(zip_entry->ExtractToMemMap(location.c_str(), entry_name, error_msg));
- }
-
- if (map == nullptr) {
+ std::vector<uint8_t> map(zip_entry->Extract(error_msg));
+ if (map.size() == 0) {
*error_msg = StringPrintf("Failed to extract '%s' from '%s': %s", entry_name, location.c_str(),
error_msg->c_str());
*error_code = ZipOpenErrorCode::kExtractToMemoryError;
return nullptr;
}
VerifyResult verify_result;
- std::unique_ptr<DexFile> dex_file = OpenCommon(map->Begin(),
- map->Size(),
- location,
- zip_entry->GetCrc32(),
- kNoOatDexFile,
- verify,
- verify_checksum,
- error_msg,
- new MemMapContainer(std::move(map)),
- &verify_result);
+ std::unique_ptr<const DexFile> dex_file = OpenCommon(map.data(),
+ map.size(),
+ /*data_base*/ nullptr,
+ /*data_size*/ 0u,
+ location,
+ zip_entry->GetCrc32(),
+ /*oat_dex_file*/ nullptr,
+ verify,
+ verify_checksum,
+ error_msg,
+ new VectorContainer(std::move(map)),
+ &verify_result);
if (dex_file == nullptr) {
if (verify_result == VerifyResult::kVerifyNotAttempted) {
*error_code = ZipOpenErrorCode::kDexFileError;
@@ -427,12 +421,6 @@ std::unique_ptr<const DexFile> DexFileLoader::OpenOneDexFileFromZip(
}
return nullptr;
}
- if (!dex_file->DisableWrite()) {
- *error_msg = StringPrintf("Failed to make dex file '%s' read only", location.c_str());
- *error_code = ZipOpenErrorCode::kMakeReadOnlyError;
- return nullptr;
- }
- CHECK(dex_file->IsReadOnly()) << location;
if (verify_result != VerifyResult::kVerifySucceeded) {
*error_code = ZipOpenErrorCode::kVerifyError;
return nullptr;
@@ -447,13 +435,13 @@ std::unique_ptr<const DexFile> DexFileLoader::OpenOneDexFileFromZip(
// seems an excessive number.
static constexpr size_t kWarnOnManyDexFilesThreshold = 100;
-bool DexFileLoader::OpenAllDexFilesFromZip(const ZipArchive& zip_archive,
- const std::string& location,
- bool verify,
- bool verify_checksum,
- std::string* error_msg,
- std::vector<std::unique_ptr<const DexFile>>* dex_files) {
- ScopedTrace trace("Dex file open from Zip " + std::string(location));
+bool DexFileLoader::OpenAllDexFilesFromZip(
+ const DexZipArchive& zip_archive,
+ const std::string& location,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg,
+ std::vector<std::unique_ptr<const DexFile>>* dex_files) const {
DCHECK(dex_files != nullptr) << "DexFile::OpenFromZip: out-param is nullptr";
ZipOpenErrorCode error_code;
std::unique_ptr<const DexFile> dex_file(OpenOneDexFileFromZip(zip_archive,
@@ -508,52 +496,4 @@ bool DexFileLoader::OpenAllDexFilesFromZip(const ZipArchive& zip_archive,
return true;
}
}
-
-std::unique_ptr<DexFile> DexFileLoader::OpenCommon(const uint8_t* base,
- size_t size,
- const std::string& location,
- uint32_t location_checksum,
- const OatDexFile* oat_dex_file,
- bool verify,
- bool verify_checksum,
- std::string* error_msg,
- DexFileContainer* container,
- VerifyResult* verify_result) {
- if (verify_result != nullptr) {
- *verify_result = VerifyResult::kVerifyNotAttempted;
- }
- std::unique_ptr<DexFile> dex_file;
- if (StandardDexFile::IsMagicValid(base)) {
- dex_file.reset(
- new StandardDexFile(base, size, location, location_checksum, oat_dex_file, container));
- } else if (CompactDexFile::IsMagicValid(base)) {
- dex_file.reset(
- new CompactDexFile(base, size, location, location_checksum, oat_dex_file, container));
- }
- if (dex_file == nullptr) {
- *error_msg = StringPrintf("Failed to open dex file '%s' from memory: %s", location.c_str(),
- error_msg->c_str());
- return nullptr;
- }
- if (!dex_file->Init(error_msg)) {
- dex_file.reset();
- return nullptr;
- }
- if (verify && !DexFileVerifier::Verify(dex_file.get(),
- dex_file->Begin(),
- dex_file->Size(),
- location.c_str(),
- verify_checksum,
- error_msg)) {
- if (verify_result != nullptr) {
- *verify_result = VerifyResult::kVerifyFailed;
- }
- return nullptr;
- }
- if (verify_result != nullptr) {
- *verify_result = VerifyResult::kVerifySucceeded;
- }
- return dex_file;
-}
-
} // namespace art
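Note on the rewritten loader above: with the MemMap, fd and systrace plumbing removed, this translation unit is runtime independent; zip input goes through the thin DexZipArchive/DexZipEntry wrappers over libziparchive and is extracted into a plain std::vector owned by a VectorContainer. A hedged sketch of how a caller might drive the new entry point, assuming only the OpenAll signature shown in this hunk:

    #include <memory>
    #include <string>
    #include <vector>

    #include "dex_file.h"
    #include "dex_file_loader.h"

    // Illustrative caller: opens every classes*.dex found in an in-memory dex or zip image.
    bool LoadDexFilesFromBuffer(const uint8_t* base,
                                size_t size,
                                const std::string& location,
                                std::string* error_msg,
                                std::vector<std::unique_ptr<const art::DexFile>>* out) {
      const art::DexFileLoader loader;  // The base loader needs no runtime services.
      return loader.OpenAll(base, size, location,
                            /*verify*/ true,
                            /*verify_checksum*/ true,
                            error_msg, out);
    }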
diff --git a/runtime/dex/dex_file_loader.h b/runtime/dex/dex_file_loader.h
index 7db8d8e08e..508397cb00 100644
--- a/runtime/dex/dex_file_loader.h
+++ b/runtime/dex/dex_file_loader.h
@@ -28,7 +28,8 @@ class DexFile;
class DexFileContainer;
class MemMap;
class OatDexFile;
-class ZipArchive;
+
+class DexZipArchive;
// Class that is used to open dex files and deal with corresponding multidex and location logic.
class DexFileLoader {
@@ -46,65 +47,10 @@ class DexFileLoader {
// Return true if the corresponding version and magic is valid.
static bool IsVersionAndMagicValid(const uint8_t* magic);
- // Returns the checksums of a file for comparison with GetLocationChecksum().
- // For .dex files, this is the single header checksum.
- // For zip files, this is the zip entry CRC32 checksum for classes.dex and
- // each additional multidex entry classes2.dex, classes3.dex, etc.
- // If a valid zip_fd is provided the file content will be read directly from
- // the descriptor and `filename` will be used as alias for error logging. If
- // zip_fd is -1, the method will try to open the `filename` and read the
- // content from it.
- // Return true if the checksums could be found, false otherwise.
- static bool GetMultiDexChecksums(const char* filename,
- std::vector<uint32_t>* checksums,
- std::string* error_msg,
- int zip_fd = -1);
-
// Check whether a location denotes a multidex dex file. This is a very simple check: returns
// whether the string contains the separator character.
static bool IsMultiDexLocation(const char* location);
- // Opens .dex file, backed by existing memory
- static std::unique_ptr<const DexFile> Open(const uint8_t* base,
- size_t size,
- const std::string& location,
- uint32_t location_checksum,
- const OatDexFile* oat_dex_file,
- bool verify,
- bool verify_checksum,
- std::string* error_msg);
-
- // Opens .dex file that has been memory-mapped by the caller.
- static std::unique_ptr<const DexFile> Open(const std::string& location,
- uint32_t location_checkum,
- std::unique_ptr<MemMap> mem_map,
- bool verify,
- bool verify_checksum,
- std::string* error_msg);
-
- // Opens all .dex files found in the file, guessing the container format based on file extension.
- static bool Open(const char* filename,
- const std::string& location,
- bool verify,
- bool verify_checksum,
- std::string* error_msg,
- std::vector<std::unique_ptr<const DexFile>>* dex_files);
-
- // Open a single dex file from an fd. This function closes the fd.
- static std::unique_ptr<const DexFile> OpenDex(int fd,
- const std::string& location,
- bool verify,
- bool verify_checksum,
- std::string* error_msg);
-
- // Opens dex files from within a .jar, .zip, or .apk file
- static bool OpenZip(int fd,
- const std::string& location,
- bool verify,
- bool verify_checksum,
- std::string* error_msg,
- std::vector<std::unique_ptr<const DexFile>>* dex_files);
-
// Return the name of the index-th classes.dex in a multidex zip file. This is classes.dex for
// index == 0, and classes{index + 1}.dex else.
static std::string GetMultiDexClassesDexName(size_t index);
@@ -148,13 +94,57 @@ class DexFileLoader {
return (pos == std::string::npos) ? std::string() : location.substr(pos);
}
- private:
- static std::unique_ptr<const DexFile> OpenFile(int fd,
- const std::string& location,
- bool verify,
- bool verify_checksum,
- std::string* error_msg);
+ virtual ~DexFileLoader() { }
+ // Returns the checksums of a file for comparison with GetLocationChecksum().
+ // For .dex files, this is the single header checksum.
+ // For zip files, this is the zip entry CRC32 checksum for classes.dex and
+ // each additional multidex entry classes2.dex, classes3.dex, etc.
+ // If a valid zip_fd is provided the file content will be read directly from
+ // the descriptor and `filename` will be used as alias for error logging. If
+ // zip_fd is -1, the method will try to open the `filename` and read the
+ // content from it.
+ // Return true if the checksums could be found, false otherwise.
+ virtual bool GetMultiDexChecksums(const char* filename,
+ std::vector<uint32_t>* checksums,
+ std::string* error_msg,
+ int zip_fd = -1) const;
+
+ // Opens .dex file, backed by existing memory
+ virtual std::unique_ptr<const DexFile> Open(const uint8_t* base,
+ size_t size,
+ const std::string& location,
+ uint32_t location_checksum,
+ const OatDexFile* oat_dex_file,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg) const;
+
+ // Open a dex file with a separate data section.
+ virtual std::unique_ptr<const DexFile> OpenWithDataSection(
+ const uint8_t* base,
+ size_t size,
+ const uint8_t* data_base,
+ size_t data_size,
+ const std::string& location,
+ uint32_t location_checksum,
+ const OatDexFile* oat_dex_file,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg) const;
+
+
+ // Opens all .dex files found in the given memory range, guessing the container format from
+ // the magic bytes.
+ virtual bool OpenAll(const uint8_t* base,
+ size_t size,
+ const std::string& location,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg,
+ std::vector<std::unique_ptr<const DexFile>>* dex_files) const;
+
+ protected:
enum class ZipOpenErrorCode {
kNoError,
kEntryNotFound,
@@ -164,24 +154,6 @@ class DexFileLoader {
kVerifyError
};
- // Open all classesXXX.dex files from a zip archive.
- static bool OpenAllDexFilesFromZip(const ZipArchive& zip_archive,
- const std::string& location,
- bool verify,
- bool verify_checksum,
- std::string* error_msg,
- std::vector<std::unique_ptr<const DexFile>>* dex_files);
-
- // Opens .dex file from the entry_name in a zip archive. error_code is undefined when non-null
- // return.
- static std::unique_ptr<const DexFile> OpenOneDexFileFromZip(const ZipArchive& zip_archive,
- const char* entry_name,
- const std::string& location,
- bool verify,
- bool verify_checksum,
- std::string* error_msg,
- ZipOpenErrorCode* error_code);
-
enum class VerifyResult { // private
kVerifyNotAttempted,
kVerifySucceeded,
@@ -190,6 +162,8 @@ class DexFileLoader {
static std::unique_ptr<DexFile> OpenCommon(const uint8_t* base,
size_t size,
+ const uint8_t* data_base,
+ size_t data_size,
const std::string& location,
uint32_t location_checksum,
const OatDexFile* oat_dex_file,
@@ -198,6 +172,25 @@ class DexFileLoader {
std::string* error_msg,
DexFileContainer* container,
VerifyResult* verify_result);
+
+ private:
+ // Open all classesXXX.dex files from a zip archive.
+ bool OpenAllDexFilesFromZip(const DexZipArchive& zip_archive,
+ const std::string& location,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg,
+ std::vector<std::unique_ptr<const DexFile>>* dex_files) const;
+
+ // Opens .dex file from the entry_name in a zip archive. error_code is undefined when non-null
+ // return.
+ std::unique_ptr<const DexFile> OpenOneDexFileFromZip(const DexZipArchive& zip_archive,
+ const char* entry_name,
+ const std::string& location,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg,
+ ZipOpenErrorCode* error_code) const;
};
} // namespace art
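Note on the dex_file_loader.h changes above: Open, OpenWithDataSection, OpenAll and GetMultiDexChecksums are now virtual const members, so a runtime-aware subclass (such as the ArtDexFileLoader that the updated tests below construct) can layer fd- and MemMap-based behavior on top of the runtime-free base class. A purely illustrative override, not the real ArtDexFileLoader:

    #include <string>
    #include <vector>

    #include "dex_file_loader.h"

    // Hypothetical subclass; the real ArtDexFileLoader lives in art_dex_file_loader.h.
    class FileBackedDexFileLoader : public art::DexFileLoader {
     public:
      bool GetMultiDexChecksums(const char* filename,
                                std::vector<uint32_t>* checksums,
                                std::string* error_msg,
                                int zip_fd = -1) const override {
        // A real implementation would open `filename` (or read from zip_fd) and push the
        // header checksum or per-entry CRC32s, as the base-class comment describes.
        *error_msg = std::string("unimplemented in this sketch: ") + filename;
        return false;
      }
    };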
diff --git a/runtime/dex/dex_file_test.cc b/runtime/dex/dex_file_test.cc
index 3ee115c01b..998bfd6c7f 100644
--- a/runtime/dex/dex_file_test.cc
+++ b/runtime/dex/dex_file_test.cc
@@ -20,17 +20,18 @@
#include <memory>
+#include "art_dex_file_loader.h"
#include "base/stl_util.h"
#include "base/unix_file/fd_file.h"
#include "code_item_accessors-inl.h"
#include "common_runtime_test.h"
+#include "descriptors_names.h"
#include "dex_file-inl.h"
#include "dex_file_loader.h"
#include "mem_map.h"
#include "os.h"
#include "scoped_thread_state_change-inl.h"
#include "thread-current-inl.h"
-#include "utils.h"
namespace art {
@@ -237,7 +238,8 @@ static bool OpenDexFilesBase64(const char* base64,
ScopedObjectAccess soa(Thread::Current());
static constexpr bool kVerifyChecksum = true;
std::vector<std::unique_ptr<const DexFile>> tmp;
- bool success = DexFileLoader::Open(
+ const ArtDexFileLoader dex_file_loader;
+ bool success = dex_file_loader.Open(
location, location, /* verify */ true, kVerifyChecksum, error_msg, &tmp);
if (success) {
for (std::unique_ptr<const DexFile>& dex_file : tmp) {
@@ -277,12 +279,13 @@ static std::unique_ptr<const DexFile> OpenDexFileInMemoryBase64(const char* base
/* reuse */ false,
&error_message));
memcpy(region->Begin(), dex_bytes.data(), dex_bytes.size());
- std::unique_ptr<const DexFile> dex_file(DexFileLoader::Open(location,
- location_checksum,
- std::move(region),
- /* verify */ true,
- /* verify_checksum */ true,
- &error_message));
+ const ArtDexFileLoader dex_file_loader;
+ std::unique_ptr<const DexFile> dex_file(dex_file_loader.Open(location,
+ location_checksum,
+ std::move(region),
+ /* verify */ true,
+ /* verify_checksum */ true,
+ &error_message));
if (expect_success) {
CHECK(dex_file != nullptr) << error_message;
} else {
@@ -368,7 +371,8 @@ TEST_F(DexFileTest, Version40Rejected) {
static constexpr bool kVerifyChecksum = true;
std::string error_msg;
std::vector<std::unique_ptr<const DexFile>> dex_files;
- ASSERT_FALSE(DexFileLoader::Open(
+ const ArtDexFileLoader dex_file_loader;
+ ASSERT_FALSE(dex_file_loader.Open(
location, location, /* verify */ true, kVerifyChecksum, &error_msg, &dex_files));
}
@@ -381,7 +385,8 @@ TEST_F(DexFileTest, Version41Rejected) {
static constexpr bool kVerifyChecksum = true;
std::string error_msg;
std::vector<std::unique_ptr<const DexFile>> dex_files;
- ASSERT_FALSE(DexFileLoader::Open(
+ const ArtDexFileLoader dex_file_loader;
+ ASSERT_FALSE(dex_file_loader.Open(
location, location, /* verify */ true, kVerifyChecksum, &error_msg, &dex_files));
}
@@ -394,7 +399,8 @@ TEST_F(DexFileTest, ZeroLengthDexRejected) {
static constexpr bool kVerifyChecksum = true;
std::string error_msg;
std::vector<std::unique_ptr<const DexFile>> dex_files;
- ASSERT_FALSE(DexFileLoader::Open(
+ const ArtDexFileLoader dex_file_loader;
+ ASSERT_FALSE(dex_file_loader.Open(
location, location, /* verify */ true, kVerifyChecksum, &error_msg, &dex_files));
}
@@ -408,9 +414,10 @@ TEST_F(DexFileTest, GetChecksum) {
std::vector<uint32_t> checksums;
ScopedObjectAccess soa(Thread::Current());
std::string error_msg;
- EXPECT_TRUE(DexFileLoader::GetMultiDexChecksums(GetLibCoreDexFileNames()[0].c_str(),
- &checksums,
- &error_msg))
+ const ArtDexFileLoader dex_file_loader;
+ EXPECT_TRUE(dex_file_loader.GetMultiDexChecksums(GetLibCoreDexFileNames()[0].c_str(),
+ &checksums,
+ &error_msg))
<< error_msg;
ASSERT_EQ(1U, checksums.size());
EXPECT_EQ(java_lang_dex_file_->GetLocationChecksum(), checksums[0]);
@@ -420,9 +427,10 @@ TEST_F(DexFileTest, GetMultiDexChecksums) {
std::string error_msg;
std::vector<uint32_t> checksums;
std::string multidex_file = GetTestDexFileName("MultiDex");
- EXPECT_TRUE(DexFileLoader::GetMultiDexChecksums(multidex_file.c_str(),
- &checksums,
- &error_msg)) << error_msg;
+ const ArtDexFileLoader dex_file_loader;
+ EXPECT_TRUE(dex_file_loader.GetMultiDexChecksums(multidex_file.c_str(),
+ &checksums,
+ &error_msg)) << error_msg;
std::vector<std::unique_ptr<const DexFile>> dexes = OpenTestDexFiles("MultiDex");
ASSERT_EQ(2U, dexes.size());
@@ -730,8 +738,10 @@ TEST_F(DexFileTest, OpenDexDebugInfoLocalNullType) {
std::unique_ptr<const DexFile> raw = OpenDexFileInMemoryBase64(
kRawDexDebugInfoLocalNullType, tmp.GetFilename().c_str(), 0xf25f2b38U, true);
const DexFile::ClassDef& class_def = raw->GetClassDef(0);
- const DexFile::CodeItem* code_item = raw->GetCodeItem(raw->FindCodeItemOffset(class_def, 1));
- CodeItemDebugInfoAccessor accessor(*raw, code_item);
+ constexpr uint32_t kMethodIdx = 1;
+ const DexFile::CodeItem* code_item = raw->GetCodeItem(raw->FindCodeItemOffset(class_def,
+ kMethodIdx));
+ CodeItemDebugInfoAccessor accessor(*raw, code_item, kMethodIdx);
ASSERT_TRUE(accessor.DecodeDebugLocalInfo(true, 1, Callback, nullptr));
}
diff --git a/runtime/dex/dex_file_verifier.cc b/runtime/dex/dex_file_verifier.cc
index 7265aad1ba..62667052ad 100644
--- a/runtime/dex/dex_file_verifier.cc
+++ b/runtime/dex/dex_file_verifier.cc
@@ -23,13 +23,12 @@
#include "android-base/stringprintf.h"
-#include "code_item_accessors-no_art-inl.h"
+#include "code_item_accessors-inl.h"
+#include "descriptors_names.h"
#include "dex_file-inl.h"
-#include "experimental_flags.h"
#include "leb128.h"
-#include "safe_map.h"
+#include "modifiers.h"
#include "utf-inl.h"
-#include "utils.h"
namespace art {
@@ -453,6 +452,7 @@ bool DexFileVerifier::CheckMap() {
uint32_t count = map->size_;
uint32_t last_offset = 0;
+ uint32_t last_type = 0;
uint32_t data_item_count = 0;
uint32_t data_items_left = header_->data_size_;
uint32_t used_bits = 0;
@@ -465,7 +465,11 @@ bool DexFileVerifier::CheckMap() {
// Check the items listed in the map.
for (uint32_t i = 0; i < count; i++) {
if (UNLIKELY(last_offset >= item->offset_ && i != 0)) {
- ErrorStringPrintf("Out of order map item: %x then %x", last_offset, item->offset_);
+ ErrorStringPrintf("Out of order map item: %x then %x for type %x last type was %x",
+ last_offset,
+ item->offset_,
+ static_cast<uint32_t>(item->type_),
+ last_type);
return false;
}
if (UNLIKELY(item->offset_ >= header_->file_size_)) {
@@ -501,6 +505,7 @@ bool DexFileVerifier::CheckMap() {
used_bits |= bit;
last_offset = item->offset_;
+ last_type = item->type_;
item++;
}
diff --git a/runtime/dex/dex_file_verifier_test.cc b/runtime/dex/dex_file_verifier_test.cc
index d4d912cbfb..d73a7fbfa3 100644
--- a/runtime/dex/dex_file_verifier_test.cc
+++ b/runtime/dex/dex_file_verifier_test.cc
@@ -22,10 +22,12 @@
#include <functional>
#include <memory>
+#include "art_dex_file_loader.h"
#include "base/bit_utils.h"
#include "base/macros.h"
#include "base/unix_file/fd_file.h"
#include "common_runtime_test.h"
+#include "descriptors_names.h"
#include "dex_file-inl.h"
#include "dex_file_loader.h"
#include "dex_file_types.h"
@@ -33,7 +35,6 @@
#include "scoped_thread_state_change-inl.h"
#include "standard_dex_file.h"
#include "thread-current-inl.h"
-#include "utils.h"
namespace art {
@@ -114,7 +115,8 @@ static std::unique_ptr<const DexFile> OpenDexFileBase64(const char* base64,
// read dex file
ScopedObjectAccess soa(Thread::Current());
std::vector<std::unique_ptr<const DexFile>> tmp;
- bool success = DexFileLoader::Open(
+ const ArtDexFileLoader dex_file_loader;
+ bool success = dex_file_loader.Open(
location, location, /* verify */ true, /* verify_checksum */ true, error_msg, &tmp);
CHECK(success) << *error_msg;
EXPECT_EQ(1U, tmp.size());
diff --git a/runtime/dex/dex_instruction.cc b/runtime/dex/dex_instruction.cc
index 6ebe2286e8..b84791ffae 100644
--- a/runtime/dex/dex_instruction.cc
+++ b/runtime/dex/dex_instruction.cc
@@ -24,7 +24,7 @@
#include "android-base/stringprintf.h"
#include "dex_file-inl.h"
-#include "utils.h"
+#include "utf.h"
namespace art {
diff --git a/runtime/invoke_type.h b/runtime/dex/invoke_type.h
index 2b877e6f51..726d269a3e 100644
--- a/runtime/invoke_type.h
+++ b/runtime/dex/invoke_type.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_INVOKE_TYPE_H_
-#define ART_RUNTIME_INVOKE_TYPE_H_
+#ifndef ART_RUNTIME_DEX_INVOKE_TYPE_H_
+#define ART_RUNTIME_DEX_INVOKE_TYPE_H_
#include <iosfwd>
@@ -35,4 +35,4 @@ std::ostream& operator<<(std::ostream& os, const InvokeType& rhs);
} // namespace art
-#endif // ART_RUNTIME_INVOKE_TYPE_H_
+#endif // ART_RUNTIME_DEX_INVOKE_TYPE_H_
diff --git a/runtime/dex/modifiers.cc b/runtime/dex/modifiers.cc
new file mode 100644
index 0000000000..30daefb172
--- /dev/null
+++ b/runtime/dex/modifiers.cc
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <string>
+
+#include "modifiers.h"
+
+namespace art {
+
+std::string PrettyJavaAccessFlags(uint32_t access_flags) {
+ std::string result;
+ if ((access_flags & kAccPublic) != 0) {
+ result += "public ";
+ }
+ if ((access_flags & kAccProtected) != 0) {
+ result += "protected ";
+ }
+ if ((access_flags & kAccPrivate) != 0) {
+ result += "private ";
+ }
+ if ((access_flags & kAccFinal) != 0) {
+ result += "final ";
+ }
+ if ((access_flags & kAccStatic) != 0) {
+ result += "static ";
+ }
+ if ((access_flags & kAccAbstract) != 0) {
+ result += "abstract ";
+ }
+ if ((access_flags & kAccInterface) != 0) {
+ result += "interface ";
+ }
+ if ((access_flags & kAccTransient) != 0) {
+ result += "transient ";
+ }
+ if ((access_flags & kAccVolatile) != 0) {
+ result += "volatile ";
+ }
+ if ((access_flags & kAccSynchronized) != 0) {
+ result += "synchronized ";
+ }
+ return result;
+}
+
+} // namespace art
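Usage note for the helper above: flags are emitted in the order they are tested, each followed by a space, so a typical call looks like this:

    #include <iostream>

    #include "modifiers.h"

    int main() {
      // Prints "public final static " -- 'final' precedes 'static' because that is
      // the order PrettyJavaAccessFlags() checks the bits in.
      std::cout << art::PrettyJavaAccessFlags(art::kAccPublic | art::kAccStatic | art::kAccFinal)
                << std::endl;
      return 0;
    }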
diff --git a/runtime/modifiers.h b/runtime/dex/modifiers.h
index d7d647b8fd..2998f602d4 100644
--- a/runtime/modifiers.h
+++ b/runtime/dex/modifiers.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_MODIFIERS_H_
-#define ART_RUNTIME_MODIFIERS_H_
+#ifndef ART_RUNTIME_DEX_MODIFIERS_H_
+#define ART_RUNTIME_DEX_MODIFIERS_H_
#include <stdint.h>
@@ -42,6 +42,12 @@ static constexpr uint32_t kAccEnum = 0x4000; // class, field, ic (1.5)
static constexpr uint32_t kAccJavaFlagsMask = 0xffff; // bits set from Java sources (low 16)
+// The following flags are used to insert hidden API access flags into boot
+// class path dex files. They are decoded by DexFile::ClassDataItemIterator and
+// removed from the access flags before being used by the runtime.
+static constexpr uint32_t kAccDexHiddenBit = 0x00000020; // field, method (not native)
+static constexpr uint32_t kAccDexHiddenBitNative = 0x00000200; // method (native)
+
static constexpr uint32_t kAccConstructor = 0x00010000; // method (dex only) <(cl)init>
static constexpr uint32_t kAccDeclaredSynchronized = 0x00020000; // method (dex only)
static constexpr uint32_t kAccClassIsProxy = 0x00040000; // class (dex only)
@@ -83,9 +89,11 @@ static constexpr uint32_t kAccMustCountLocks = 0x04000000; // method (ru
// virtual call.
static constexpr uint32_t kAccSingleImplementation = 0x08000000; // method (runtime)
+static constexpr uint32_t kAccHiddenApiBits = 0x30000000; // field, method
+
// Not currently used, except for intrinsic methods where these bits
// are part of the intrinsic ordinal.
-static constexpr uint32_t kAccMayBeUnusedBits = 0x70000000;
+static constexpr uint32_t kAccMayBeUnusedBits = 0x40000000;
// Set by the compiler driver when compiling boot classes with intrinsic methods.
static constexpr uint32_t kAccIntrinsic = 0x80000000; // method (runtime)
@@ -100,8 +108,9 @@ static constexpr uint32_t kAccClassIsFinalizable = 0x80000000;
// Continuous sequence of bits used to hold the ordinal of an intrinsic method. Flags
// which overlap are not valid when kAccIntrinsic is set.
-static constexpr uint32_t kAccIntrinsicBits = kAccMayBeUnusedBits | kAccSingleImplementation |
- kAccMustCountLocks | kAccCompileDontBother | kAccDefaultConflict | kAccPreviouslyWarm;
+static constexpr uint32_t kAccIntrinsicBits = kAccMayBeUnusedBits | kAccHiddenApiBits |
+ kAccSingleImplementation | kAccMustCountLocks | kAccCompileDontBother | kAccDefaultConflict |
+ kAccPreviouslyWarm;
// Valid (meaningful) bits for a field.
static constexpr uint32_t kAccValidFieldFlags = kAccPublic | kAccPrivate | kAccProtected |
@@ -127,7 +136,13 @@ static constexpr uint32_t kAccValidClassFlags = kAccPublic | kAccFinal | kAccSup
static constexpr uint32_t kAccValidInterfaceFlags = kAccPublic | kAccInterface |
kAccAbstract | kAccSynthetic | kAccAnnotation;
+static constexpr uint32_t kAccVisibilityFlags = kAccPublic | kAccPrivate | kAccProtected;
+
+// Returns a human-readable version of the Java part of the access flags, e.g., "private static "
+// (note the trailing whitespace).
+std::string PrettyJavaAccessFlags(uint32_t access_flags);
+
} // namespace art
-#endif // ART_RUNTIME_MODIFIERS_H_
+#endif // ART_RUNTIME_DEX_MODIFIERS_H_
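Bit-accounting check for the constants above: kAccMayBeUnusedBits shrinks from 0x70000000 to 0x40000000 exactly because the two kAccHiddenApiBits (0x30000000) are carved out of the previously unused range. As self-contained compile-time asserts (constants copied from this header):

    #include <cstdint>

    constexpr uint32_t kAccMayBeUnusedBits = 0x40000000;
    constexpr uint32_t kAccHiddenApiBits   = 0x30000000;

    static_assert((kAccMayBeUnusedBits | kAccHiddenApiBits) == 0x70000000u,
                  "hidden-API bits come out of the previously unused bit range");
    static_assert((kAccMayBeUnusedBits & kAccHiddenApiBits) == 0u,
                  "the two ranges must not overlap");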
diff --git a/runtime/dex/standard_dex_file.cc b/runtime/dex/standard_dex_file.cc
index 52fdff303b..f7317eb997 100644
--- a/runtime/dex/standard_dex_file.cc
+++ b/runtime/dex/standard_dex_file.cc
@@ -17,7 +17,7 @@
#include "standard_dex_file.h"
#include "base/casts.h"
-#include "code_item_accessors-no_art-inl.h"
+#include "code_item_accessors-inl.h"
#include "dex_file-inl.h"
#include "leb128.h"
@@ -73,10 +73,7 @@ bool StandardDexFile::SupportsDefaultMethods() const {
}
uint32_t StandardDexFile::GetCodeItemSize(const DexFile::CodeItem& item) const {
- DCHECK(HasAddress(&item));
- // TODO: Clean up this temporary code duplication with StandardDexFile. Eventually the
- // implementations will differ.
- DCHECK(HasAddress(&item));
+ DCHECK(IsInDataSection(&item));
return reinterpret_cast<uintptr_t>(CodeItemDataAccessor(*this, &item).CodeItemDataEnd()) -
reinterpret_cast<uintptr_t>(&item);
}
diff --git a/runtime/dex/standard_dex_file.h b/runtime/dex/standard_dex_file.h
index fb2f720920..e0e9f2f11c 100644
--- a/runtime/dex/standard_dex_file.h
+++ b/runtime/dex/standard_dex_file.h
@@ -33,8 +33,30 @@ class StandardDexFile : public DexFile {
};
struct CodeItem : public DexFile::CodeItem {
+ static constexpr size_t kAlignment = 4;
+
private:
- // TODO: Insert standard dex specific fields here.
+ CodeItem() = default;
+
+ uint16_t registers_size_; // the number of registers used by this code
+ // (locals + parameters)
+ uint16_t ins_size_; // the number of words of incoming arguments to the method
+ // that this code is for
+ uint16_t outs_size_; // the number of words of outgoing argument space required
+ // by this code for method invocation
+ uint16_t tries_size_; // the number of try_items for this instance. If non-zero,
+ // then these appear as the tries array just after the
+ // insns in this instance.
+ uint32_t debug_info_off_; // Holds file offset to debug info stream.
+
+ uint32_t insns_size_in_code_units_; // size of the insns array, in 2 byte code units
+ uint16_t insns_[1]; // actual array of bytecode.
+
+ ART_FRIEND_TEST(CodeItemAccessorsTest, TestDexInstructionsAccessor);
+ friend class CodeItemDataAccessor;
+ friend class CodeItemDebugInfoAccessor;
+ friend class CodeItemInstructionAccessor;
+ friend class DexWriter;
friend class StandardDexFile;
DISALLOW_COPY_AND_ASSIGN(CodeItem);
};
@@ -61,6 +83,10 @@ class StandardDexFile : public DexFile {
uint32_t GetCodeItemSize(const DexFile::CodeItem& item) const OVERRIDE;
+ virtual size_t GetDequickenedSize() const OVERRIDE {
+ return Size();
+ }
+
private:
StandardDexFile(const uint8_t* base,
size_t size,
@@ -70,6 +96,8 @@ class StandardDexFile : public DexFile {
DexFileContainer* container)
: DexFile(base,
size,
+ /*data_begin*/ base,
+ /*data_size*/ size,
location,
location_checksum,
oat_dex_file,
@@ -80,6 +108,7 @@ class StandardDexFile : public DexFile {
friend class DexFileVerifierTest;
ART_FRIEND_TEST(ClassLinkerTest, RegisterDexFileName); // for constructor
+ friend class OptimizingUnitTestHelper; // for constructor
DISALLOW_COPY_AND_ASSIGN(StandardDexFile);
};
diff --git a/runtime/utf-inl.h b/runtime/dex/utf-inl.h
index b2d6765fb0..4f626a8580 100644
--- a/runtime/utf-inl.h
+++ b/runtime/dex/utf-inl.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_UTF_INL_H_
-#define ART_RUNTIME_UTF_INL_H_
+#ifndef ART_RUNTIME_DEX_UTF_INL_H_
+#define ART_RUNTIME_DEX_UTF_INL_H_
#include "utf.h"
@@ -96,4 +96,4 @@ inline int CompareModifiedUtf8ToModifiedUtf8AsUtf16CodePointValues(const char* u
} // namespace art
-#endif // ART_RUNTIME_UTF_INL_H_
+#endif // ART_RUNTIME_DEX_UTF_INL_H_
diff --git a/runtime/utf.cc b/runtime/dex/utf.cc
index 93fcb32136..772a610140 100644
--- a/runtime/utf.cc
+++ b/runtime/dex/utf.cc
@@ -17,13 +17,17 @@
#include "utf.h"
#include <android-base/logging.h>
+#include <android-base/stringprintf.h>
+#include <android-base/strings.h>
-#include "mirror/array.h"
-#include "mirror/object-inl.h"
+#include "base/casts.h"
#include "utf-inl.h"
namespace art {
+using android::base::StringAppendF;
+using android::base::StringPrintf;
+
// This is used only from debugger and test code.
size_t CountModifiedUtf8Chars(const char* utf8) {
return CountModifiedUtf8Chars(utf8, strlen(utf8));
@@ -263,4 +267,55 @@ size_t CountUtf8Bytes(const uint16_t* chars, size_t char_count) {
return result;
}
+static inline constexpr bool NeedsEscaping(uint16_t ch) {
+ return (ch < ' ' || ch > '~');
+}
+
+std::string PrintableChar(uint16_t ch) {
+ std::string result;
+ result += '\'';
+ if (NeedsEscaping(ch)) {
+ StringAppendF(&result, "\\u%04x", ch);
+ } else {
+ result += static_cast<std::string::value_type>(ch);
+ }
+ result += '\'';
+ return result;
+}
+
+std::string PrintableString(const char* utf) {
+ std::string result;
+ result += '"';
+ const char* p = utf;
+ size_t char_count = CountModifiedUtf8Chars(p);
+ for (size_t i = 0; i < char_count; ++i) {
+ uint32_t ch = GetUtf16FromUtf8(&p);
+ if (ch == '\\') {
+ result += "\\\\";
+ } else if (ch == '\n') {
+ result += "\\n";
+ } else if (ch == '\r') {
+ result += "\\r";
+ } else if (ch == '\t') {
+ result += "\\t";
+ } else {
+ const uint16_t leading = GetLeadingUtf16Char(ch);
+
+ if (NeedsEscaping(leading)) {
+ StringAppendF(&result, "\\u%04x", leading);
+ } else {
+ result += static_cast<std::string::value_type>(leading);
+ }
+
+ const uint32_t trailing = GetTrailingUtf16Char(ch);
+ if (trailing != 0) {
+ // All high surrogates will need escaping.
+ StringAppendF(&result, "\\u%04x", trailing);
+ }
+ }
+ }
+ result += '"';
+ return result;
+}
+
} // namespace art
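Usage note for the escaping helpers above, mostly useful in verifier and debugger messages:

    #include <iostream>

    #include "utf.h"

    int main() {
      std::cout << art::PrintableChar('A') << std::endl;           // 'A'
      std::cout << art::PrintableChar(0x00e9) << std::endl;        // '\u00e9' (non-ASCII is escaped)
      std::cout << art::PrintableString("tab\tend") << std::endl;  // "tab\tend" (the tab printed as \t)
      return 0;
    }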
diff --git a/runtime/utf.h b/runtime/dex/utf.h
index cbb32fa6cd..4adfc4af8c 100644
--- a/runtime/utf.h
+++ b/runtime/dex/utf.h
@@ -14,14 +14,16 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_UTF_H_
-#define ART_RUNTIME_UTF_H_
+#ifndef ART_RUNTIME_DEX_UTF_H_
+#define ART_RUNTIME_DEX_UTF_H_
#include "base/macros.h"
#include <stddef.h>
#include <stdint.h>
+#include <string>
+
/*
* All UTF-8 in art is actually modified UTF-8. Mostly, this distinction
* doesn't matter.
@@ -121,6 +123,13 @@ ALWAYS_INLINE uint16_t GetLeadingUtf16Char(uint32_t maybe_pair);
*/
ALWAYS_INLINE uint16_t GetTrailingUtf16Char(uint32_t maybe_pair);
+// Returns a printable (escaped) version of a character.
+std::string PrintableChar(uint16_t ch);
+
+// Returns an ASCII string corresponding to the given UTF-8 string.
+// Java escapes are used for non-ASCII characters.
+std::string PrintableString(const char* utf8);
+
} // namespace art
-#endif // ART_RUNTIME_UTF_H_
+#endif // ART_RUNTIME_DEX_UTF_H_
diff --git a/runtime/utf_test.cc b/runtime/dex/utf_test.cc
index d1e97515d3..d1e97515d3 100644
--- a/runtime/utf_test.cc
+++ b/runtime/dex/utf_test.cc
diff --git a/runtime/dex2oat_environment_test.h b/runtime/dex2oat_environment_test.h
index e459f09e95..20cde530c2 100644
--- a/runtime/dex2oat_environment_test.h
+++ b/runtime/dex2oat_environment_test.h
@@ -27,6 +27,7 @@
#include "base/stl_util.h"
#include "common_runtime_test.h"
#include "compiler_callbacks.h"
+#include "dex/art_dex_file_loader.h"
#include "dex/dex_file_loader.h"
#include "exec_utils.h"
#include "gc/heap.h"
@@ -43,6 +44,7 @@ class Dex2oatEnvironmentTest : public CommonRuntimeTest {
public:
virtual void SetUp() OVERRIDE {
CommonRuntimeTest::SetUp();
+ const ArtDexFileLoader dex_file_loader;
// Create a scratch directory to work from.
@@ -74,7 +76,7 @@ class Dex2oatEnvironmentTest : public CommonRuntimeTest {
ASSERT_TRUE(OS::FileExists(GetStrippedDexSrc1().c_str()))
<< "Expected stripped dex file to be at: " << GetStrippedDexSrc1();
ASSERT_FALSE(
- DexFileLoader::GetMultiDexChecksums(GetStrippedDexSrc1().c_str(), &checksums, &error_msg))
+ dex_file_loader.GetMultiDexChecksums(GetStrippedDexSrc1().c_str(), &checksums, &error_msg))
<< "Expected stripped dex file to be stripped: " << GetStrippedDexSrc1();
ASSERT_TRUE(OS::FileExists(GetDexSrc2().c_str()))
<< "Expected dex file to be at: " << GetDexSrc2();
@@ -83,21 +85,21 @@ class Dex2oatEnvironmentTest : public CommonRuntimeTest {
// GetMultiDexSrc1, but a different secondary dex checksum.
static constexpr bool kVerifyChecksum = true;
std::vector<std::unique_ptr<const DexFile>> multi1;
- ASSERT_TRUE(DexFileLoader::Open(GetMultiDexSrc1().c_str(),
- GetMultiDexSrc1().c_str(),
- /* verify */ true,
- kVerifyChecksum,
- &error_msg,
- &multi1)) << error_msg;
+ ASSERT_TRUE(dex_file_loader.Open(GetMultiDexSrc1().c_str(),
+ GetMultiDexSrc1().c_str(),
+ /* verify */ true,
+ kVerifyChecksum,
+ &error_msg,
+ &multi1)) << error_msg;
ASSERT_GT(multi1.size(), 1u);
std::vector<std::unique_ptr<const DexFile>> multi2;
- ASSERT_TRUE(DexFileLoader::Open(GetMultiDexSrc2().c_str(),
- GetMultiDexSrc2().c_str(),
- /* verify */ true,
- kVerifyChecksum,
- &error_msg,
- &multi2)) << error_msg;
+ ASSERT_TRUE(dex_file_loader.Open(GetMultiDexSrc2().c_str(),
+ GetMultiDexSrc2().c_str(),
+ /* verify */ true,
+ kVerifyChecksum,
+ &error_msg,
+ &multi2)) << error_msg;
ASSERT_GT(multi2.size(), 1u);
ASSERT_EQ(multi1[0]->GetLocationChecksum(), multi2[0]->GetLocationChecksum());
diff --git a/runtime/dex_to_dex_decompiler.cc b/runtime/dex_to_dex_decompiler.cc
index e1c07baede..7887191713 100644
--- a/runtime/dex_to_dex_decompiler.cc
+++ b/runtime/dex_to_dex_decompiler.cc
@@ -36,8 +36,7 @@ class DexDecompiler {
const ArrayRef<const uint8_t>& quickened_info,
bool decompile_return_instruction)
: code_item_accessor_(dex_file, &code_item),
- quicken_info_(quickened_info.data()),
- quicken_info_number_of_indices_(QuickenInfoTable::NumberOfIndices(quickened_info.size())),
+ quicken_info_(quickened_info),
decompile_return_instruction_(decompile_return_instruction) {}
bool Decompile();
@@ -72,7 +71,7 @@ class DexDecompiler {
}
uint16_t NextIndex() {
- DCHECK_LT(quicken_index_, quicken_info_number_of_indices_);
+ DCHECK_LT(quicken_index_, quicken_info_.NumIndices());
const uint16_t ret = quicken_info_.GetData(quicken_index_);
quicken_index_++;
return ret;
@@ -80,7 +79,6 @@ class DexDecompiler {
const CodeItemInstructionAccessor code_item_accessor_;
const QuickenInfoTable quicken_info_;
- const size_t quicken_info_number_of_indices_;
const bool decompile_return_instruction_;
size_t quicken_index_ = 0u;
@@ -104,7 +102,7 @@ bool DexDecompiler::Decompile() {
break;
case Instruction::NOP:
- if (quicken_info_number_of_indices_ > 0) {
+ if (quicken_info_.NumIndices() > 0) {
// Only try to decompile NOP if there are more than 0 indices. Not having
// any index happens when we unquicken a code item that only has
// RETURN_VOID_NO_BARRIER as quickened instruction.
@@ -181,14 +179,14 @@ bool DexDecompiler::Decompile() {
}
}
- if (quicken_index_ != quicken_info_number_of_indices_) {
+ if (quicken_index_ != quicken_info_.NumIndices()) {
if (quicken_index_ == 0) {
LOG(WARNING) << "Failed to use any value in quickening info,"
<< " potentially due to duplicate methods.";
} else {
LOG(FATAL) << "Failed to use all values in quickening info."
<< " Actual: " << std::hex << quicken_index_
- << " Expected: " << quicken_info_number_of_indices_;
+ << " Expected: " << quicken_info_.NumIndices();
return false;
}
}
diff --git a/runtime/dexopt_test.cc b/runtime/dexopt_test.cc
index d93d76793f..037d1fb49c 100644
--- a/runtime/dexopt_test.cc
+++ b/runtime/dexopt_test.cc
@@ -213,8 +213,8 @@ void DexoptTest::ReserveImageSpace() {
// Ensure a chunk of memory is reserved for the image space.
// The reservation_end includes room for the main space that has to come
// right after the image in case of the GSS collector.
- uintptr_t reservation_start = ART_BASE_ADDRESS;
- uintptr_t reservation_end = ART_BASE_ADDRESS + 384 * MB;
+ uint64_t reservation_start = ART_BASE_ADDRESS;
+ uint64_t reservation_end = ART_BASE_ADDRESS + 384 * MB;
std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid(), true));
ASSERT_TRUE(map.get() != nullptr) << "Failed to build process map";
diff --git a/runtime/elf.h b/runtime/elf.h
index 63b18c5d34..521d4a232f 100644
--- a/runtime/elf.h
+++ b/runtime/elf.h
@@ -64,6 +64,9 @@ constexpr char ELFMAG0 = ElfMagic[EI_MAG0];
constexpr char ELFMAG1 = ElfMagic[EI_MAG1];
constexpr char ELFMAG2 = ElfMagic[EI_MAG2];
constexpr char ELFMAG3 = ElfMagic[EI_MAG3];
+constexpr char ELFMAG[] = "\177ELF";
+constexpr int SELFMAG = 4;
+constexpr int NT_PRSTATUS = 1;
// END android-added for <elf.h> compat
struct Elf32_Ehdr {
@@ -1411,7 +1414,9 @@ struct Elf32_Sym {
};
// BEGIN android-added for <elf.h> compat
+static inline unsigned char ELF32_ST_BIND(unsigned char st_info) { return st_info >> 4; }
static inline unsigned char ELF32_ST_TYPE(unsigned char st_info) { return st_info & 0x0f; }
+static inline unsigned char ELF64_ST_BIND(unsigned char st_info) { return st_info >> 4; }
static inline unsigned char ELF64_ST_TYPE(unsigned char st_info) { return st_info & 0x0f; }
// END android-added for <elf.h> compat
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 3048f45f30..9ef7d426df 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -25,12 +25,12 @@
#include "class_linker-inl.h"
#include "common_throws.h"
#include "dex/dex_file.h"
+#include "dex/invoke_type.h"
#include "entrypoints/quick/callee_save_frame.h"
#include "handle_scope-inl.h"
#include "imt_conflict_table.h"
#include "imtable-inl.h"
#include "indirect_reference_table.h"
-#include "invoke_type.h"
#include "jni_internal.h"
#include "mirror/array.h"
#include "mirror/class-inl.h"
diff --git a/runtime/entrypoints/quick/quick_entrypoints_list.h b/runtime/entrypoints/quick/quick_entrypoints_list.h
index 74e7c180b8..48a56f2fbf 100644
--- a/runtime/entrypoints/quick/quick_entrypoints_list.h
+++ b/runtime/entrypoints/quick/quick_entrypoints_list.h
@@ -91,6 +91,7 @@
V(Asin, double, double) \
V(Atan, double, double) \
V(Atan2, double, double, double) \
+ V(Pow, double, double, double) \
V(Cbrt, double, double) \
V(Cosh, double, double) \
V(Exp, double, double) \
diff --git a/runtime/entrypoints/quick/quick_throw_entrypoints.cc b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
index 565b4edcc3..9b0756b529 100644
--- a/runtime/entrypoints/quick/quick_throw_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include "art_method-inl.h"
#include "callee_save_frame.h"
#include "common_throws.h"
#include "mirror/object-inl.h"
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index f727690c11..c5157ce9f4 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -785,7 +785,7 @@ extern "C" uint64_t artQuickToInterpreterBridge(ArtMethod* method, Thread* self,
uint32_t shorty_len = 0;
ArtMethod* non_proxy_method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
DCHECK(non_proxy_method->GetCodeItem() != nullptr) << method->PrettyMethod();
- CodeItemDataAccessor accessor(non_proxy_method);
+ CodeItemDataAccessor accessor(non_proxy_method->DexInstructionData());
const char* shorty = non_proxy_method->GetShorty(&shorty_len);
JValue result;
@@ -1121,7 +1121,7 @@ extern "C" const void* artQuickResolutionTrampoline(
// code.
if (!found_stack_map || kIsDebugBuild) {
uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp);
- CodeItemInstructionAccessor accessor(caller);
+ CodeItemInstructionAccessor accessor(caller->DexInstructions());
CHECK_LT(dex_pc, accessor.InsnsSizeInCodeUnits());
const Instruction& instr = accessor.InstructionAt(dex_pc);
Instruction::Code instr_code = instr.Opcode();
diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc
index 7c912d0a4a..1fdf439d3f 100644
--- a/runtime/entrypoints_order_test.cc
+++ b/runtime/entrypoints_order_test.cc
@@ -238,7 +238,8 @@ class EntrypointsOrderTest : public CommonRuntimeTest {
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAcos, pAsin, sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAsin, pAtan, sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAtan, pAtan2, sizeof(void*));
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAtan2, pCbrt, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAtan2, pPow, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pPow, pCbrt, sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pCbrt, pCosh, sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pCosh, pExp, sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pExp, pExpm1, sizeof(void*));
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index 49c2a15e86..3015b10103 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -17,6 +17,7 @@
#include "fault_handler.h"
#include <setjmp.h>
+#include <string.h>
#include <sys/mman.h>
#include <sys/ucontext.h>
@@ -183,8 +184,31 @@ bool FaultManager::HandleFaultByOtherHandlers(int sig, siginfo_t* info, void* co
return false;
}
+static const char* SignalCodeName(int sig, int code) {
+ if (sig != SIGSEGV) {
+ return "UNKNOWN";
+ } else {
+ switch (code) {
+ case SEGV_MAPERR: return "SEGV_MAPERR";
+ case SEGV_ACCERR: return "SEGV_ACCERR";
+ default: return "UNKNOWN";
+ }
+ }
+}
+static std::ostream& PrintSignalInfo(std::ostream& os, siginfo_t* info) {
+ os << " si_signo: " << info->si_signo << " (" << strsignal(info->si_signo) << ")\n"
+ << " si_code: " << info->si_code
+ << " (" << SignalCodeName(info->si_signo, info->si_code) << ")";
+ if (info->si_signo == SIGSEGV) {
+ os << "\n" << " si_addr: " << info->si_addr;
+ }
+ return os;
+}
+
bool FaultManager::HandleFault(int sig, siginfo_t* info, void* context) {
- VLOG(signals) << "Handling fault";
+ if (VLOG_IS_ON(signals)) {
+ PrintSignalInfo(VLOG_STREAM(signals) << "Handling fault:" << "\n", info);
+ }
#ifdef TEST_NESTED_SIGNAL
// Simulate a crash in a handler.
@@ -201,13 +225,13 @@ bool FaultManager::HandleFault(int sig, siginfo_t* info, void* context) {
return true;
}
}
+ }
- // We hit a signal we didn't handle. This might be something for which
- // we can give more information about so call all registered handlers to
- // see if it is.
- if (HandleFaultByOtherHandlers(sig, info, context)) {
- return true;
- }
+ // We hit a signal we didn't handle. This might be something for which
+ // we can give more information about so call all registered handlers to
+ // see if it is.
+ if (HandleFaultByOtherHandlers(sig, info, context)) {
+ return true;
}
// Set a breakpoint in this function to catch unhandled signals.
@@ -232,7 +256,7 @@ void FaultManager::RemoveHandler(FaultHandler* handler) {
}
auto it2 = std::find(other_handlers_.begin(), other_handlers_.end(), handler);
if (it2 != other_handlers_.end()) {
- other_handlers_.erase(it);
+ other_handlers_.erase(it2);
return;
}
LOG(FATAL) << "Attempted to remove non existent handler " << handler;
diff --git a/runtime/gc/allocator/dlmalloc.cc b/runtime/gc/allocator/dlmalloc.cc
index 65062208d6..4570e9c1b8 100644
--- a/runtime/gc/allocator/dlmalloc.cc
+++ b/runtime/gc/allocator/dlmalloc.cc
@@ -37,6 +37,7 @@ static void art_heap_usage_error(const char* function, void* p);
#pragma GCC diagnostic ignored "-Wredundant-decls"
#pragma GCC diagnostic ignored "-Wempty-body"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
+#pragma GCC diagnostic ignored "-Wnull-pointer-arithmetic"
#include "../../../external/dlmalloc/malloc.c"
// Note: malloc.c uses a DEBUG define to drive debug code. This interferes with the DEBUG severity
// of libbase, so undefine it now.
diff --git a/runtime/gc/allocator/dlmalloc.h b/runtime/gc/allocator/dlmalloc.h
index 29b96ee96c..b12691ad0e 100644
--- a/runtime/gc/allocator/dlmalloc.h
+++ b/runtime/gc/allocator/dlmalloc.h
@@ -32,6 +32,7 @@
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wredundant-decls"
+#pragma GCC diagnostic ignored "-Wnull-pointer-arithmetic"
#include "../../external/dlmalloc/malloc.h"
#pragma GCC diagnostic pop
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index cf837161e0..1e0c0b16e4 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -88,7 +88,6 @@ ConcurrentCopying::ConcurrentCopying(Heap* heap,
from_space_num_bytes_at_first_pause_(0),
mark_stack_mode_(kMarkStackModeOff),
weak_ref_access_enabled_(true),
- max_peak_num_non_free_regions_(0),
skipped_blocks_lock_("concurrent copying bytes blocks lock", kMarkSweepMarkStackLock),
measure_read_barrier_slow_path_(measure_read_barrier_slow_path),
mark_from_read_barrier_measurements_(false),
@@ -301,7 +300,6 @@ void ConcurrentCopying::InitializePhase() {
objects_moved_.StoreRelaxed(0);
GcCause gc_cause = GetCurrentIteration()->GetGcCause();
if (gc_cause == kGcCauseExplicit ||
- gc_cause == kGcCauseForNativeAllocBlocking ||
gc_cause == kGcCauseCollectorTransition ||
GetCurrentIteration()->GetClearSoftReferences()) {
force_evacuate_all_ = true;
@@ -1755,8 +1753,6 @@ void ConcurrentCopying::ReclaimPhase() {
cumulative_bytes_moved_.FetchAndAddRelaxed(to_bytes);
uint64_t to_objects = objects_moved_.LoadSequentiallyConsistent();
cumulative_objects_moved_.FetchAndAddRelaxed(to_objects);
- max_peak_num_non_free_regions_ = std::max(max_peak_num_non_free_regions_,
- region_space_->GetNumNonFreeRegions());
if (kEnableFromSpaceAccountingCheck) {
CHECK_EQ(from_space_num_objects_at_first_pause_, from_objects + unevac_from_objects);
CHECK_EQ(from_space_num_bytes_at_first_pause_, from_bytes + unevac_from_bytes);
@@ -2269,7 +2265,7 @@ mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref,
size_t non_moving_space_bytes_allocated = 0U;
size_t bytes_allocated = 0U;
size_t dummy;
- mirror::Object* to_ref = region_space_->AllocNonvirtual<true>(
+ mirror::Object* to_ref = region_space_->AllocNonvirtual</*kForEvac*/ true>(
region_space_alloc_size, &region_space_bytes_allocated, nullptr, &dummy);
bytes_allocated = region_space_bytes_allocated;
if (to_ref != nullptr) {
@@ -2341,7 +2337,7 @@ mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref,
DCHECK(region_space_->IsInToSpace(to_ref));
if (bytes_allocated > space::RegionSpace::kRegionSize) {
// Free the large alloc.
- region_space_->FreeLarge(to_ref, bytes_allocated);
+ region_space_->FreeLarge</*kForEvac*/ true>(to_ref, bytes_allocated);
} else {
// Record the lost copy for later reuse.
heap_->num_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes_allocated);
@@ -2696,10 +2692,10 @@ void ConcurrentCopying::DumpPerformanceInfo(std::ostream& os) {
os << "Cumulative objects moved " << cumulative_objects_moved_.LoadRelaxed() << "\n";
os << "Peak regions allocated "
- << max_peak_num_non_free_regions_ << " ("
- << PrettySize(max_peak_num_non_free_regions_ * space::RegionSpace::kRegionSize)
- << ") / " << region_space_->GetNumRegions() << " ("
- << PrettySize(region_space_->GetNumRegions() * space::RegionSpace::kRegionSize)
+ << region_space_->GetMaxPeakNumNonFreeRegions() << " ("
+ << PrettySize(region_space_->GetMaxPeakNumNonFreeRegions() * space::RegionSpace::kRegionSize)
+ << ") / " << region_space_->GetNumRegions() / 2 << " ("
+ << PrettySize(region_space_->GetNumRegions() * space::RegionSpace::kRegionSize / 2)
<< ")\n";
}
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index 939e7fc8a4..8b4b58e7b1 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -308,11 +308,6 @@ class ConcurrentCopying : public GarbageCollector {
Atomic<uint64_t> cumulative_bytes_moved_;
Atomic<uint64_t> cumulative_objects_moved_;
- // Maintain the maximum of number of non-free regions collected just before
- // reclaim in each GC cycle. At this moment in cycle, highest number of
- // regions are in non-free.
- size_t max_peak_num_non_free_regions_;
-
// The skipped blocks are memory blocks/chucks that were copies of
// objects that were unused due to lost races (cas failures) at
// object copy/forward pointer install. They are reused.
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index 3150781a5a..1e136bca2e 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -193,7 +193,6 @@ void SemiSpace::MarkingPhase() {
if (generational_) {
if (GetCurrentIteration()->GetGcCause() == kGcCauseExplicit ||
GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAlloc ||
- GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAllocBlocking ||
GetCurrentIteration()->GetClearSoftReferences()) {
// If an explicit, native allocation-triggered, or last attempt
// collection, collect the whole heap.
diff --git a/runtime/gc/gc_cause.cc b/runtime/gc/gc_cause.cc
index d88fcdcc95..508d76535e 100644
--- a/runtime/gc/gc_cause.cc
+++ b/runtime/gc/gc_cause.cc
@@ -33,7 +33,6 @@ const char* PrettyCause(GcCause cause) {
case kGcCauseBackground: return "Background";
case kGcCauseExplicit: return "Explicit";
case kGcCauseForNativeAlloc: return "NativeAlloc";
- case kGcCauseForNativeAllocBlocking: return "NativeAllocBlocking";
case kGcCauseCollectorTransition: return "CollectorTransition";
case kGcCauseDisableMovingGc: return "DisableMovingGc";
case kGcCauseHomogeneousSpaceCompact: return "HomogeneousSpaceCompact";
diff --git a/runtime/gc/gc_cause.h b/runtime/gc/gc_cause.h
index 78496f3ead..81781ceeb7 100644
--- a/runtime/gc/gc_cause.h
+++ b/runtime/gc/gc_cause.h
@@ -36,9 +36,6 @@ enum GcCause {
// GC triggered for a native allocation when NativeAllocationGcWatermark is exceeded.
// (This may be a blocking GC depending on whether we run a non-concurrent collector).
kGcCauseForNativeAlloc,
- // GC triggered for a native allocation when NativeAllocationBlockingGcWatermark is exceeded.
- // (This is always a blocking GC).
- kGcCauseForNativeAllocBlocking,
// GC triggered for a collector transition.
kGcCauseCollectorTransition,
// Not a real GC cause, used when we disable moving GC (currently for GetPrimitiveArrayCritical).
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 52dd104ac8..6735961591 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -106,8 +106,8 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self,
pre_fence_visitor(obj, usable_size);
QuasiAtomic::ThreadFenceForConstructor();
} else {
- // bytes allocated that takes bulk thread-local buffer allocations into account.
- size_t bytes_tl_bulk_allocated = 0;
+ // Bytes allocated that takes bulk thread-local buffer allocations into account.
+ size_t bytes_tl_bulk_allocated = 0u;
obj = TryToAllocate<kInstrumented, false>(self, allocator, byte_count, &bytes_allocated,
&usable_size, &bytes_tl_bulk_allocated);
if (UNLIKELY(obj == nullptr)) {
@@ -154,12 +154,13 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self,
}
pre_fence_visitor(obj, usable_size);
QuasiAtomic::ThreadFenceForConstructor();
- new_num_bytes_allocated = num_bytes_allocated_.FetchAndAddRelaxed(bytes_tl_bulk_allocated) +
- bytes_tl_bulk_allocated;
+ size_t num_bytes_allocated_before =
+ num_bytes_allocated_.FetchAndAddRelaxed(bytes_tl_bulk_allocated);
+ new_num_bytes_allocated = num_bytes_allocated_before + bytes_tl_bulk_allocated;
if (bytes_tl_bulk_allocated > 0) {
// Only trace when we get an increase in the number of bytes allocated. This happens when
// obtaining a new TLAB and isn't often enough to hurt performance according to golem.
- TraceHeapSize(new_num_bytes_allocated + bytes_tl_bulk_allocated);
+ TraceHeapSize(new_num_bytes_allocated);
}
}
if (kIsDebugBuild && Runtime::Current()->IsStarted()) {
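
The heap-inl.h hunk above fixes a double count: the old code passed bytes_tl_bulk_allocated to TraceHeapSize() a second time even though new_num_bytes_allocated already includes it. With hypothetical numbers:

    #include <cstddef>
    #include <cstdio>

    int main() {
      // Made-up values, only to show the double count fixed above.
      const std::size_t before_bytes = 100u << 20;           // num_bytes_allocated_ before the TLAB grab
      const std::size_t bytes_tl_bulk_allocated = 1u << 20;  // size of the newly acquired TLAB
      const std::size_t new_num_bytes_allocated = before_bytes + bytes_tl_bulk_allocated;
      std::printf("old traced size: %zu\n", new_num_bytes_allocated + bytes_tl_bulk_allocated);  // 1 MiB too high
      std::printf("new traced size: %zu\n", new_num_bytes_allocated);
      return 0;
    }
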
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index f0f8b4e593..cf5bd4aed2 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -128,9 +128,6 @@ static constexpr size_t kVerifyObjectAllocationStackSize = 16 * KB /
sizeof(mirror::HeapReference<mirror::Object>);
static constexpr size_t kDefaultAllocationStackSize = 8 * MB /
sizeof(mirror::HeapReference<mirror::Object>);
-// System.runFinalization can deadlock with native allocations, to deal with this, we have a
-// timeout on how long we wait for finalizers to run. b/21544853
-static constexpr uint64_t kNativeAllocationFinalizeTimeout = MsToNs(250u);
// For deterministic compilation, we need the heap to be at a well-known address.
static constexpr uint32_t kAllocSpaceBeginForDeterministicAoT = 0x40000000;
@@ -561,12 +558,6 @@ Heap::Heap(size_t initial_size,
gc_complete_lock_ = new Mutex("GC complete lock");
gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable",
*gc_complete_lock_));
- native_blocking_gc_lock_ = new Mutex("Native blocking GC lock");
- native_blocking_gc_cond_.reset(new ConditionVariable("Native blocking GC condition variable",
- *native_blocking_gc_lock_));
- native_blocking_gc_is_assigned_ = false;
- native_blocking_gc_in_progress_ = false;
- native_blocking_gcs_finished_ = 0;
thread_flip_lock_ = new Mutex("GC thread flip lock");
thread_flip_cond_.reset(new ConditionVariable("GC thread flip condition variable",
@@ -1143,7 +1134,6 @@ Heap::~Heap() {
STLDeleteElements(&continuous_spaces_);
STLDeleteElements(&discontinuous_spaces_);
delete gc_complete_lock_;
- delete native_blocking_gc_lock_;
delete thread_flip_lock_;
delete pending_task_lock_;
delete backtrace_lock_;
@@ -2565,10 +2555,6 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type,
// old_native_bytes_allocated_ now that GC has been triggered, resetting
// new_native_bytes_allocated_ to zero in the process.
old_native_bytes_allocated_.FetchAndAddRelaxed(new_native_bytes_allocated_.ExchangeRelaxed(0));
- if (gc_cause == kGcCauseForNativeAllocBlocking) {
- MutexLock mu(self, *native_blocking_gc_lock_);
- native_blocking_gc_in_progress_ = true;
- }
}
DCHECK_LT(gc_type, collector::kGcTypeMax);
@@ -3395,7 +3381,6 @@ collector::GcType Heap::WaitForGcToCompleteLocked(GcCause cause, Thread* self) {
// it results in log spam. kGcCauseExplicit is already logged in LogGC, so avoid it here too.
if (cause == kGcCauseForAlloc ||
cause == kGcCauseForNativeAlloc ||
- cause == kGcCauseForNativeAllocBlocking ||
cause == kGcCauseDisableMovingGc) {
VLOG(gc) << "Starting a blocking GC " << cause;
}
@@ -3508,10 +3493,8 @@ void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran,
const uint64_t bytes_allocated_during_gc = bytes_allocated + freed_bytes -
bytes_allocated_before_gc;
// Calculate when to perform the next ConcurrentGC.
- // Calculate the estimated GC duration.
- const double gc_duration_seconds = NsToMs(current_gc_iteration_.GetDurationNs()) / 1000.0;
// Estimate how many remaining bytes we will have when we need to start the next GC.
- size_t remaining_bytes = bytes_allocated_during_gc * gc_duration_seconds;
+ size_t remaining_bytes = bytes_allocated_during_gc;
remaining_bytes = std::min(remaining_bytes, kMaxConcurrentRemainingBytes);
remaining_bytes = std::max(remaining_bytes, kMinConcurrentRemainingBytes);
if (UNLIKELY(remaining_bytes > max_allowed_footprint_)) {
@@ -3781,59 +3764,9 @@ void Heap::RunFinalization(JNIEnv* env, uint64_t timeout) {
}
void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
- // See the REDESIGN section of go/understanding-register-native-allocation
- // for an explanation of how RegisterNativeAllocation works.
- size_t new_value = bytes + new_native_bytes_allocated_.FetchAndAddRelaxed(bytes);
- if (new_value > NativeAllocationBlockingGcWatermark()) {
- // Wait for a new GC to finish and finalizers to run, because the
- // allocation rate is too high.
- Thread* self = ThreadForEnv(env);
-
- bool run_gc = false;
- {
- MutexLock mu(self, *native_blocking_gc_lock_);
- uint32_t initial_gcs_finished = native_blocking_gcs_finished_;
- if (native_blocking_gc_in_progress_) {
- // A native blocking GC is in progress from the last time the native
- // allocation blocking GC watermark was exceeded. Wait for that GC to
- // finish before addressing the fact that we exceeded the blocking
- // watermark again.
- do {
- ScopedTrace trace("RegisterNativeAllocation: Wait For Prior Blocking GC Completion");
- native_blocking_gc_cond_->Wait(self);
- } while (native_blocking_gcs_finished_ == initial_gcs_finished);
- initial_gcs_finished++;
- }
-
- // It's possible multiple threads have seen that we exceeded the
- // blocking watermark. Ensure that only one of those threads is assigned
- // to run the blocking GC. The rest of the threads should instead wait
- // for the blocking GC to complete.
- if (native_blocking_gcs_finished_ == initial_gcs_finished) {
- if (native_blocking_gc_is_assigned_) {
- do {
- ScopedTrace trace("RegisterNativeAllocation: Wait For Blocking GC Completion");
- native_blocking_gc_cond_->Wait(self);
- } while (native_blocking_gcs_finished_ == initial_gcs_finished);
- } else {
- native_blocking_gc_is_assigned_ = true;
- run_gc = true;
- }
- }
- }
+ size_t old_value = new_native_bytes_allocated_.FetchAndAddRelaxed(bytes);
- if (run_gc) {
- CollectGarbageInternal(NonStickyGcType(), kGcCauseForNativeAllocBlocking, false);
- RunFinalization(env, kNativeAllocationFinalizeTimeout);
- CHECK(!env->ExceptionCheck());
-
- MutexLock mu(self, *native_blocking_gc_lock_);
- native_blocking_gc_is_assigned_ = false;
- native_blocking_gc_in_progress_ = false;
- native_blocking_gcs_finished_++;
- native_blocking_gc_cond_->Broadcast(self);
- }
- } else if (new_value > NativeAllocationGcWatermark() * HeapGrowthMultiplier() &&
+ if (old_value > NativeAllocationGcWatermark() * HeapGrowthMultiplier() &&
!IsGCRequestPending()) {
// Trigger another GC because there have been enough native bytes
// allocated since the last GC.
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 7dcf82f415..faa6195259 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -268,7 +268,7 @@ class Heap {
REQUIRES_SHARED(Locks::mutator_lock_);
void RegisterNativeAllocation(JNIEnv* env, size_t bytes)
- REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*native_blocking_gc_lock_);
+ REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
void RegisterNativeFree(JNIEnv* env, size_t bytes);
// Change the allocator, updates entrypoints.
@@ -1087,16 +1087,6 @@ class Heap {
return max_free_;
}
- // How large new_native_bytes_allocated_ can grow while GC is in progress
- // before we block the allocating thread to allow GC to catch up.
- ALWAYS_INLINE size_t NativeAllocationBlockingGcWatermark() const {
- // Historically the native allocations were bounded by growth_limit_. This
- // uses that same value, dividing growth_limit_ by 2 to account for
- // the fact that now the bound is relative to the number of retained
- // registered native allocations rather than absolute.
- return growth_limit_ / 2;
- }
-
void TraceHeapSize(size_t heap_size);
// Remove a vlog code from heap-inl.h which is transitively included in half the world.
@@ -1252,23 +1242,6 @@ class Heap {
// old_native_bytes_allocated_ and new_native_bytes_allocated_.
Atomic<size_t> old_native_bytes_allocated_;
- // Used for synchronization when multiple threads call into
- // RegisterNativeAllocation and require blocking GC.
- // * If a previous blocking GC is in progress, all threads will wait for
- // that GC to complete, then wait for one of the threads to complete another
- // blocking GC.
- // * If a blocking GC is assigned but not in progress, a thread has been
- // assigned to run a blocking GC but has not started yet. Threads will wait
- // for the assigned blocking GC to complete.
- // * If a blocking GC is not assigned nor in progress, the first thread will
- // run a blocking GC and signal to other threads that blocking GC has been
- // assigned.
- Mutex* native_blocking_gc_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
- std::unique_ptr<ConditionVariable> native_blocking_gc_cond_ GUARDED_BY(native_blocking_gc_lock_);
- bool native_blocking_gc_is_assigned_ GUARDED_BY(native_blocking_gc_lock_);
- bool native_blocking_gc_in_progress_ GUARDED_BY(native_blocking_gc_lock_);
- uint32_t native_blocking_gcs_finished_ GUARDED_BY(native_blocking_gc_lock_);
-
// Number of bytes freed by thread local buffer revokes. This will
// cancel out the ahead-of-time bulk counting of bytes allocated in
// rosalloc thread-local buffers. It is temporarily accumulated
@@ -1320,10 +1293,6 @@ class Heap {
// Parallel GC data structures.
std::unique_ptr<ThreadPool> thread_pool_;
- // Estimated allocation rate (bytes / second). Computed between the time of the last GC cycle
- // and the start of the current one.
- uint64_t allocation_rate_;
-
// For a GC cycle, a bitmap that is set corresponding to the
std::unique_ptr<accounting::HeapBitmap> live_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);
std::unique_ptr<accounting::HeapBitmap> mark_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 251d94ca25..ca5a3eeb17 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -36,6 +36,7 @@
#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/time_utils.h"
+#include "dex/art_dex_file_loader.h"
#include "dex/dex_file_loader.h"
#include "exec_utils.h"
#include "gc/accounting/space_bitmap-inl.h"
@@ -1828,6 +1829,7 @@ std::string ImageSpace::GetMultiImageBootClassPath(
}
bool ImageSpace::ValidateOatFile(const OatFile& oat_file, std::string* error_msg) {
+ const ArtDexFileLoader dex_file_loader;
for (const OatFile::OatDexFile* oat_dex_file : oat_file.GetOatDexFiles()) {
const std::string& dex_file_location = oat_dex_file->GetDexFileLocation();
@@ -1838,7 +1840,7 @@ bool ImageSpace::ValidateOatFile(const OatFile& oat_file, std::string* error_msg
}
std::vector<uint32_t> checksums;
- if (!DexFileLoader::GetMultiDexChecksums(dex_file_location.c_str(), &checksums, error_msg)) {
+ if (!dex_file_loader.GetMultiDexChecksums(dex_file_location.c_str(), &checksums, error_msg)) {
*error_msg = StringPrintf("ValidateOatFile failed to get checksums of dex file '%s' "
"referenced by oat file %s: %s",
dex_file_location.c_str(),
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index ea2168fe9c..e74e9b169f 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -115,7 +115,7 @@ inline mirror::Object* RegionSpace::Region::Alloc(size_t num_bytes, size_t* byte
}
template<RegionSpace::RegionType kRegionType>
-uint64_t RegionSpace::GetBytesAllocatedInternal() {
+inline uint64_t RegionSpace::GetBytesAllocatedInternal() {
uint64_t bytes = 0;
MutexLock mu(Thread::Current(), region_lock_);
for (size_t i = 0; i < num_regions_; ++i) {
@@ -150,7 +150,7 @@ uint64_t RegionSpace::GetBytesAllocatedInternal() {
}
template<RegionSpace::RegionType kRegionType>
-uint64_t RegionSpace::GetObjectsAllocatedInternal() {
+inline uint64_t RegionSpace::GetObjectsAllocatedInternal() {
uint64_t bytes = 0;
MutexLock mu(Thread::Current(), region_lock_);
for (size_t i = 0; i < num_regions_; ++i) {
@@ -185,7 +185,7 @@ uint64_t RegionSpace::GetObjectsAllocatedInternal() {
}
template<bool kToSpaceOnly, typename Visitor>
-void RegionSpace::WalkInternal(Visitor&& visitor) {
+inline void RegionSpace::WalkInternal(Visitor&& visitor) {
// TODO: MutexLock on region_lock_ won't work due to lock order
// issues (the classloader classes lock and the monitor lock). We
// call this with threads suspended.
@@ -237,9 +237,10 @@ inline mirror::Object* RegionSpace::GetNextObject(mirror::Object* obj) {
}
template<bool kForEvac>
-mirror::Object* RegionSpace::AllocLarge(size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size,
- size_t* bytes_tl_bulk_allocated) {
+inline mirror::Object* RegionSpace::AllocLarge(size_t num_bytes,
+ size_t* bytes_allocated,
+ size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated) {
DCHECK_ALIGNED(num_bytes, kAlignment);
DCHECK_GT(num_bytes, kRegionSize);
size_t num_regs = RoundUp(num_bytes, kRegionSize) / kRegionSize;
@@ -274,7 +275,11 @@ mirror::Object* RegionSpace::AllocLarge(size_t num_bytes, size_t* bytes_allocate
Region* first_reg = &regions_[left];
DCHECK(first_reg->IsFree());
first_reg->UnfreeLarge(this, time_);
- ++num_non_free_regions_;
+ if (kForEvac) {
+ ++num_evac_regions_;
+ } else {
+ ++num_non_free_regions_;
+ }
size_t allocated = num_regs * kRegionSize;
// We make 'top' all usable bytes, as the caller of this
// allocation may use all of 'usable_size' (see mirror::Array::Alloc).
@@ -283,7 +288,11 @@ mirror::Object* RegionSpace::AllocLarge(size_t num_bytes, size_t* bytes_allocate
DCHECK_LT(p, num_regions_);
DCHECK(regions_[p].IsFree());
regions_[p].UnfreeLargeTail(this, time_);
- ++num_non_free_regions_;
+ if (kForEvac) {
+ ++num_evac_regions_;
+ } else {
+ ++num_non_free_regions_;
+ }
}
*bytes_allocated = allocated;
if (usable_size != nullptr) {
@@ -299,6 +308,35 @@ mirror::Object* RegionSpace::AllocLarge(size_t num_bytes, size_t* bytes_allocate
return nullptr;
}
+template<bool kForEvac>
+inline void RegionSpace::FreeLarge(mirror::Object* large_obj, size_t bytes_allocated) {
+ DCHECK(Contains(large_obj));
+ DCHECK_ALIGNED(large_obj, kRegionSize);
+ MutexLock mu(Thread::Current(), region_lock_);
+ uint8_t* begin_addr = reinterpret_cast<uint8_t*>(large_obj);
+ uint8_t* end_addr = AlignUp(reinterpret_cast<uint8_t*>(large_obj) + bytes_allocated, kRegionSize);
+ CHECK_LT(begin_addr, end_addr);
+ for (uint8_t* addr = begin_addr; addr < end_addr; addr += kRegionSize) {
+ Region* reg = RefToRegionLocked(reinterpret_cast<mirror::Object*>(addr));
+ if (addr == begin_addr) {
+ DCHECK(reg->IsLarge());
+ } else {
+ DCHECK(reg->IsLargeTail());
+ }
+ reg->Clear(/*zero_and_release_pages*/true);
+ if (kForEvac) {
+ --num_evac_regions_;
+ } else {
+ --num_non_free_regions_;
+ }
+ }
+ if (end_addr < Limit()) {
+ // If we aren't at the end of the space, check that the next region is not a large tail.
+ Region* following_reg = RefToRegionLocked(reinterpret_cast<mirror::Object*>(end_addr));
+ DCHECK(!following_reg->IsLargeTail());
+ }
+}
+
inline size_t RegionSpace::Region::BytesAllocated() const {
if (IsLarge()) {
DCHECK_LT(begin_ + kRegionSize, Top());
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index a51df7c783..45cfff90cc 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -84,14 +84,18 @@ RegionSpace* RegionSpace::Create(const std::string& name, MemMap* mem_map) {
RegionSpace::RegionSpace(const std::string& name, MemMap* mem_map)
: ContinuousMemMapAllocSpace(name, mem_map, mem_map->Begin(), mem_map->End(), mem_map->End(),
kGcRetentionPolicyAlwaysCollect),
- region_lock_("Region lock", kRegionSpaceRegionLock), time_(1U) {
- size_t mem_map_size = mem_map->Size();
- CHECK_ALIGNED(mem_map_size, kRegionSize);
+ region_lock_("Region lock", kRegionSpaceRegionLock),
+ time_(1U),
+ num_regions_(mem_map->Size() / kRegionSize),
+ num_non_free_regions_(0U),
+ num_evac_regions_(0U),
+ max_peak_num_non_free_regions_(0U),
+ non_free_region_index_limit_(0U),
+ current_region_(&full_region_),
+ evac_region_(nullptr) {
+ CHECK_ALIGNED(mem_map->Size(), kRegionSize);
CHECK_ALIGNED(mem_map->Begin(), kRegionSize);
- num_regions_ = mem_map_size / kRegionSize;
- num_non_free_regions_ = 0U;
DCHECK_GT(num_regions_, 0U);
- non_free_region_index_limit_ = 0U;
regions_.reset(new Region[num_regions_]);
uint8_t* region_addr = mem_map->Begin();
for (size_t i = 0; i < num_regions_; ++i, region_addr += kRegionSize) {
@@ -112,8 +116,6 @@ RegionSpace::RegionSpace(const std::string& name, MemMap* mem_map)
}
DCHECK(!full_region_.IsFree());
DCHECK(full_region_.IsAllocated());
- current_region_ = &full_region_;
- evac_region_ = nullptr;
size_t ignored;
DCHECK(full_region_.Alloc(kAlignment, &ignored, nullptr, &ignored) == nullptr);
}
@@ -267,6 +269,9 @@ void RegionSpace::ClearFromSpace(uint64_t* cleared_bytes, uint64_t* cleared_obje
VerifyNonFreeRegionLimit();
size_t new_non_free_region_index_limit = 0;
+ // Update max of peak non free region count before reclaiming evacuated regions.
+ max_peak_num_non_free_regions_ = std::max(max_peak_num_non_free_regions_,
+ num_non_free_regions_);
// Combine zeroing and releasing pages to reduce how often madvise is called. This helps
// reduce contention on the mmap semaphore. b/62194020
// clear_region adds a region to the current block. If the region is not adjacent, the
@@ -350,6 +355,8 @@ void RegionSpace::ClearFromSpace(uint64_t* cleared_bytes, uint64_t* cleared_obje
// Update non_free_region_index_limit_.
SetNonFreeRegionLimit(new_non_free_region_index_limit);
evac_region_ = nullptr;
+ num_non_free_regions_ += num_evac_regions_;
+ num_evac_regions_ = 0;
}
void RegionSpace::LogFragmentationAllocFailure(std::ostream& os,
@@ -411,30 +418,6 @@ void RegionSpace::Dump(std::ostream& os) const {
<< reinterpret_cast<void*>(Begin()) << "-" << reinterpret_cast<void*>(Limit());
}
-void RegionSpace::FreeLarge(mirror::Object* large_obj, size_t bytes_allocated) {
- DCHECK(Contains(large_obj));
- DCHECK_ALIGNED(large_obj, kRegionSize);
- MutexLock mu(Thread::Current(), region_lock_);
- uint8_t* begin_addr = reinterpret_cast<uint8_t*>(large_obj);
- uint8_t* end_addr = AlignUp(reinterpret_cast<uint8_t*>(large_obj) + bytes_allocated, kRegionSize);
- CHECK_LT(begin_addr, end_addr);
- for (uint8_t* addr = begin_addr; addr < end_addr; addr += kRegionSize) {
- Region* reg = RefToRegionLocked(reinterpret_cast<mirror::Object*>(addr));
- if (addr == begin_addr) {
- DCHECK(reg->IsLarge());
- } else {
- DCHECK(reg->IsLargeTail());
- }
- reg->Clear(/*zero_and_release_pages*/true);
- --num_non_free_regions_;
- }
- if (end_addr < Limit()) {
- // If we aren't at the end of the space, check that the next region is not a large tail.
- Region* following_reg = RefToRegionLocked(reinterpret_cast<mirror::Object*>(end_addr));
- DCHECK(!following_reg->IsLargeTail());
- }
-}
-
void RegionSpace::DumpRegions(std::ostream& os) {
MutexLock mu(Thread::Current(), region_lock_);
for (size_t i = 0; i < num_regions_; ++i) {
@@ -572,10 +555,12 @@ RegionSpace::Region* RegionSpace::AllocateRegion(bool for_evac) {
Region* r = &regions_[i];
if (r->IsFree()) {
r->Unfree(this, time_);
- ++num_non_free_regions_;
- if (!for_evac) {
+ if (for_evac) {
+ ++num_evac_regions_;
// Evac doesn't count as newly allocated.
+ } else {
r->SetNewlyAllocated();
+ ++num_non_free_regions_;
}
return r;
}
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index c9c9136c27..ef8aa52a03 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -64,6 +64,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
template<bool kForEvac>
mirror::Object* AllocLarge(size_t num_bytes, size_t* bytes_allocated, size_t* usable_size,
size_t* bytes_tl_bulk_allocated) REQUIRES(!region_lock_);
+ template<bool kForEvac>
void FreeLarge(mirror::Object* large_obj, size_t bytes_allocated) REQUIRES(!region_lock_);
// Return the storage space required by obj.
@@ -138,9 +139,8 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
uint64_t GetObjectsAllocatedInUnevacFromSpace() REQUIRES(!region_lock_) {
return GetObjectsAllocatedInternal<RegionType::kRegionTypeUnevacFromSpace>();
}
- // It is OK to do a racy read here as it's only for performance dump.
- size_t GetNumNonFreeRegions() const {
- return num_non_free_regions_;
+ size_t GetMaxPeakNumNonFreeRegions() const {
+ return max_peak_num_non_free_regions_;
}
size_t GetNumRegions() const {
return num_regions_;
@@ -530,8 +530,18 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
Mutex region_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
uint32_t time_; // The time as the number of collections since the startup.
- size_t num_regions_; // The number of regions in this space.
- size_t num_non_free_regions_; // The number of non-free regions in this space.
+ const size_t num_regions_; // The number of regions in this space.
+ // The number of non-free regions in this space.
+ size_t num_non_free_regions_ GUARDED_BY(region_lock_);
+
+ // The number of evac regions allocated during collection. 0 when GC not running.
+ size_t num_evac_regions_ GUARDED_BY(region_lock_);
+
+ // Maintain the maximum of number of non-free regions collected just before
+ // reclaim in each GC cycle. At this moment in cycle, highest number of
+ // regions are in non-free.
+ size_t max_peak_num_non_free_regions_;
+
std::unique_ptr<Region[]> regions_ GUARDED_BY(region_lock_);
// The pointer to the region array.
// The upper-bound index of the non-free regions. Used to avoid scanning all regions in
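
The region-space changes above split the accounting between regions holding mutator allocations (num_non_free_regions_) and regions grabbed as to-space during a concurrent-copying pass (num_evac_regions_), sampling the peak just before the from-space is reclaimed. A toy model of that bookkeeping, not ART code; the from-space reclamation that also decrements the non-free count is omitted for brevity.

    #include <algorithm>
    #include <cstddef>

    struct RegionCounters {
      std::size_t num_non_free_regions = 0;           // regions holding mutator allocations
      std::size_t num_evac_regions = 0;               // to-space regions allocated during GC
      std::size_t max_peak_num_non_free_regions = 0;  // reported by DumpPerformanceInfo()
    };

    void OnAllocateRegion(RegionCounters& c, bool for_evac) {
      if (for_evac) {
        ++c.num_evac_regions;  // evac allocations no longer inflate the non-free count
      } else {
        ++c.num_non_free_regions;
      }
    }

    void OnClearFromSpace(RegionCounters& c) {
      // Peak is sampled before evacuated regions are folded back in,
      // mirroring RegionSpace::ClearFromSpace() above.
      c.max_peak_num_non_free_regions =
          std::max(c.max_peak_num_non_free_regions, c.num_non_free_regions);
      c.num_non_free_regions += c.num_evac_regions;  // evac regions become ordinary non-free regions
      c.num_evac_regions = 0;
    }
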
diff --git a/runtime/globals.h b/runtime/globals.h
index f14d6e95a6..ca4040d777 100644
--- a/runtime/globals.h
+++ b/runtime/globals.h
@@ -62,6 +62,12 @@ static constexpr bool kIsDebugBuild = GlobalsReturnSelf(false);
static constexpr bool kIsDebugBuild = GlobalsReturnSelf(true);
#endif
+#if defined(ART_PGO_INSTRUMENTATION)
+static constexpr bool kIsPGOInstrumentation = true;
+#else
+static constexpr bool kIsPGOInstrumentation = false;
+#endif
+
// ART_TARGET - Defined for target builds of ART.
// ART_TARGET_LINUX - Defined for target Linux builds of ART.
// ART_TARGET_ANDROID - Defined for target Android builds of ART.
diff --git a/runtime/hidden_api.h b/runtime/hidden_api.h
new file mode 100644
index 0000000000..05e68e66dd
--- /dev/null
+++ b/runtime/hidden_api.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_HIDDEN_API_H_
+#define ART_RUNTIME_HIDDEN_API_H_
+
+#include "hidden_api_access_flags.h"
+#include "reflection.h"
+#include "runtime.h"
+
+namespace art {
+namespace hiddenapi {
+
+enum Action {
+ kAllow,
+ kAllowButWarn,
+ kAllowButWarnAndToast,
+ kDeny
+};
+
+inline Action GetMemberAction(uint32_t access_flags) {
+ switch (HiddenApiAccessFlags::DecodeFromRuntime(access_flags)) {
+ case HiddenApiAccessFlags::kWhitelist:
+ return kAllow;
+ case HiddenApiAccessFlags::kLightGreylist:
+ return kAllowButWarn;
+ case HiddenApiAccessFlags::kDarkGreylist:
+ return kAllowButWarnAndToast;
+ case HiddenApiAccessFlags::kBlacklist:
+ return kDeny;
+ }
+}
+
+// Issue a warning about field access.
+inline void WarnAboutMemberAccess(ArtField* field) REQUIRES_SHARED(Locks::mutator_lock_) {
+ std::string tmp;
+ LOG(WARNING) << "Accessing hidden field "
+ << field->GetDeclaringClass()->GetDescriptor(&tmp) << "->"
+ << field->GetName() << ":" << field->GetTypeDescriptor();
+}
+
+// Issue a warning about method access.
+inline void WarnAboutMemberAccess(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) {
+ std::string tmp;
+ LOG(WARNING) << "Accessing hidden method "
+ << method->GetDeclaringClass()->GetDescriptor(&tmp) << "->"
+ << method->GetName() << method->GetSignature().ToString();
+}
+
+// Returns true if access to `member` should be denied to the caller of the
+// reflective query. The decision is based on whether the caller is in boot
+// class path or not. Because different users of this function determine this
+// in a different way, `fn_caller_in_boot(self)` is called and should return
+// true if the caller is in boot class path.
+// This function might print warnings into the log if the member is greylisted.
+template<typename T>
+inline bool ShouldBlockAccessToMember(T* member,
+ Thread* self,
+ std::function<bool(Thread*)> fn_caller_in_boot)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK(member != nullptr);
+ Runtime* runtime = Runtime::Current();
+
+ if (!runtime->AreHiddenApiChecksEnabled()) {
+ // Exit early. Nothing to enforce.
+ return false;
+ }
+
+ Action action = GetMemberAction(member->GetAccessFlags());
+ if (action == kAllow) {
+ // Nothing to do.
+ return false;
+ }
+
+ // Member is hidden. Walk the stack to find the caller.
+ // This can be *very* expensive. Save it for last.
+ if (fn_caller_in_boot(self)) {
+ // Caller in boot class path. Exit.
+ return false;
+ }
+
+ // Member is hidden and we are not in the boot class path. Act accordingly.
+ if (action == kDeny) {
+ return true;
+ } else {
+ DCHECK(action == kAllowButWarn || action == kAllowButWarnAndToast);
+
+ // Allow access to this member but print a warning. Depending on a runtime
+ // flag, we might move the member into whitelist and skip the warning the
+ // next time the member is used.
+ if (runtime->ShouldDedupeHiddenApiWarnings()) {
+ member->SetAccessFlags(HiddenApiAccessFlags::EncodeForRuntime(
+ member->GetAccessFlags(), HiddenApiAccessFlags::kWhitelist));
+ }
+ WarnAboutMemberAccess(member);
+ if (action == kAllowButWarnAndToast || runtime->ShouldAlwaysSetHiddenApiWarningFlag()) {
+ Runtime::Current()->SetPendingHiddenApiWarning(true);
+ }
+ return false;
+ }
+}
+
+// Returns true if access to member with `access_flags` should be denied to `caller`.
+// This function should be called on statically linked uses of hidden API.
+inline bool ShouldBlockAccessToMember(uint32_t access_flags, mirror::Class* caller)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (!Runtime::Current()->AreHiddenApiChecksEnabled()) {
+ // Exit early. Nothing to enforce.
+ return false;
+ }
+
+ // Only continue if we want to deny access. Warnings are *not* printed.
+ if (GetMemberAction(access_flags) != kDeny) {
+ return false;
+ }
+
+ // Member is hidden. Check if the caller is in boot class path.
+ if (caller == nullptr) {
+ // The caller is unknown. We assume that this is *not* boot class path.
+ return true;
+ }
+
+ return !caller->IsBootStrapClassLoaded();
+}
+
+} // namespace hiddenapi
+} // namespace art
+
+#endif // ART_RUNTIME_HIDDEN_API_H_
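
ShouldBlockAccessToMember() above is meant to be consulted from the reflective lookup paths. A hedged sketch of a caller follows; ResolveFieldSomehow() and CallerIsInBootClassPath() are placeholders for illustration, not existing ART APIs.

    // Sketch only: shows the intended call shape of the template overload above.
    ArtField* GetFieldIfAllowed(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) {
      ArtField* field = ResolveFieldSomehow();  // placeholder for the usual lookup
      if (field == nullptr) {
        return nullptr;
      }
      auto caller_in_boot = [](Thread* thread) {
        // Placeholder: real callers walk the stack to locate the calling class.
        return CallerIsInBootClassPath(thread);
      };
      if (hiddenapi::ShouldBlockAccessToMember(field, self, caller_in_boot)) {
        // Blacklisted member with a non-boot caller: behave as if it does not exist.
        return nullptr;
      }
      // Whitelisted or greylisted; greylist cases have already logged their warning.
      return field;
    }
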
diff --git a/runtime/hidden_api_access_flags.h b/runtime/hidden_api_access_flags.h
new file mode 100644
index 0000000000..c328f965d2
--- /dev/null
+++ b/runtime/hidden_api_access_flags.h
@@ -0,0 +1,152 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_HIDDEN_API_ACCESS_FLAGS_H_
+#define ART_RUNTIME_HIDDEN_API_ACCESS_FLAGS_H_
+
+#include "base/bit_utils.h"
+#include "dex/modifiers.h"
+
+namespace art {
+
+/* This class is used for encoding and decoding access flags of class members
+ * from the boot class path. These access flags might contain additional two bits
+ * of information on whether the given class member should be hidden from apps
+ * and under what circumstances.
+ *
+ * The encoding is different inside DexFile, where we are concerned with size,
+ * and at runtime where we want to optimize for speed of access. The class
+ * provides helper functions to decode/encode both of them.
+ *
+ * Encoding in DexFile
+ * ===================
+ *
+ * First bit is encoded as inversion of visibility flags (public/private/protected).
+ * At most one can be set for any given class member. If two or three are set,
+ * this is interpreted as the first bit being set and actual visibility flags
+ * being the complement of the encoded flags.
+ *
+ * Second bit is either encoded as bit 5 for fields and non-native methods, where
+ * it carries no other meaning. If a method is native (bit 8 set), bit 9 is used.
+ *
+ * Bits were selected so that they never increase the length of unsigned LEB-128
+ * encoding of the access flags.
+ *
+ * Encoding at runtime
+ * ===================
+ *
+ * Two bits are set aside in the uint32_t access flags in the intrinsics ordinal
+ * space (thus intrinsics need to be special-cased). These are two consecutive
+ * bits and they are directly used to store the integer value of the ApiList
+ * enum values.
+ *
+ */
+class HiddenApiAccessFlags {
+ public:
+ enum ApiList {
+ kWhitelist = 0,
+ kLightGreylist,
+ kDarkGreylist,
+ kBlacklist,
+ };
+
+ static ALWAYS_INLINE ApiList DecodeFromDex(uint32_t dex_access_flags) {
+ DexHiddenAccessFlags flags(dex_access_flags);
+ uint32_t int_value = (flags.IsFirstBitSet() ? 1 : 0) + (flags.IsSecondBitSet() ? 2 : 0);
+ return static_cast<ApiList>(int_value);
+ }
+
+ static ALWAYS_INLINE uint32_t RemoveFromDex(uint32_t dex_access_flags) {
+ DexHiddenAccessFlags flags(dex_access_flags);
+ flags.SetFirstBit(false);
+ flags.SetSecondBit(false);
+ return flags.GetEncoding();
+ }
+
+ static ALWAYS_INLINE uint32_t EncodeForDex(uint32_t dex_access_flags, ApiList value) {
+ DexHiddenAccessFlags flags(RemoveFromDex(dex_access_flags));
+ uint32_t int_value = static_cast<uint32_t>(value);
+ flags.SetFirstBit((int_value & 1) != 0);
+ flags.SetSecondBit((int_value & 2) != 0);
+ return flags.GetEncoding();
+ }
+
+ static ALWAYS_INLINE ApiList DecodeFromRuntime(uint32_t runtime_access_flags) {
+ if ((runtime_access_flags & kAccIntrinsic) != 0) {
+ return kWhitelist;
+ } else {
+ uint32_t int_value = (runtime_access_flags & kAccHiddenApiBits) >> kAccFlagsShift;
+ return static_cast<ApiList>(int_value);
+ }
+ }
+
+ static ALWAYS_INLINE uint32_t EncodeForRuntime(uint32_t runtime_access_flags, ApiList value) {
+ CHECK_EQ(runtime_access_flags & kAccIntrinsic, 0u);
+
+ uint32_t hidden_api_flags = static_cast<uint32_t>(value) << kAccFlagsShift;
+ CHECK_EQ(hidden_api_flags & ~kAccHiddenApiBits, 0u);
+
+ runtime_access_flags &= ~kAccHiddenApiBits;
+ return runtime_access_flags | hidden_api_flags;
+ }
+
+ private:
+ static const int kAccFlagsShift = CTZ(kAccHiddenApiBits);
+ static_assert(IsPowerOfTwo((kAccHiddenApiBits >> kAccFlagsShift) + 1),
+ "kAccHiddenApiBits are not continuous");
+
+ struct DexHiddenAccessFlags {
+ explicit DexHiddenAccessFlags(uint32_t access_flags) : access_flags_(access_flags) {}
+
+ ALWAYS_INLINE uint32_t GetSecondFlag() {
+ return ((access_flags_ & kAccNative) != 0) ? kAccDexHiddenBitNative : kAccDexHiddenBit;
+ }
+
+ ALWAYS_INLINE bool IsFirstBitSet() {
+ static_assert(IsPowerOfTwo(0u), "Following statement checks if *at most* one bit is set");
+ return !IsPowerOfTwo(access_flags_ & kAccVisibilityFlags);
+ }
+
+ ALWAYS_INLINE void SetFirstBit(bool value) {
+ if (IsFirstBitSet() != value) {
+ access_flags_ ^= kAccVisibilityFlags;
+ }
+ }
+
+ ALWAYS_INLINE bool IsSecondBitSet() {
+ return (access_flags_ & GetSecondFlag()) != 0;
+ }
+
+ ALWAYS_INLINE void SetSecondBit(bool value) {
+ if (value) {
+ access_flags_ |= GetSecondFlag();
+ } else {
+ access_flags_ &= ~GetSecondFlag();
+ }
+ }
+
+ ALWAYS_INLINE uint32_t GetEncoding() const {
+ return access_flags_;
+ }
+
+ uint32_t access_flags_;
+ };
+};
+
+} // namespace art
+
+
+#endif // ART_RUNTIME_HIDDEN_API_ACCESS_FLAGS_H_
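
At runtime the ApiList value lives in the two kAccHiddenApiBits of the access flags, so reclassifying a member is a decode/encode round trip. A minimal sketch using only the static helpers declared above; intrinsics are skipped because EncodeForRuntime() CHECKs against kAccIntrinsic.

    // Sketch: promote a light-greylisted member to the whitelist, as the
    // warning-deduplication path in hidden_api.h does.
    uint32_t MaybePromoteToWhitelist(uint32_t runtime_access_flags) {
      using Flags = art::HiddenApiAccessFlags;
      if ((runtime_access_flags & art::kAccIntrinsic) != 0) {
        return runtime_access_flags;  // intrinsics always decode as whitelisted
      }
      if (Flags::DecodeFromRuntime(runtime_access_flags) == Flags::kLightGreylist) {
        return Flags::EncodeForRuntime(runtime_access_flags, Flags::kWhitelist);
      }
      return runtime_access_flags;
    }
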
diff --git a/runtime/image.cc b/runtime/image.cc
index b9d955c08c..8e3615ffcf 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -26,7 +26,7 @@
namespace art {
const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '5', '2', '\0' }; // 4-bit ClassStatus.
+const uint8_t ImageHeader::kImageVersion[] = { '0', '5', '4', '\0' }; // Math.pow() intrinsic.
ImageHeader::ImageHeader(uint32_t image_begin,
uint32_t image_size,
diff --git a/runtime/imtable-inl.h b/runtime/imtable-inl.h
index 6237cca9e4..93346f6151 100644
--- a/runtime/imtable-inl.h
+++ b/runtime/imtable-inl.h
@@ -21,7 +21,7 @@
#include "art_method-inl.h"
#include "dex/dex_file.h"
-#include "utf.h"
+#include "dex/utf.h"
namespace art {
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 4524448916..24cedb093b 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -776,6 +776,17 @@ void Instrumentation::UpdateMethodsCodeImpl(ArtMethod* method, const void* quick
UpdateEntrypoints(method, new_quick_code);
}
+void Instrumentation::UpdateNativeMethodsCodeToJitCode(ArtMethod* method, const void* quick_code) {
+ // We don't do any read barrier on `method`'s declaring class in this code, as the JIT might
+ // enter here on a soon-to-be deleted ArtMethod. Updating the entrypoint is OK though, as
+ // the ArtMethod is still in memory.
+ const void* new_quick_code = quick_code;
+ if (UNLIKELY(instrumentation_stubs_installed_) && entry_exit_stubs_installed_) {
+ new_quick_code = GetQuickInstrumentationEntryPoint();
+ }
+ UpdateEntrypoints(method, new_quick_code);
+}
+
void Instrumentation::UpdateMethodsCode(ArtMethod* method, const void* quick_code) {
DCHECK(method->GetDeclaringClass()->IsResolved());
UpdateMethodsCodeImpl(method, quick_code);
@@ -1373,8 +1384,8 @@ TwoWordReturn Instrumentation::PopInstrumentationStackFrame(Thread* self,
reinterpret_cast<uintptr_t>(GetQuickDeoptimizationEntryPoint()));
} else {
if (deoptimize && !Runtime::Current()->IsAsyncDeoptimizeable(*return_pc)) {
- LOG(WARNING) << "Got a deoptimization request on un-deoptimizable " << method->PrettyMethod()
- << " at PC " << reinterpret_cast<void*>(*return_pc);
+ VLOG(deopt) << "Got a deoptimization request on un-deoptimizable " << method->PrettyMethod()
+ << " at PC " << reinterpret_cast<void*>(*return_pc);
}
if (kVerboseInstrumentation) {
LOG(INFO) << "Returning from " << method->PrettyMethod()
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index da63152d10..46b3f8d85f 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -280,6 +280,10 @@ class Instrumentation {
void UpdateMethodsCode(ArtMethod* method, const void* quick_code)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
+ // Update the code of a native method to a JITed stub.
+ void UpdateNativeMethodsCodeToJitCode(ArtMethod* method, const void* quick_code)
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
+
// Update the code of a method to the interpreter respecting any installed stubs from debugger.
void UpdateMethodsCodeToInterpreterEntryPoint(ArtMethod* method)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index 5b93d3b873..4b964f648b 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -18,6 +18,7 @@
#include <memory>
+#include "dex/utf.h"
#include "gc/collector/garbage_collector.h"
#include "gc/space/image_space.h"
#include "gc/weak_root_state.h"
@@ -30,7 +31,6 @@
#include "object_callbacks.h"
#include "scoped_thread_state_change-inl.h"
#include "thread.h"
-#include "utf.h"
namespace art {
diff --git a/runtime/intern_table_test.cc b/runtime/intern_table_test.cc
index 9c3ea8d864..b56c48d78c 100644
--- a/runtime/intern_table_test.cc
+++ b/runtime/intern_table_test.cc
@@ -18,12 +18,12 @@
#include "base/hash_set.h"
#include "common_runtime_test.h"
+#include "dex/utf.h"
#include "gc_root-inl.h"
#include "handle_scope-inl.h"
#include "mirror/object.h"
#include "mirror/string.h"
#include "scoped_thread_state_change-inl.h"
-#include "utf.h"
namespace art {
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 54db87297d..735c0e815a 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -389,7 +389,7 @@ void EnterInterpreterFromInvoke(Thread* self,
}
const char* old_cause = self->StartAssertNoThreadSuspension("EnterInterpreterFromInvoke");
- CodeItemDataAccessor accessor(method);
+ CodeItemDataAccessor accessor(method->DexInstructionData());
uint16_t num_regs;
uint16_t num_ins;
if (accessor.HasCodeItem()) {
@@ -499,7 +499,7 @@ void EnterInterpreterFromDeoptimize(Thread* self,
DCHECK(!shadow_frame->GetMethod()->MustCountLocks());
self->SetTopOfShadowStack(shadow_frame);
- CodeItemDataAccessor accessor(shadow_frame->GetMethod());
+ CodeItemDataAccessor accessor(shadow_frame->GetMethod()->DexInstructionData());
const uint32_t dex_pc = shadow_frame->GetDexPC();
uint32_t new_dex_pc = dex_pc;
if (UNLIKELY(self->IsExceptionPending())) {
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 475f93803d..12b8c38bbb 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -1183,10 +1183,9 @@ static ObjPtr<mirror::CallSite> InvokeBootstrapMethod(Thread* self,
}
Handle<mirror::Object> object(hs.NewHandle(result.GetL()));
-
- // Check the result is not null.
if (UNLIKELY(object.IsNull())) {
- ThrowNullPointerException("CallSite == null");
+ // This will typically be for LambdaMetafactory which is not supported.
+ ThrowNullPointerException("Bootstrap method returned null");
return nullptr;
}
@@ -1202,7 +1201,7 @@ static ObjPtr<mirror::CallSite> InvokeBootstrapMethod(Thread* self,
// Check the call site target is not null as we're going to invoke it.
Handle<mirror::MethodHandle> target = hs.NewHandle(call_site->GetTarget());
if (UNLIKELY(target.IsNull())) {
- ThrowNullPointerException("CallSite target == null");
+ ThrowNullPointerException("Target for call-site is null");
return nullptr;
}
@@ -1320,7 +1319,7 @@ static inline bool DoCallCommon(ArtMethod* called_method,
}
// Compute method information.
- CodeItemDataAccessor accessor(called_method);
+ CodeItemDataAccessor accessor(called_method->DexInstructionData());
// Number of registers for the callee's call frame.
uint16_t num_regs;
// Test whether to use the interpreter or compiler entrypoint, and save that result to pass to
diff --git a/runtime/interpreter/interpreter_intrinsics.cc b/runtime/interpreter/interpreter_intrinsics.cc
index 99a4f763c9..681a582b5d 100644
--- a/runtime/interpreter/interpreter_intrinsics.cc
+++ b/runtime/interpreter/interpreter_intrinsics.cc
@@ -478,6 +478,7 @@ bool MterpHandleIntrinsic(ShadowFrame* shadow_frame,
UNIMPLEMENTED_CASE(MathLog /* (D)D */)
UNIMPLEMENTED_CASE(MathLog10 /* (D)D */)
UNIMPLEMENTED_CASE(MathNextAfter /* (DD)D */)
+ UNIMPLEMENTED_CASE(MathPow /* (DD)D */)
UNIMPLEMENTED_CASE(MathSinh /* (D)D */)
INTRINSIC_CASE(MathTan)
UNIMPLEMENTED_CASE(MathTanh /* (D)D */)
diff --git a/runtime/interpreter/shadow_frame.cc b/runtime/interpreter/shadow_frame.cc
index fe7e3e0a9b..264ec6a997 100644
--- a/runtime/interpreter/shadow_frame.cc
+++ b/runtime/interpreter/shadow_frame.cc
@@ -28,7 +28,7 @@ mirror::Object* ShadowFrame::GetThisObject() const {
return GetVRegReference(0);
} else {
CHECK(m->GetCodeItem() != nullptr) << ArtMethod::PrettyMethod(m);
- CodeItemDataAccessor accessor(m);
+ CodeItemDataAccessor accessor(m->DexInstructionData());
uint16_t reg = accessor.RegistersSize() - accessor.InsSize();
return GetVRegReference(reg);
}
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index d1436fa9cf..85acc71377 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -38,6 +38,7 @@
#include "entrypoints/entrypoint_utils-inl.h"
#include "gc/reference_processor.h"
#include "handle_scope-inl.h"
+#include "hidden_api.h"
#include "interpreter/interpreter_common.h"
#include "jvalue-inl.h"
#include "mirror/array-inl.h"
@@ -175,6 +176,13 @@ static mirror::String* GetClassName(Thread* self, ShadowFrame* shadow_frame, siz
return param->AsString();
}
+template<typename T>
+static ALWAYS_INLINE bool ShouldBlockAccessToMember(T* member, ShadowFrame* frame)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return hiddenapi::ShouldBlockAccessToMember(
+ member->GetAccessFlags(), frame->GetMethod()->GetDeclaringClass());
+}
+
void UnstartedRuntime::UnstartedClassForNameCommon(Thread* self,
ShadowFrame* shadow_frame,
JValue* result,
@@ -265,7 +273,10 @@ void UnstartedRuntime::UnstartedClassNewInstance(
bool ok = false;
auto* cl = Runtime::Current()->GetClassLinker();
if (cl->EnsureInitialized(self, h_klass, true, true)) {
- auto* cons = h_klass->FindConstructor("()V", cl->GetImagePointerSize());
+ ArtMethod* cons = h_klass->FindConstructor("()V", cl->GetImagePointerSize());
+ if (cons != nullptr && ShouldBlockAccessToMember(cons, shadow_frame)) {
+ cons = nullptr;
+ }
if (cons != nullptr) {
Handle<mirror::Object> h_obj(hs.NewHandle(klass->AllocObject(self)));
CHECK(h_obj != nullptr); // We don't expect OOM at compile-time.
@@ -308,6 +319,9 @@ void UnstartedRuntime::UnstartedClassGetDeclaredField(
}
}
}
+ if (found != nullptr && ShouldBlockAccessToMember(found, shadow_frame)) {
+ found = nullptr;
+ }
if (found == nullptr) {
AbortTransactionOrFail(self, "Failed to find field in Class.getDeclaredField in un-started "
" runtime. name=%s class=%s", name2->ToModifiedUtf8().c_str(),
@@ -370,6 +384,9 @@ void UnstartedRuntime::UnstartedClassGetDeclaredMethod(
self, klass, name, args);
}
}
+ if (method != nullptr && ShouldBlockAccessToMember(method->GetArtMethod(), shadow_frame)) {
+ method = nullptr;
+ }
result->SetL(method);
}
@@ -404,6 +421,10 @@ void UnstartedRuntime::UnstartedClassGetDeclaredConstructor(
false>(self, klass, args);
}
}
+ if (constructor != nullptr &&
+ ShouldBlockAccessToMember(constructor->GetArtMethod(), shadow_frame)) {
+ constructor = nullptr;
+ }
result->SetL(constructor);
}
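
The unstarted-runtime change above gates reflective lookups (constructors, fields, methods) behind a hidden-API check: a member that was found is simply nulled out when the caller's declaring class is not allowed to see it, so the lookup behaves as if the member did not exist. Below is a minimal standalone sketch of that gating pattern; the Member type and the caller_is_platform flag are illustrative stand-ins, not ART's hiddenapi API.

#include <iostream>

// Stand-in for a reflective member with hidden-API access flags.
struct Member {
  const char* name;
  bool hidden;
};

// Policy check: platform callers see everything, app callers are blocked from hidden members.
static bool ShouldBlockAccess(const Member* m, bool caller_is_platform) {
  return m != nullptr && m->hidden && !caller_is_platform;
}

// Lookup result is discarded when the policy says the caller may not see it.
static const Member* FindConstructorChecked(const Member* found, bool caller_is_platform) {
  if (found != nullptr && ShouldBlockAccess(found, caller_is_platform)) {
    return nullptr;  // behave as if the member does not exist
  }
  return found;
}

int main() {
  Member hidden_ctor{"<init>", /*hidden=*/true};
  std::cout << (FindConstructorChecked(&hidden_ctor, /*caller_is_platform=*/false) == nullptr)
            << "\n";  // prints 1: blocked for app callers
  std::cout << (FindConstructorChecked(&hidden_ctor, /*caller_is_platform=*/true) != nullptr)
            << "\n";  // prints 1: still visible to platform callers
}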
diff --git a/runtime/intrinsics_list.h b/runtime/intrinsics_list.h
index d007728750..da08793f59 100644
--- a/runtime/intrinsics_list.h
+++ b/runtime/intrinsics_list.h
@@ -136,6 +136,7 @@
V(MathAsin, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Math;", "asin", "(D)D") \
V(MathAtan, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Math;", "atan", "(D)D") \
V(MathAtan2, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Math;", "atan2", "(DD)D") \
+ V(MathPow, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Math;", "pow", "(DD)D") \
V(MathCbrt, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Math;", "cbrt", "(D)D") \
V(MathCosh, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Math;", "cosh", "(D)D") \
V(MathExp, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Math;", "exp", "(D)D") \
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index 89eef88b88..90cac853ff 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -27,6 +27,7 @@
#include "base/logging.h" // For VLOG.
#include "base/macros.h"
#include "debugger.h"
+#include "dex/utf.h"
#include "jdwp/jdwp_constants.h"
#include "jdwp/jdwp_event.h"
#include "jdwp/jdwp_expand_buf.h"
@@ -34,7 +35,6 @@
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
#include "thread-current-inl.h"
-#include "utils.h"
namespace art {
diff --git a/runtime/jdwp_provider.h b/runtime/jdwp_provider.h
index b62e10b4f8..698fdc086d 100644
--- a/runtime/jdwp_provider.h
+++ b/runtime/jdwp_provider.h
@@ -28,6 +28,9 @@ enum class JdwpProvider {
kNone,
kInternal,
kAdbConnection,
+
+ // The current default provider
+ kDefaultJdwpProvider = kAdbConnection,
};
std::ostream& operator<<(std::ostream& os, const JdwpProvider& rhs);
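
The new kDefaultJdwpProvider enumerator above is an alias for an existing enumerator rather than a distinct provider, which lets callers refer to "the default" without a separate constant. A small self-contained illustration of that enum-alias pattern (not the ART type itself) is:

#include <iostream>

enum class JdwpProvider {
  kNone,
  kInternal,
  kAdbConnection,
  kDefaultJdwpProvider = kAdbConnection,  // alias, not a new value
};

int main() {
  // The alias compares equal to the enumerator it names.
  std::cout << (JdwpProvider::kDefaultJdwpProvider == JdwpProvider::kAdbConnection)
            << "\n";  // prints 1
}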
diff --git a/runtime/jit/debugger_interface.cc b/runtime/jit/debugger_interface.cc
index 4d1c85a1c2..d60f70a54f 100644
--- a/runtime/jit/debugger_interface.cc
+++ b/runtime/jit/debugger_interface.cc
@@ -42,6 +42,7 @@ extern "C" {
JITCodeEntry* prev_;
const uint8_t *symfile_addr_;
uint64_t symfile_size_;
+ uint32_t ref_count; // ART internal field.
};
struct JITDescriptor {
@@ -67,12 +68,67 @@ extern "C" {
// Static initialization is necessary to prevent GDB from seeing
// uninitialized descriptor.
JITDescriptor __jit_debug_descriptor = { 1, JIT_NOACTION, nullptr, nullptr };
+
+ // Incremented whenever __jit_debug_descriptor is modified.
+ uint32_t __jit_debug_descriptor_timestamp = 0;
+
+ struct DEXFileEntry {
+ DEXFileEntry* next_;
+ DEXFileEntry* prev_;
+ const void* dexfile_;
+ };
+
+ DEXFileEntry* __art_debug_dexfiles = nullptr;
+
+ // Incremented whenever __art_debug_dexfiles is modified.
+ uint32_t __art_debug_dexfiles_timestamp = 0;
}
-static Mutex g_jit_debug_mutex("JIT debug interface lock", kJitDebugInterfaceLock);
+static size_t g_jit_debug_mem_usage
+ GUARDED_BY(Locks::native_debug_interface_lock_) = 0;
+
+static std::unordered_map<const void*, DEXFileEntry*> g_dexfile_entries
+ GUARDED_BY(Locks::native_debug_interface_lock_);
+
+void RegisterDexFileForNative(Thread* current_thread, const void* dexfile_header) {
+ MutexLock mu(current_thread, *Locks::native_debug_interface_lock_);
+ if (g_dexfile_entries.count(dexfile_header) == 0) {
+ DEXFileEntry* entry = new DEXFileEntry();
+ CHECK(entry != nullptr);
+ entry->dexfile_ = dexfile_header;
+ entry->prev_ = nullptr;
+ entry->next_ = __art_debug_dexfiles;
+ if (entry->next_ != nullptr) {
+ entry->next_->prev_ = entry;
+ }
+ __art_debug_dexfiles = entry;
+ __art_debug_dexfiles_timestamp++;
+ g_dexfile_entries.emplace(dexfile_header, entry);
+ }
+}
-static JITCodeEntry* CreateJITCodeEntryInternal(std::vector<uint8_t> symfile)
- REQUIRES(g_jit_debug_mutex) {
+void DeregisterDexFileForNative(Thread* current_thread, const void* dexfile_header) {
+ MutexLock mu(current_thread, *Locks::native_debug_interface_lock_);
+ auto it = g_dexfile_entries.find(dexfile_header);
+ // We register dex files in the class linker and free them in DexFile_closeDexFile,
+  // but there might be cases where we load the dex file without using it in the class linker.
+ if (it != g_dexfile_entries.end()) {
+ DEXFileEntry* entry = it->second;
+ if (entry->prev_ != nullptr) {
+ entry->prev_->next_ = entry->next_;
+ } else {
+ __art_debug_dexfiles = entry->next_;
+ }
+ if (entry->next_ != nullptr) {
+ entry->next_->prev_ = entry->prev_;
+ }
+ __art_debug_dexfiles_timestamp++;
+ delete entry;
+ g_dexfile_entries.erase(it);
+ }
+}
+
+JITCodeEntry* CreateJITCodeEntry(const std::vector<uint8_t>& symfile) {
DCHECK_NE(symfile.size(), 0u);
// Make a copy of the buffer. We want to shrink it anyway.
@@ -85,20 +141,21 @@ static JITCodeEntry* CreateJITCodeEntryInternal(std::vector<uint8_t> symfile)
entry->symfile_addr_ = symfile_copy;
entry->symfile_size_ = symfile.size();
entry->prev_ = nullptr;
-
+ entry->ref_count = 0;
entry->next_ = __jit_debug_descriptor.first_entry_;
if (entry->next_ != nullptr) {
entry->next_->prev_ = entry;
}
+ g_jit_debug_mem_usage += sizeof(JITCodeEntry) + entry->symfile_size_;
__jit_debug_descriptor.first_entry_ = entry;
__jit_debug_descriptor.relevant_entry_ = entry;
-
__jit_debug_descriptor.action_flag_ = JIT_REGISTER_FN;
+ __jit_debug_descriptor_timestamp++;
(*__jit_debug_register_code_ptr)();
return entry;
}
-static void DeleteJITCodeEntryInternal(JITCodeEntry* entry) REQUIRES(g_jit_debug_mutex) {
+void DeleteJITCodeEntry(JITCodeEntry* entry) {
if (entry->prev_ != nullptr) {
entry->prev_->next_ = entry->next_;
} else {
@@ -109,48 +166,42 @@ static void DeleteJITCodeEntryInternal(JITCodeEntry* entry) REQUIRES(g_jit_debug
entry->next_->prev_ = entry->prev_;
}
+ g_jit_debug_mem_usage -= sizeof(JITCodeEntry) + entry->symfile_size_;
__jit_debug_descriptor.relevant_entry_ = entry;
__jit_debug_descriptor.action_flag_ = JIT_UNREGISTER_FN;
+ __jit_debug_descriptor_timestamp++;
(*__jit_debug_register_code_ptr)();
delete[] entry->symfile_addr_;
delete entry;
}
-JITCodeEntry* CreateJITCodeEntry(std::vector<uint8_t> symfile) {
- Thread* self = Thread::Current();
- MutexLock mu(self, g_jit_debug_mutex);
- return CreateJITCodeEntryInternal(std::move(symfile));
+// Mapping from code address to entry. Used to manage life-time of the entries.
+static std::unordered_map<uintptr_t, JITCodeEntry*> g_jit_code_entries
+ GUARDED_BY(Locks::native_debug_interface_lock_);
+
+void IncrementJITCodeEntryRefcount(JITCodeEntry* entry, uintptr_t code_address) {
+ DCHECK(entry != nullptr);
+ DCHECK_EQ(g_jit_code_entries.count(code_address), 0u);
+ entry->ref_count++;
+ g_jit_code_entries.emplace(code_address, entry);
}
-void DeleteJITCodeEntry(JITCodeEntry* entry) {
- Thread* self = Thread::Current();
- MutexLock mu(self, g_jit_debug_mutex);
- DeleteJITCodeEntryInternal(entry);
+void DecrementJITCodeEntryRefcount(JITCodeEntry* entry, uintptr_t code_address) {
+ DCHECK(entry != nullptr);
+ DCHECK(g_jit_code_entries[code_address] == entry);
+ if (--entry->ref_count == 0) {
+ DeleteJITCodeEntry(entry);
+ }
+ g_jit_code_entries.erase(code_address);
}
-// Mapping from address to entry. It takes ownership of the entries
-// so that the user of the JIT interface does not have to store them.
-static std::unordered_map<uintptr_t, JITCodeEntry*> g_jit_code_entries;
-
-void CreateJITCodeEntryForAddress(uintptr_t address, std::vector<uint8_t> symfile) {
- Thread* self = Thread::Current();
- MutexLock mu(self, g_jit_debug_mutex);
- DCHECK_NE(address, 0u);
- DCHECK(g_jit_code_entries.find(address) == g_jit_code_entries.end());
- JITCodeEntry* entry = CreateJITCodeEntryInternal(std::move(symfile));
- g_jit_code_entries.emplace(address, entry);
+JITCodeEntry* GetJITCodeEntry(uintptr_t code_address) {
+ auto it = g_jit_code_entries.find(code_address);
+ return it == g_jit_code_entries.end() ? nullptr : it->second;
}
-bool DeleteJITCodeEntryForAddress(uintptr_t address) {
- Thread* self = Thread::Current();
- MutexLock mu(self, g_jit_debug_mutex);
- const auto it = g_jit_code_entries.find(address);
- if (it == g_jit_code_entries.end()) {
- return false;
- }
- DeleteJITCodeEntryInternal(it->second);
- g_jit_code_entries.erase(it);
- return true;
+size_t GetJITCodeEntryMemUsage() {
+ return g_jit_debug_mem_usage + g_jit_code_entries.size() * 2 * sizeof(void*);
}
} // namespace art
diff --git a/runtime/jit/debugger_interface.h b/runtime/jit/debugger_interface.h
index d9bf331aab..8c4bb3fdf4 100644
--- a/runtime/jit/debugger_interface.h
+++ b/runtime/jit/debugger_interface.h
@@ -21,28 +21,51 @@
#include <memory>
#include <vector>
+#include "base/array_ref.h"
+#include "base/mutex.h"
+
namespace art {
extern "C" {
struct JITCodeEntry;
}
+// Notify native tools (e.g. libunwind) that a DEX file has been opened.
+// The pointer needs to point to the start of the dex data (not the DexFile* object).
+void RegisterDexFileForNative(Thread* current_thread, const void* dexfile_header);
+
+// Notify native tools (e.g. libunwind) that a DEX file has been closed.
+// The pointer needs to point to the start of the dex data (not the DexFile* object).
+void DeregisterDexFileForNative(Thread* current_thread, const void* dexfile_header);
+
// Notify native debugger about new JITed code by passing in-memory ELF.
// It takes ownership of the in-memory ELF file.
-JITCodeEntry* CreateJITCodeEntry(std::vector<uint8_t> symfile);
+JITCodeEntry* CreateJITCodeEntry(const std::vector<uint8_t>& symfile)
+ REQUIRES(Locks::native_debug_interface_lock_);
// Notify native debugger that JITed code has been removed.
// It also releases the associated in-memory ELF file.
-void DeleteJITCodeEntry(JITCodeEntry* entry);
+void DeleteJITCodeEntry(JITCodeEntry* entry)
+ REQUIRES(Locks::native_debug_interface_lock_);
-// Notify native debugger about new JITed code by passing in-memory ELF.
-// The address is used only to uniquely identify the entry.
-// It takes ownership of the in-memory ELF file.
-void CreateJITCodeEntryForAddress(uintptr_t address, std::vector<uint8_t> symfile);
+// Helper method to track life-time of JITCodeEntry.
+// It registers the given code address as being described by the given entry.
+void IncrementJITCodeEntryRefcount(JITCodeEntry* entry, uintptr_t code_address)
+ REQUIRES(Locks::native_debug_interface_lock_);
-// Notify native debugger that JITed code has been removed.
-// Returns false if entry for the given address was not found.
-bool DeleteJITCodeEntryForAddress(uintptr_t address);
+// Helper method to track life-time of JITCodeEntry.
+// It de-registers the given code address as being described by the given entry.
+void DecrementJITCodeEntryRefcount(JITCodeEntry* entry, uintptr_t code_address)
+ REQUIRES(Locks::native_debug_interface_lock_);
+
+// Find the registered JITCodeEntry for the given code address.
+// There can be only one entry per address at any given time.
+JITCodeEntry* GetJITCodeEntry(uintptr_t code_address)
+ REQUIRES(Locks::native_debug_interface_lock_);
+
+// Returns approximate memory used by all JITCodeEntries.
+size_t GetJITCodeEntryMemUsage()
+ REQUIRES(Locks::native_debug_interface_lock_);
} // namespace art
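
The refactored interface above replaces the per-address ownership map with explicit ref-counting: several code addresses (for example, multiple methods sharing one JNI stub) can point at the same JITCodeEntry, and the in-memory ELF is released only when the last address is deregistered. The following is a minimal standalone model of that lifetime scheme, with illustrative types and no locking; it is not the ART implementation.

#include <cassert>
#include <cstdint>
#include <iostream>
#include <unordered_map>
#include <vector>

// One entry (an in-memory symbol file) can describe several code addresses.
struct Entry {
  std::vector<uint8_t> symfile;
  uint32_t ref_count;
};

static std::unordered_map<uintptr_t, Entry*> g_entries;  // code address -> shared entry

void RegisterAddress(Entry* entry, uintptr_t code_address) {
  assert(g_entries.count(code_address) == 0);  // one entry per address at a time
  entry->ref_count++;
  g_entries.emplace(code_address, entry);
}

void DeregisterAddress(uintptr_t code_address) {
  auto it = g_entries.find(code_address);
  assert(it != g_entries.end());
  Entry* entry = it->second;
  g_entries.erase(it);
  if (--entry->ref_count == 0) {
    delete entry;  // last address gone: release the symbol file
  }
}

int main() {
  Entry* shared = new Entry{std::vector<uint8_t>(16), 0};
  RegisterAddress(shared, 0x1000);
  RegisterAddress(shared, 0x2000);  // e.g. two stubs sharing one entry
  DeregisterAddress(0x1000);        // entry survives with ref_count == 1
  DeregisterAddress(0x2000);        // entry is deleted here
  std::cout << g_entries.size() << "\n";  // prints 0
}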
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 12bf79d7ca..1baa613bb5 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -467,7 +467,7 @@ bool Jit::MaybeDoOnStackReplacement(Thread* thread,
// Fetch some data before looking up for an OSR method. We don't want thread
// suspension once we hold an OSR method, as the JIT code cache could delete the OSR
// method while we are being suspended.
- CodeItemDataAccessor accessor(method);
+ CodeItemDataAccessor accessor(method->DexInstructionData());
const size_t number_of_vregs = accessor.RegistersSize();
const char* shorty = method->GetShorty();
std::string method_name(VLOG_IS_ON(jit) ? method->PrettyMethod() : "");
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index 791c3386cf..6d27cfe5db 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -23,7 +23,6 @@
#include "base/timing_logger.h"
#include "jit/profile_saver_options.h"
#include "obj_ptr.h"
-#include "profile_compilation_info.h"
#include "thread_pool.h"
namespace art {
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 659c55a289..c8c13cb20f 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -549,7 +549,11 @@ void JitCodeCache::FreeCode(const void* code_ptr) {
uintptr_t allocation = FromCodeToAllocation(code_ptr);
// Notify native debugger that we are about to remove the code.
// It does nothing if we are not using native debugger.
- DeleteJITCodeEntryForAddress(reinterpret_cast<uintptr_t>(code_ptr));
+ MutexLock mu(Thread::Current(), *Locks::native_debug_interface_lock_);
+ JITCodeEntry* entry = GetJITCodeEntry(reinterpret_cast<uintptr_t>(code_ptr));
+ if (entry != nullptr) {
+ DecrementJITCodeEntryRefcount(entry, reinterpret_cast<uintptr_t>(code_ptr));
+ }
if (OatQuickMethodHeader::FromCodePointer(code_ptr)->IsOptimized()) {
FreeData(GetRootTable(code_ptr));
} // else this is a JNI stub without any data.
@@ -1695,7 +1699,9 @@ bool JitCodeCache::NotifyCompilationOf(ArtMethod* method, Thread* self, bool osr
// can avoid a few expensive GenericJNI calls.
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
for (ArtMethod* m : data->GetMethods()) {
- instrumentation->UpdateMethodsCode(m, entrypoint);
+ // Call the dedicated method instead of the more generic UpdateMethodsCode, because
+ // `m` might be in the process of being deleted.
+ instrumentation->UpdateNativeMethodsCodeToJitCode(m, entrypoint);
}
if (collection_in_progress_) {
GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(data->GetCode()));
@@ -1819,8 +1825,10 @@ void JitCodeCache::FreeData(uint8_t* data) {
void JitCodeCache::Dump(std::ostream& os) {
MutexLock mu(Thread::Current(), lock_);
+ MutexLock mu2(Thread::Current(), *Locks::native_debug_interface_lock_);
os << "Current JIT code cache size: " << PrettySize(used_memory_for_code_) << "\n"
<< "Current JIT data cache size: " << PrettySize(used_memory_for_data_) << "\n"
+ << "Current JIT mini-debug-info size: " << PrettySize(GetJITCodeEntryMemUsage()) << "\n"
<< "Current JIT capacity: " << PrettySize(current_capacity_) << "\n"
<< "Current number of JIT JNI stub entries: " << jni_stubs_map_.size() << "\n"
<< "Current number of JIT code cache entries: " << method_code_map_.size() << "\n"
diff --git a/runtime/jit/profile_compilation_info.cc b/runtime/jit/profile_compilation_info.cc
index 74bf237c31..de4d02edaf 100644
--- a/runtime/jit/profile_compilation_info.cc
+++ b/runtime/jit/profile_compilation_info.cc
@@ -47,6 +47,7 @@
#include "os.h"
#include "safe_map.h"
#include "utils.h"
+#include "zip_archive.h"
namespace art {
@@ -56,6 +57,10 @@ const uint8_t ProfileCompilationInfo::kProfileMagic[] = { 'p', 'r', 'o', '\0' };
// before corresponding method_encodings and class_ids.
const uint8_t ProfileCompilationInfo::kProfileVersion[] = { '0', '1', '0', '\0' };
+// The name of the profile entry in the dex metadata file.
+// DO NOT CHANGE THIS! (it's similar to classes.dex in the apk files).
+const char* ProfileCompilationInfo::kDexMetadataProfileEntry = "primary.prof";
+
static constexpr uint16_t kMaxDexFileKeyLength = PATH_MAX;
// Debug flag to ignore checksums when testing if a method or a class is present in the profile.
@@ -194,7 +199,7 @@ bool ProfileCompilationInfo::MergeWith(const std::string& filename) {
int fd = profile_file->Fd();
- ProfileLoadSatus status = LoadInternal(fd, &error);
+ ProfileLoadStatus status = LoadInternal(fd, &error);
if (status == kProfileLoadSuccess) {
return true;
}
@@ -225,7 +230,7 @@ bool ProfileCompilationInfo::Load(const std::string& filename, bool clear_if_inv
int fd = profile_file->Fd();
- ProfileLoadSatus status = LoadInternal(fd, &error);
+ ProfileLoadStatus status = LoadInternal(fd, &error);
if (status == kProfileLoadSuccess) {
return true;
}
@@ -770,8 +775,14 @@ bool ProfileCompilationInfo::ReadInlineCache(
for (; dex_classes_size > 0; dex_classes_size--) {
uint16_t type_index;
READ_UINT(uint16_t, buffer, type_index, error);
- dex_pc_data->AddClass(dex_profile_index_remap.Get(dex_profile_index),
- dex::TypeIndex(type_index));
+ auto it = dex_profile_index_remap.find(dex_profile_index);
+ if (it == dex_profile_index_remap.end()) {
+        // If we don't have an index, it's because the dex file was filtered out when loading.
+ // Set missing types on the dex pc data.
+ dex_pc_data->SetIsMissingTypes();
+ } else {
+ dex_pc_data->AddClass(it->second, dex::TypeIndex(type_index));
+ }
}
}
}
@@ -883,25 +894,13 @@ bool ProfileCompilationInfo::SafeBuffer::CompareAndAdvance(const uint8_t* data,
return false;
}
-ProfileCompilationInfo::ProfileLoadSatus ProfileCompilationInfo::SafeBuffer::FillFromFd(
- int fd,
- const std::string& source,
- /*out*/std::string* error) {
+ProfileCompilationInfo::ProfileLoadStatus ProfileCompilationInfo::SafeBuffer::Fill(
+ ProfileSource& source,
+ const std::string& debug_stage,
+ /*out*/ std::string* error) {
size_t byte_count = (ptr_end_ - ptr_current_) * sizeof(*ptr_current_);
uint8_t* buffer = ptr_current_;
- while (byte_count > 0) {
- int bytes_read = TEMP_FAILURE_RETRY(read(fd, buffer, byte_count));
- if (bytes_read == 0) {
- *error += "Profile EOF reached prematurely for " + source;
- return kProfileLoadBadData;
- } else if (bytes_read < 0) {
- *error += "Profile IO error for " + source + strerror(errno);
- return kProfileLoadIOError;
- }
- byte_count -= bytes_read;
- buffer += bytes_read;
- }
- return kProfileLoadSuccess;
+ return source.Read(buffer, byte_count, debug_stage, error);
}
size_t ProfileCompilationInfo::SafeBuffer::CountUnreadBytes() {
@@ -916,8 +915,8 @@ void ProfileCompilationInfo::SafeBuffer::Advance(size_t data_size) {
ptr_current_ += data_size;
}
-ProfileCompilationInfo::ProfileLoadSatus ProfileCompilationInfo::ReadProfileHeader(
- int fd,
+ProfileCompilationInfo::ProfileLoadStatus ProfileCompilationInfo::ReadProfileHeader(
+ ProfileSource& source,
/*out*/uint8_t* number_of_dex_files,
/*out*/uint32_t* uncompressed_data_size,
/*out*/uint32_t* compressed_data_size,
@@ -932,7 +931,7 @@ ProfileCompilationInfo::ProfileLoadSatus ProfileCompilationInfo::ReadProfileHead
SafeBuffer safe_buffer(kMagicVersionSize);
- ProfileLoadSatus status = safe_buffer.FillFromFd(fd, "ReadProfileHeader", error);
+ ProfileLoadStatus status = safe_buffer.Fill(source, "ReadProfileHeader", error);
if (status != kProfileLoadSuccess) {
return status;
}
@@ -972,7 +971,7 @@ bool ProfileCompilationInfo::ReadProfileLineHeaderElements(SafeBuffer& buffer,
return true;
}
-ProfileCompilationInfo::ProfileLoadSatus ProfileCompilationInfo::ReadProfileLineHeader(
+ProfileCompilationInfo::ProfileLoadStatus ProfileCompilationInfo::ReadProfileLineHeader(
SafeBuffer& buffer,
/*out*/ProfileLineHeader* line_header,
/*out*/std::string* error) {
@@ -1003,7 +1002,7 @@ ProfileCompilationInfo::ProfileLoadSatus ProfileCompilationInfo::ReadProfileLine
return kProfileLoadSuccess;
}
-ProfileCompilationInfo::ProfileLoadSatus ProfileCompilationInfo::ReadProfileLine(
+ProfileCompilationInfo::ProfileLoadStatus ProfileCompilationInfo::ReadProfileLine(
SafeBuffer& buffer,
uint8_t number_of_dex_files,
const ProfileLineHeader& line_header,
@@ -1043,10 +1042,11 @@ ProfileCompilationInfo::ProfileLoadSatus ProfileCompilationInfo::ReadProfileLine
// TODO(calin): Fix this API. ProfileCompilationInfo::Load should be static and
// return a unique pointer to a ProfileCompilationInfo upon success.
-bool ProfileCompilationInfo::Load(int fd, bool merge_classes) {
+bool ProfileCompilationInfo::Load(
+ int fd, bool merge_classes, const ProfileLoadFilterFn& filter_fn) {
std::string error;
- ProfileLoadSatus status = LoadInternal(fd, &error, merge_classes);
+ ProfileLoadStatus status = LoadInternal(fd, &error, merge_classes, filter_fn);
if (status == kProfileLoadSuccess) {
return true;
@@ -1148,31 +1148,139 @@ bool ProfileCompilationInfo::VerifyProfileData(const std::vector<const DexFile*>
return true;
}
+ProfileCompilationInfo::ProfileLoadStatus ProfileCompilationInfo::OpenSource(
+ int32_t fd,
+ /*out*/ std::unique_ptr<ProfileSource>* source,
+ /*out*/ std::string* error) {
+ if (IsProfileFile(fd)) {
+ source->reset(ProfileSource::Create(fd));
+ return kProfileLoadSuccess;
+ } else {
+ std::unique_ptr<ZipArchive> zip_archive(ZipArchive::OpenFromFd(fd, "profile", error));
+ if (zip_archive.get() == nullptr) {
+ *error = "Could not open the profile zip archive";
+ return kProfileLoadBadData;
+ }
+ std::unique_ptr<ZipEntry> zip_entry(zip_archive->Find(kDexMetadataProfileEntry, error));
+ if (zip_entry == nullptr) {
+ // Allow archives without the profile entry. In this case, create an empty profile.
+      // This gives more flexibility when re-using archives that may lack the entry.
+ // (e.g. dex metadata files)
+ LOG(WARNING) << std::string("Could not find entry ") + kDexMetadataProfileEntry +
+ " in the zip archive. Creating an empty profile.";
+ source->reset(ProfileSource::Create(nullptr));
+ return kProfileLoadSuccess;
+ }
+ if (zip_entry->GetUncompressedLength() == 0) {
+ *error = "Empty profile entry in the zip archive.";
+ return kProfileLoadBadData;
+ }
+
+ std::unique_ptr<MemMap> map;
+ if (zip_entry->IsUncompressed()) {
+ // Map uncompressed files within zip as file-backed to avoid a dirty copy.
+ map.reset(zip_entry->MapDirectlyFromFile(kDexMetadataProfileEntry, error));
+ if (map == nullptr) {
+ LOG(WARNING) << "Can't mmap profile directly; "
+ << "is your ZIP file corrupted? Falling back to extraction.";
+        // Try again with extraction, which still has a chance of recovery.
+ }
+ }
+
+ if (map == nullptr) {
+ // Default path for compressed ZIP entries, and fallback for stored ZIP entries.
+ // TODO(calin) pass along file names to assist with debugging.
+ map.reset(zip_entry->ExtractToMemMap("profile file", kDexMetadataProfileEntry, error));
+ }
+
+ if (map != nullptr) {
+ source->reset(ProfileSource::Create(std::move(map)));
+ return kProfileLoadSuccess;
+ } else {
+ return kProfileLoadBadData;
+ }
+ }
+}
+
+ProfileCompilationInfo::ProfileLoadStatus ProfileCompilationInfo::ProfileSource::Read(
+ uint8_t* buffer,
+ size_t byte_count,
+ const std::string& debug_stage,
+ std::string* error) {
+ if (IsMemMap()) {
+ if (mem_map_cur_ + byte_count > mem_map_->Size()) {
+ return kProfileLoadBadData;
+ }
+ for (size_t i = 0; i < byte_count; i++) {
+ buffer[i] = *(mem_map_->Begin() + mem_map_cur_);
+ mem_map_cur_++;
+ }
+ } else {
+ while (byte_count > 0) {
+      int bytes_read = TEMP_FAILURE_RETRY(read(fd_, buffer, byte_count));
+ if (bytes_read == 0) {
+ *error += "Profile EOF reached prematurely for " + debug_stage;
+ return kProfileLoadBadData;
+ } else if (bytes_read < 0) {
+ *error += "Profile IO error for " + debug_stage + strerror(errno);
+ return kProfileLoadIOError;
+ }
+ byte_count -= bytes_read;
+ buffer += bytes_read;
+ }
+ }
+ return kProfileLoadSuccess;
+}
+
+bool ProfileCompilationInfo::ProfileSource::HasConsumedAllData() const {
+ return IsMemMap()
+ ? (mem_map_ == nullptr || mem_map_cur_ == mem_map_->Size())
+ : (testEOF(fd_) == 0);
+}
+
+bool ProfileCompilationInfo::ProfileSource::HasEmptyContent() const {
+ if (IsMemMap()) {
+ return mem_map_ == nullptr || mem_map_->Size() == 0;
+ } else {
+ struct stat stat_buffer;
+ if (fstat(fd_, &stat_buffer) != 0) {
+ return false;
+ }
+ return stat_buffer.st_size == 0;
+ }
+}
+
// TODO(calin): fail fast if the dex checksums don't match.
-ProfileCompilationInfo::ProfileLoadSatus ProfileCompilationInfo::LoadInternal(
- int fd, std::string* error, bool merge_classes) {
+ProfileCompilationInfo::ProfileLoadStatus ProfileCompilationInfo::LoadInternal(
+ int32_t fd,
+ std::string* error,
+ bool merge_classes,
+ const ProfileLoadFilterFn& filter_fn) {
ScopedTrace trace(__PRETTY_FUNCTION__);
DCHECK_GE(fd, 0);
- struct stat stat_buffer;
- if (fstat(fd, &stat_buffer) != 0) {
- return kProfileLoadIOError;
+ std::unique_ptr<ProfileSource> source;
+ ProfileLoadStatus status = OpenSource(fd, &source, error);
+ if (status != kProfileLoadSuccess) {
+ return status;
}
+
// We allow empty profile files.
// Profiles may be created by ActivityManager or installd before we manage to
// process them in the runtime or profman.
- if (stat_buffer.st_size == 0) {
+ if (source->HasEmptyContent()) {
return kProfileLoadSuccess;
}
+
// Read profile header: magic + version + number_of_dex_files.
uint8_t number_of_dex_files;
uint32_t uncompressed_data_size;
uint32_t compressed_data_size;
- ProfileLoadSatus status = ReadProfileHeader(fd,
- &number_of_dex_files,
- &uncompressed_data_size,
- &compressed_data_size,
- error);
+ status = ReadProfileHeader(*source,
+ &number_of_dex_files,
+ &uncompressed_data_size,
+ &compressed_data_size,
+ error);
if (status != kProfileLoadSuccess) {
return status;
@@ -1192,16 +1300,14 @@ ProfileCompilationInfo::ProfileLoadSatus ProfileCompilationInfo::LoadInternal(
}
std::unique_ptr<uint8_t[]> compressed_data(new uint8_t[compressed_data_size]);
- bool bytes_read_success =
- android::base::ReadFully(fd, compressed_data.get(), compressed_data_size);
-
- if (testEOF(fd) != 0) {
- *error += "Unexpected data in the profile file.";
- return kProfileLoadBadData;
+ status = source->Read(compressed_data.get(), compressed_data_size, "ReadContent", error);
+ if (status != kProfileLoadSuccess) {
+ *error += "Unable to read compressed profile data";
+ return status;
}
- if (!bytes_read_success) {
- *error += "Unable to read compressed profile data";
+ if (!source->HasConsumedAllData()) {
+ *error += "Unexpected data in the profile file.";
return kProfileLoadBadData;
}
@@ -1231,20 +1337,29 @@ ProfileCompilationInfo::ProfileLoadSatus ProfileCompilationInfo::LoadInternal(
}
SafeMap<uint8_t, uint8_t> dex_profile_index_remap;
- if (!RemapProfileIndex(profile_line_headers, &dex_profile_index_remap)) {
+ if (!RemapProfileIndex(profile_line_headers, filter_fn, &dex_profile_index_remap)) {
return kProfileLoadBadData;
}
for (uint8_t k = 0; k < number_of_dex_files; k++) {
- // Now read the actual profile line.
- status = ReadProfileLine(uncompressed_data,
- number_of_dex_files,
- profile_line_headers[k],
- dex_profile_index_remap,
- merge_classes,
- error);
- if (status != kProfileLoadSuccess) {
- return status;
+ if (!filter_fn(profile_line_headers[k].dex_location, profile_line_headers[k].checksum)) {
+      // We have to skip the line. Advance the current pointer of the buffer.
+ size_t profile_line_size =
+ profile_line_headers[k].class_set_size +
+ profile_line_headers[k].method_region_size_bytes +
+ DexFileData::ComputeBitmapStorage(profile_line_headers[k].num_method_ids);
+ uncompressed_data.Advance(profile_line_size);
+ } else {
+ // Now read the actual profile line.
+ status = ReadProfileLine(uncompressed_data,
+ number_of_dex_files,
+ profile_line_headers[k],
+ dex_profile_index_remap,
+ merge_classes,
+ error);
+ if (status != kProfileLoadSuccess) {
+ return status;
+ }
}
}
@@ -1259,12 +1374,16 @@ ProfileCompilationInfo::ProfileLoadSatus ProfileCompilationInfo::LoadInternal(
bool ProfileCompilationInfo::RemapProfileIndex(
const std::vector<ProfileLineHeader>& profile_line_headers,
+ const ProfileLoadFilterFn& filter_fn,
/*out*/SafeMap<uint8_t, uint8_t>* dex_profile_index_remap) {
// First verify that all checksums match. This will avoid adding garbage to
// the current profile info.
// Note that the number of elements should be very small, so this should not
// be a performance issue.
for (const ProfileLineHeader other_profile_line_header : profile_line_headers) {
+ if (!filter_fn(other_profile_line_header.dex_location, other_profile_line_header.checksum)) {
+ continue;
+ }
// verify_checksum is false because we want to differentiate between a missing dex data and
// a mismatched checksum.
const DexFileData* dex_data = FindDexData(other_profile_line_header.dex_location,
@@ -1278,6 +1397,9 @@ bool ProfileCompilationInfo::RemapProfileIndex(
// All checksums match. Import the data.
uint32_t num_dex_files = static_cast<uint32_t>(profile_line_headers.size());
for (uint32_t i = 0; i < num_dex_files; i++) {
+ if (!filter_fn(profile_line_headers[i].dex_location, profile_line_headers[i].checksum)) {
+ continue;
+ }
const DexFileData* dex_data = GetOrAddDexFileData(profile_line_headers[i].dex_location,
profile_line_headers[i].checksum,
profile_line_headers[i].num_method_ids);
@@ -1904,4 +2026,64 @@ std::unordered_set<std::string> ProfileCompilationInfo::GetClassDescriptors(
return ret;
}
+bool ProfileCompilationInfo::IsProfileFile(int fd) {
+ // First check if it's an empty file as we allow empty profile files.
+ // Profiles may be created by ActivityManager or installd before we manage to
+ // process them in the runtime or profman.
+ struct stat stat_buffer;
+ if (fstat(fd, &stat_buffer) != 0) {
+ return false;
+ }
+
+ if (stat_buffer.st_size == 0) {
+ return true;
+ }
+
+  // The file is not empty. Check if it contains the profile magic.
+ size_t byte_count = sizeof(kProfileMagic);
+ uint8_t buffer[sizeof(kProfileMagic)];
+ if (!android::base::ReadFully(fd, buffer, byte_count)) {
+ return false;
+ }
+
+ // Reset the offset to prepare the file for reading.
+ off_t rc = TEMP_FAILURE_RETRY(lseek(fd, 0, SEEK_SET));
+ if (rc == static_cast<off_t>(-1)) {
+ PLOG(ERROR) << "Failed to reset the offset";
+ return false;
+ }
+
+ return memcmp(buffer, kProfileMagic, byte_count) == 0;
+}
+
+bool ProfileCompilationInfo::UpdateProfileKeys(
+ const std::vector<std::unique_ptr<const DexFile>>& dex_files) {
+ for (const std::unique_ptr<const DexFile>& dex_file : dex_files) {
+ for (DexFileData* dex_data : info_) {
+ if (dex_data->checksum == dex_file->GetLocationChecksum()
+ && dex_data->num_method_ids == dex_file->NumMethodIds()) {
+ std::string new_profile_key = GetProfileDexFileKey(dex_file->GetLocation());
+ if (dex_data->profile_key != new_profile_key) {
+ if (profile_key_map_.find(new_profile_key) != profile_key_map_.end()) {
+ // We can't update the key if the new key belongs to a different dex file.
+ LOG(ERROR) << "Cannot update profile key to " << new_profile_key
+ << " because the new key belongs to another dex file.";
+ return false;
+ }
+ profile_key_map_.erase(dex_data->profile_key);
+ profile_key_map_.Put(new_profile_key, dex_data->profile_index);
+ dex_data->profile_key = new_profile_key;
+ }
+ }
+ }
+ }
+ return true;
+}
+
+bool ProfileCompilationInfo::ProfileFilterFnAcceptAll(
+ const std::string& dex_location ATTRIBUTE_UNUSED,
+ uint32_t checksum ATTRIBUTE_UNUSED) {
+ return true;
+}
+
} // namespace art
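
IsProfileFile() above decides between the plain-profile and zip-archive code paths by sniffing the leading magic bytes and then rewinding the descriptor so the real parse still starts at offset 0. A rough standalone sketch of that sniff-and-rewind step follows (POSIX only; LooksLikeProfile and the temp-file setup are illustrative, not ART code). The magic bytes are the ones declared earlier in this diff ('p', 'r', 'o', '\0').

#include <fcntl.h>
#include <unistd.h>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>

static const uint8_t kMagic[] = { 'p', 'r', 'o', '\0' };  // same bytes as kProfileMagic above

// Returns true if the fd starts with the profile magic (or is empty), and
// leaves the offset back at 0 so a subsequent load can parse the header.
static bool LooksLikeProfile(int fd) {
  uint8_t buffer[sizeof(kMagic)];
  ssize_t n = read(fd, buffer, sizeof(buffer));
  if (n == 0) {
    return true;  // empty files are accepted as empty profiles
  }
  if (n != static_cast<ssize_t>(sizeof(buffer))) {
    return false;
  }
  if (lseek(fd, 0, SEEK_SET) == static_cast<off_t>(-1)) {
    return false;  // could not rewind for the real parse
  }
  return memcmp(buffer, kMagic, sizeof(buffer)) == 0;
}

int main() {
  char path[] = "/tmp/profile_sniff_XXXXXX";
  int fd = mkstemp(path);
  (void)write(fd, kMagic, sizeof(kMagic));
  lseek(fd, 0, SEEK_SET);
  printf("%d\n", LooksLikeProfile(fd) ? 1 : 0);  // prints 1
  close(fd);
  unlink(path);
}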
diff --git a/runtime/jit/profile_compilation_info.h b/runtime/jit/profile_compilation_info.h
index 7c30dee0c0..1973f3f09e 100644
--- a/runtime/jit/profile_compilation_info.h
+++ b/runtime/jit/profile_compilation_info.h
@@ -28,6 +28,7 @@
#include "dex/dex_file.h"
#include "dex/dex_file_types.h"
#include "method_reference.h"
+#include "mem_map.h"
#include "safe_map.h"
#include "type_reference.h"
@@ -71,6 +72,8 @@ class ProfileCompilationInfo {
static const uint8_t kProfileMagic[];
static const uint8_t kProfileVersion[];
+ static const char* kDexMetadataProfileEntry;
+
// Data structures for encoding the offline representation of inline caches.
// This is exposed as public in order to make it available to dex2oat compilations
// (see compiler/optimizing/inliner.cc).
@@ -303,7 +306,19 @@ class ProfileCompilationInfo {
// Load or Merge profile information from the given file descriptor.
// If the current profile is non-empty the load will fail.
// If merge_classes is set to false, classes will not be merged/loaded.
- bool Load(int fd, bool merge_classes = true);
+ // If filter_fn is present, it will be used to filter out profile data belonging
+  // to dex files which do not comply with the filter
+ // (i.e. for which filter_fn(dex_location, dex_checksum) is false).
+ using ProfileLoadFilterFn = std::function<bool(const std::string&, uint32_t)>;
+ // Profile filter method which accepts all dex locations.
+ // This is convenient to use when we need to accept all locations without repeating the same
+ // lambda.
+ static bool ProfileFilterFnAcceptAll(const std::string& dex_location, uint32_t checksum);
+
+ bool Load(
+ int fd,
+ bool merge_classes = true,
+ const ProfileLoadFilterFn& filter_fn = ProfileFilterFnAcceptAll);
// Verify integrity of the profile file with the provided dex files.
// If there exists a DexData object which maps to a dex_file, then it verifies that:
@@ -410,8 +425,22 @@ class ProfileCompilationInfo {
// Return all of the class descriptors in the profile for a set of dex files.
std::unordered_set<std::string> GetClassDescriptors(const std::vector<const DexFile*>& dex_files);
+ // Return true if the fd points to a profile file.
+ bool IsProfileFile(int fd);
+
+ // Update the profile keys corresponding to the given dex files based on their current paths.
+ // This method allows fix-ups in the profile for dex files that might have been renamed.
+ // The new profile key will be constructed based on the current dex location.
+ //
+ // The matching [profile key <-> dex_file] is done based on the dex checksum and the number of
+  // method ids. If either does not match, the profile key is not updated.
+ //
+ // If the new profile key would collide with an existing key (for a different dex)
+ // the method returns false. Otherwise it returns true.
+ bool UpdateProfileKeys(const std::vector<std::unique_ptr<const DexFile>>& dex_files);
+
private:
- enum ProfileLoadSatus {
+ enum ProfileLoadStatus {
kProfileLoadWouldOverwiteData,
kProfileLoadIOError,
kProfileLoadVersionMismatch,
@@ -442,14 +471,21 @@ class ProfileCompilationInfo {
class_set(std::less<dex::TypeIndex>(), allocator->Adapter(kArenaAllocProfile)),
num_method_ids(num_methods),
bitmap_storage(allocator->Adapter(kArenaAllocProfile)) {
- const size_t num_bits = num_method_ids * kBitmapIndexCount;
- bitmap_storage.resize(RoundUp(num_bits, kBitsPerByte) / kBitsPerByte);
+ bitmap_storage.resize(ComputeBitmapStorage(num_method_ids));
if (!bitmap_storage.empty()) {
method_bitmap =
- BitMemoryRegion(MemoryRegion(&bitmap_storage[0], bitmap_storage.size()), 0, num_bits);
+ BitMemoryRegion(MemoryRegion(
+ &bitmap_storage[0], bitmap_storage.size()), 0, ComputeBitmapBits(num_method_ids));
}
}
+ static size_t ComputeBitmapBits(uint32_t num_method_ids) {
+ return num_method_ids * kBitmapIndexCount;
+ }
+ static size_t ComputeBitmapStorage(uint32_t num_method_ids) {
+ return RoundUp(ComputeBitmapBits(num_method_ids), kBitsPerByte) / kBitsPerByte;
+ }
+
bool operator==(const DexFileData& other) const {
return checksum == other.checksum && method_map == other.method_map;
}
@@ -577,6 +613,58 @@ class ProfileCompilationInfo {
uint32_t num_method_ids;
};
+ /**
+ * Encapsulate the source of profile data for loading.
+ * The source can be either a plain file or a zip file.
+ * For zip files, the profile entry will be extracted to
+ * the memory map.
+ */
+ class ProfileSource {
+ public:
+ /**
+ * Create a profile source for the given fd. The ownership of the fd
+ * remains to the caller; as this class will not attempt to close it at any
+     * remains with the caller, as this class will not attempt to close it at any
+ */
+ static ProfileSource* Create(int32_t fd) {
+ DCHECK_GT(fd, -1);
+ return new ProfileSource(fd, /*map*/ nullptr);
+ }
+
+ /**
+ * Create a profile source backed by a memory map. The map can be null in
+     * which case it will be treated as an empty source.
+ */
+ static ProfileSource* Create(std::unique_ptr<MemMap>&& mem_map) {
+ return new ProfileSource(/*fd*/ -1, std::move(mem_map));
+ }
+
+ /**
+ * Read bytes from this source.
+ * Reading will advance the current source position so subsequent
+     * invocations will read from the last position.
+ */
+ ProfileLoadStatus Read(uint8_t* buffer,
+ size_t byte_count,
+ const std::string& debug_stage,
+ std::string* error);
+
+ /** Return true if the source has 0 data. */
+ bool HasEmptyContent() const;
+ /** Return true if all the information from this source has been read. */
+ bool HasConsumedAllData() const;
+
+ private:
+ ProfileSource(int32_t fd, std::unique_ptr<MemMap>&& mem_map)
+ : fd_(fd), mem_map_(std::move(mem_map)), mem_map_cur_(0) {}
+
+ bool IsMemMap() const { return fd_ == -1; }
+
+ int32_t fd_; // The fd is not owned by this class.
+ std::unique_ptr<MemMap> mem_map_;
+ size_t mem_map_cur_; // Current position in the map to read from.
+ };
+
// A helper structure to make sure we don't read past our buffers in the loops.
struct SafeBuffer {
public:
@@ -586,13 +674,9 @@ class ProfileCompilationInfo {
}
// Reads the content of the descriptor at the current position.
- ProfileLoadSatus FillFromFd(int fd,
- const std::string& source,
- /*out*/std::string* error);
-
- ProfileLoadSatus FillFromBuffer(uint8_t* buffer_ptr,
- const std::string& source,
- /*out*/std::string* error);
+ ProfileLoadStatus Fill(ProfileSource& source,
+ const std::string& debug_stage,
+ /*out*/std::string* error);
// Reads an uint value (high bits to low bits) and advances the current pointer
// with the number of bits read.
@@ -620,21 +704,29 @@ class ProfileCompilationInfo {
uint8_t* ptr_current_;
};
- // Entry point for profile loding functionality.
- ProfileLoadSatus LoadInternal(int fd, std::string* error, bool merge_classes = true);
+ ProfileLoadStatus OpenSource(int32_t fd,
+ /*out*/ std::unique_ptr<ProfileSource>* source,
+ /*out*/ std::string* error);
+
+ // Entry point for profile loading functionality.
+ ProfileLoadStatus LoadInternal(
+ int32_t fd,
+ std::string* error,
+ bool merge_classes = true,
+ const ProfileLoadFilterFn& filter_fn = ProfileFilterFnAcceptAll);
// Read the profile header from the given fd and store the number of profile
// lines into number_of_dex_files.
- ProfileLoadSatus ReadProfileHeader(int fd,
- /*out*/uint8_t* number_of_dex_files,
- /*out*/uint32_t* size_uncompressed_data,
- /*out*/uint32_t* size_compressed_data,
- /*out*/std::string* error);
+ ProfileLoadStatus ReadProfileHeader(ProfileSource& source,
+ /*out*/uint8_t* number_of_dex_files,
+ /*out*/uint32_t* size_uncompressed_data,
+ /*out*/uint32_t* size_compressed_data,
+ /*out*/std::string* error);
// Read the header of a profile line from the given fd.
- ProfileLoadSatus ReadProfileLineHeader(SafeBuffer& buffer,
- /*out*/ProfileLineHeader* line_header,
- /*out*/std::string* error);
+ ProfileLoadStatus ReadProfileLineHeader(SafeBuffer& buffer,
+ /*out*/ProfileLineHeader* line_header,
+ /*out*/std::string* error);
// Read individual elements from the profile line header.
bool ReadProfileLineHeaderElements(SafeBuffer& buffer,
@@ -643,12 +735,12 @@ class ProfileCompilationInfo {
/*out*/std::string* error);
// Read a single profile line from the given fd.
- ProfileLoadSatus ReadProfileLine(SafeBuffer& buffer,
- uint8_t number_of_dex_files,
- const ProfileLineHeader& line_header,
- const SafeMap<uint8_t, uint8_t>& dex_profile_index_remap,
- bool merge_classes,
- /*out*/std::string* error);
+ ProfileLoadStatus ReadProfileLine(SafeBuffer& buffer,
+ uint8_t number_of_dex_files,
+ const ProfileLineHeader& line_header,
+ const SafeMap<uint8_t, uint8_t>& dex_profile_index_remap,
+ bool merge_classes,
+ /*out*/std::string* error);
// Read all the classes from the buffer into the profile `info_` structure.
bool ReadClasses(SafeBuffer& buffer,
@@ -665,6 +757,7 @@ class ProfileCompilationInfo {
// The method generates mapping of profile indices while merging a new profile
// data into current data. It returns true, if the mapping was successful.
bool RemapProfileIndex(const std::vector<ProfileLineHeader>& profile_line_headers,
+ const ProfileLoadFilterFn& filter_fn,
/*out*/SafeMap<uint8_t, uint8_t>* dex_profile_index_remap);
// Read the inline cache encoding from line_bufer into inline_cache.
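
The ProfileLoadFilterFn declared above is just a predicate over (dex_location, checksum); during load, profile lines it rejects are skipped over and never merged. A tiny self-contained model of how such a filter selects lines follows; the LineHeader struct and CountAcceptedLines are illustrative, not the real loader.

#include <cstdint>
#include <functional>
#include <iostream>
#include <string>
#include <vector>

using ProfileLoadFilterFn = std::function<bool(const std::string&, uint32_t)>;

// Stand-in for the per-dex-file line header read from the profile.
struct LineHeader {
  std::string dex_location;
  uint32_t checksum;
};

size_t CountAcceptedLines(const std::vector<LineHeader>& headers,
                          const ProfileLoadFilterFn& filter_fn) {
  size_t accepted = 0;
  for (const LineHeader& h : headers) {
    if (filter_fn(h.dex_location, h.checksum)) {
      accepted++;  // in the real loader this line would be parsed and merged
    }              // otherwise the loader just advances past the line's bytes
  }
  return accepted;
}

int main() {
  std::vector<LineHeader> headers = {{"base.apk", 1u}, {"split.apk", 2u}};
  ProfileLoadFilterFn only_base = [](const std::string& location, uint32_t checksum) {
    return location == "base.apk" && checksum == 1u;
  };
  std::cout << CountAcceptedLines(headers, only_base) << "\n";  // prints 1
}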
diff --git a/runtime/jit/profile_compilation_info_test.cc b/runtime/jit/profile_compilation_info_test.cc
index 08042cc890..4ac11ee422 100644
--- a/runtime/jit/profile_compilation_info_test.cc
+++ b/runtime/jit/profile_compilation_info_test.cc
@@ -15,12 +15,14 @@
*/
#include <gtest/gtest.h>
+#include <stdio.h>
#include "art_method-inl.h"
#include "base/unix_file/fd_file.h"
#include "class_linker-inl.h"
#include "common_runtime_test.h"
#include "dex/dex_file.h"
+#include "dex/dex_file_loader.h"
#include "handle_scope-inl.h"
#include "jit/profile_compilation_info.h"
#include "linear_alloc.h"
@@ -29,6 +31,7 @@
#include "mirror/class_loader.h"
#include "scoped_thread_state_change-inl.h"
#include "type_reference.h"
+#include "ziparchive/zip_writer.h"
namespace art {
@@ -268,6 +271,53 @@ class ProfileCompilationInfoTest : public CommonRuntimeTest {
}
}
+ void TestProfileLoadFromZip(const char* zip_entry,
+ size_t zip_flags,
+ bool should_succeed,
+ bool should_succeed_with_empty_profile = false) {
+ // Create a valid profile.
+ ScratchFile profile;
+ ProfileCompilationInfo saved_info;
+ for (uint16_t i = 0; i < 10; i++) {
+ ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, /* method_idx */ i, &saved_info));
+ ASSERT_TRUE(AddMethod("dex_location2", /* checksum */ 2, /* method_idx */ i, &saved_info));
+ }
+ ASSERT_TRUE(saved_info.Save(GetFd(profile)));
+ ASSERT_EQ(0, profile.GetFile()->Flush());
+
+ // Prepare the profile content for zipping.
+ ASSERT_TRUE(profile.GetFile()->ResetOffset());
+ std::vector<uint8_t> data(profile.GetFile()->GetLength());
+ ASSERT_TRUE(profile.GetFile()->ReadFully(data.data(), data.size()));
+
+ // Zip the profile content.
+ ScratchFile zip;
+ FILE* file = fopen(zip.GetFile()->GetPath().c_str(), "wb");
+ ZipWriter writer(file);
+ writer.StartEntry(zip_entry, zip_flags);
+ writer.WriteBytes(data.data(), data.size());
+ writer.FinishEntry();
+ writer.Finish();
+ fflush(file);
+ fclose(file);
+
+ // Verify loading from the zip archive.
+ ProfileCompilationInfo loaded_info;
+ ASSERT_TRUE(zip.GetFile()->ResetOffset());
+ ASSERT_EQ(should_succeed, loaded_info.Load(zip.GetFile()->GetPath(), false));
+ if (should_succeed) {
+ if (should_succeed_with_empty_profile) {
+ ASSERT_TRUE(loaded_info.IsEmpty());
+ } else {
+ ASSERT_TRUE(loaded_info.Equals(saved_info));
+ }
+ }
+ }
+
+ bool IsEmpty(const ProfileCompilationInfo& info) {
+ return info.IsEmpty();
+ }
+
// Cannot sizeof the actual arrays so hard code the values here.
// They should not change anyway.
static constexpr int kProfileMagicSize = 4;
@@ -934,4 +984,313 @@ TEST_F(ProfileCompilationInfoTest, SampledMethodsTest) {
}
}
+TEST_F(ProfileCompilationInfoTest, LoadFromZipCompress) {
+ TestProfileLoadFromZip("primary.prof",
+ ZipWriter::kCompress | ZipWriter::kAlign32,
+ /*should_succeed*/true);
+}
+
+TEST_F(ProfileCompilationInfoTest, LoadFromZipUnCompress) {
+ TestProfileLoadFromZip("primary.prof",
+ ZipWriter::kAlign32,
+ /*should_succeed*/true);
+}
+
+TEST_F(ProfileCompilationInfoTest, LoadFromZipUnAligned) {
+ TestProfileLoadFromZip("primary.prof",
+ 0,
+ /*should_succeed*/true);
+}
+
+TEST_F(ProfileCompilationInfoTest, LoadFromZipFailBadZipEntry) {
+ TestProfileLoadFromZip("invalid.profile.entry",
+ 0,
+ /*should_succeed*/true,
+ /*should_succeed_with_empty_profile*/true);
+}
+
+TEST_F(ProfileCompilationInfoTest, LoadFromZipFailBadProfile) {
+ // Create a bad profile.
+ ScratchFile profile;
+ ASSERT_TRUE(profile.GetFile()->WriteFully(
+ ProfileCompilationInfo::kProfileMagic, kProfileMagicSize));
+ ASSERT_TRUE(profile.GetFile()->WriteFully(
+ ProfileCompilationInfo::kProfileVersion, kProfileVersionSize));
+ // Write that we have at least one line.
+ uint8_t line_number[] = { 0, 1 };
+ ASSERT_TRUE(profile.GetFile()->WriteFully(line_number, sizeof(line_number)));
+ ASSERT_EQ(0, profile.GetFile()->Flush());
+
+ // Prepare the profile content for zipping.
+ ASSERT_TRUE(profile.GetFile()->ResetOffset());
+ std::vector<uint8_t> data(profile.GetFile()->GetLength());
+ ASSERT_TRUE(profile.GetFile()->ReadFully(data.data(), data.size()));
+
+ // Zip the profile content.
+ ScratchFile zip;
+ FILE* file = fopen(zip.GetFile()->GetPath().c_str(), "wb");
+ ZipWriter writer(file);
+ writer.StartEntry("primary.prof", ZipWriter::kAlign32);
+ writer.WriteBytes(data.data(), data.size());
+ writer.FinishEntry();
+ writer.Finish();
+ fflush(file);
+ fclose(file);
+
+ // Check that we failed to load.
+ ProfileCompilationInfo loaded_info;
+ ASSERT_TRUE(zip.GetFile()->ResetOffset());
+ ASSERT_FALSE(loaded_info.Load(GetFd(zip)));
+}
+
+TEST_F(ProfileCompilationInfoTest, UpdateProfileKeyOk) {
+ std::vector<std::unique_ptr<const DexFile>> dex_files = OpenTestDexFiles("MultiDex");
+
+ ProfileCompilationInfo info;
+ for (const std::unique_ptr<const DexFile>& dex : dex_files) {
+ // Create the profile with a different location so that we can update it to the
+ // real dex location later.
+ std::string base_location = DexFileLoader::GetBaseLocation(dex->GetLocation());
+ std::string multidex_suffix = DexFileLoader::GetMultiDexSuffix(dex->GetLocation());
+ std::string old_name = base_location + "-old" + multidex_suffix;
+ info.AddMethodIndex(Hotness::kFlagHot,
+ old_name,
+ dex->GetLocationChecksum(),
+ /* method_idx */ 0,
+ dex->NumMethodIds());
+ }
+
+ // Update the profile keys based on the original dex files
+ ASSERT_TRUE(info.UpdateProfileKeys(dex_files));
+
+ // Verify that we find the methods when searched with the original dex files.
+ for (const std::unique_ptr<const DexFile>& dex : dex_files) {
+ std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi =
+ info.GetMethod(dex->GetLocation(), dex->GetLocationChecksum(), /* method_idx */ 0);
+ ASSERT_TRUE(loaded_pmi != nullptr);
+ }
+}
+
+TEST_F(ProfileCompilationInfoTest, UpdateProfileKeyOkButNoUpdate) {
+ std::vector<std::unique_ptr<const DexFile>> dex_files = OpenTestDexFiles("MultiDex");
+
+ ProfileCompilationInfo info;
+ info.AddMethodIndex(Hotness::kFlagHot,
+ "my.app",
+ /* checksum */ 123,
+ /* method_idx */ 0,
+ /* num_method_ids */ 10);
+
+ // Update the profile keys based on the original dex files
+ ASSERT_TRUE(info.UpdateProfileKeys(dex_files));
+
+ // Verify that we did not perform any update and that we cannot find anything with the new
+ // location.
+ for (const std::unique_ptr<const DexFile>& dex : dex_files) {
+ std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi =
+ info.GetMethod(dex->GetLocation(), dex->GetLocationChecksum(), /* method_idx */ 0);
+ ASSERT_TRUE(loaded_pmi == nullptr);
+ }
+
+ // Verify that we can find the original entry.
+ std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi =
+ info.GetMethod("my.app", /* checksum */ 123, /* method_idx */ 0);
+ ASSERT_TRUE(loaded_pmi != nullptr);
+}
+
+TEST_F(ProfileCompilationInfoTest, UpdateProfileKeyFail) {
+ std::vector<std::unique_ptr<const DexFile>> dex_files = OpenTestDexFiles("MultiDex");
+
+
+ ProfileCompilationInfo info;
+ // Add all dex
+ for (const std::unique_ptr<const DexFile>& dex : dex_files) {
+ // Create the profile with a different location so that we can update it to the
+ // real dex location later.
+ std::string base_location = DexFileLoader::GetBaseLocation(dex->GetLocation());
+ std::string multidex_suffix = DexFileLoader::GetMultiDexSuffix(dex->GetLocation());
+ std::string old_name = base_location + "-old" + multidex_suffix;
+ info.AddMethodIndex(Hotness::kFlagHot,
+ old_name,
+ dex->GetLocationChecksum(),
+ /* method_idx */ 0,
+ dex->NumMethodIds());
+ }
+
+ // Add a method index using the location we want to rename to.
+ // This will cause the rename to fail because an existing entry would already have that name.
+ info.AddMethodIndex(Hotness::kFlagHot,
+ dex_files[0]->GetLocation(),
+ /* checksum */ 123,
+ /* method_idx */ 0,
+ dex_files[0]->NumMethodIds());
+
+ ASSERT_FALSE(info.UpdateProfileKeys(dex_files));
+}
+
+TEST_F(ProfileCompilationInfoTest, FilteredLoading) {
+ ScratchFile profile;
+
+ ProfileCompilationInfo saved_info;
+ ProfileCompilationInfo::OfflineProfileMethodInfo pmi = GetOfflineProfileMethodInfo();
+
+ // Add methods with inline caches.
+ for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
+    // Add a method which is part of the same dex file as one of the classes from the inline caches.
+ ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, method_idx, pmi, &saved_info));
+ ASSERT_TRUE(AddMethod("dex_location2", /* checksum */ 2, method_idx, pmi, &saved_info));
+ // Add a method which is outside the set of dex files.
+ ASSERT_TRUE(AddMethod("dex_location4", /* checksum */ 4, method_idx, pmi, &saved_info));
+ }
+
+ ASSERT_TRUE(saved_info.Save(GetFd(profile)));
+ ASSERT_EQ(0, profile.GetFile()->Flush());
+
+ // Check that we get back what we saved.
+ ProfileCompilationInfo loaded_info;
+ ASSERT_TRUE(profile.GetFile()->ResetOffset());
+
+ // Filter out dex locations. Keep only dex_location1 and dex_location2.
+ ProfileCompilationInfo::ProfileLoadFilterFn filter_fn =
+ [](const std::string& dex_location, uint32_t checksum) -> bool {
+ return (dex_location == "dex_location1" && checksum == 1)
+ || (dex_location == "dex_location3" && checksum == 3);
+ };
+ ASSERT_TRUE(loaded_info.Load(GetFd(profile), true, filter_fn));
+
+ // Verify that we filtered out locations during load.
+
+ // Dex location 2 and 4 should have been filtered out
+ for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
+ ASSERT_TRUE(nullptr == loaded_info.GetMethod("dex_location2", /* checksum */ 2, method_idx));
+ ASSERT_TRUE(nullptr == loaded_info.GetMethod("dex_location4", /* checksum */ 4, method_idx));
+ }
+
+  // Dex location 1 should have all the inline caches referencing dex location 2 set to
+ // missing types.
+ for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
+ // The methods for dex location 1 should be in the profile data.
+ std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi1 =
+ loaded_info.GetMethod("dex_location1", /* checksum */ 1, /* method_idx */ method_idx);
+ ASSERT_TRUE(loaded_pmi1 != nullptr);
+
+ // Verify the inline cache.
+ // Everything should be as constructed by GetOfflineProfileMethodInfo with the exception
+ // of the inline caches referring types from dex_location2.
+ // These should be set to IsMissingType.
+ ProfileCompilationInfo::InlineCacheMap* ic_map = CreateInlineCacheMap();
+
+ // Monomorphic types should remain the same as dex_location1 was kept.
+ for (uint16_t dex_pc = 0; dex_pc < 11; dex_pc++) {
+ ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
+ dex_pc_data.AddClass(0, dex::TypeIndex(0));
+ ic_map->Put(dex_pc, dex_pc_data);
+ }
+ // Polymorphic inline cache should have been transformed to IsMissingType due to
+ // the removal of dex_location2.
+ for (uint16_t dex_pc = 11; dex_pc < 22; dex_pc++) {
+ ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
+ dex_pc_data.SetIsMissingTypes();
+ ic_map->Put(dex_pc, dex_pc_data);
+ }
+
+    // Megamorphic inline caches are not affected by removal of dex files.
+ for (uint16_t dex_pc = 22; dex_pc < 33; dex_pc++) {
+ ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
+ dex_pc_data.SetIsMegamorphic();
+ ic_map->Put(dex_pc, dex_pc_data);
+ }
+    // Missing types are not affected by removal of dex files.
+ for (uint16_t dex_pc = 33; dex_pc < 44; dex_pc++) {
+ ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
+ dex_pc_data.SetIsMissingTypes();
+ ic_map->Put(dex_pc, dex_pc_data);
+ }
+
+ ProfileCompilationInfo::OfflineProfileMethodInfo expected_pmi(ic_map);
+
+ // The dex references should not have dex_location2 in the list.
+ expected_pmi.dex_references.emplace_back("dex_location1", /* checksum */1, kMaxMethodIds);
+ expected_pmi.dex_references.emplace_back("dex_location3", /* checksum */3, kMaxMethodIds);
+
+ // Now check that we get back what we expect.
+ ASSERT_TRUE(*loaded_pmi1 == expected_pmi);
+ }
+}
+
+TEST_F(ProfileCompilationInfoTest, FilteredLoadingRemoveAll) {
+ ScratchFile profile;
+
+ ProfileCompilationInfo saved_info;
+ ProfileCompilationInfo::OfflineProfileMethodInfo pmi = GetOfflineProfileMethodInfo();
+
+ // Add methods with inline caches.
+ for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
+ // Add a method which is part of the same dex file as one of the classes from the inline caches.
+ ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, method_idx, pmi, &saved_info));
+ ASSERT_TRUE(AddMethod("dex_location2", /* checksum */ 2, method_idx, pmi, &saved_info));
+ // Add a method which is outside the set of dex files.
+ ASSERT_TRUE(AddMethod("dex_location4", /* checksum */ 4, method_idx, pmi, &saved_info));
+ }
+
+ ASSERT_TRUE(saved_info.Save(GetFd(profile)));
+ ASSERT_EQ(0, profile.GetFile()->Flush());
+
+ // Check that we get back what we saved.
+ ProfileCompilationInfo loaded_info;
+ ASSERT_TRUE(profile.GetFile()->ResetOffset());
+
+ // Remove all elements.
+ ProfileCompilationInfo::ProfileLoadFilterFn filter_fn =
+ [](const std::string&, uint32_t) -> bool { return false; };
+ ASSERT_TRUE(loaded_info.Load(GetFd(profile), true, filter_fn));
+
+ // Verify that we filtered out everything.
+ ASSERT_TRUE(IsEmpty(loaded_info));
+}
+
+TEST_F(ProfileCompilationInfoTest, FilteredLoadingKeepAll) {
+ ScratchFile profile;
+
+ ProfileCompilationInfo saved_info;
+ ProfileCompilationInfo::OfflineProfileMethodInfo pmi = GetOfflineProfileMethodInfo();
+
+ // Add methods with inline caches.
+ for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
+ // Add a method which is part of the same dex file as one of the
+ // classes from the inline caches.
+ ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, method_idx, pmi, &saved_info));
+ // Add a method which is outside the set of dex files.
+ ASSERT_TRUE(AddMethod("dex_location4", /* checksum */ 4, method_idx, pmi, &saved_info));
+ }
+
+ ASSERT_TRUE(saved_info.Save(GetFd(profile)));
+ ASSERT_EQ(0, profile.GetFile()->Flush());
+
+ // Check that we get back what we saved.
+ ProfileCompilationInfo loaded_info;
+ ASSERT_TRUE(profile.GetFile()->ResetOffset());
+
+ // Keep all elements.
+ ProfileCompilationInfo::ProfileLoadFilterFn filter_fn =
+ [](const std::string&, uint32_t) -> bool { return true; };
+ ASSERT_TRUE(loaded_info.Load(GetFd(profile), true, filter_fn));
+
+ ASSERT_TRUE(loaded_info.Equals(saved_info));
+
+ for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
+ std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi1 =
+ loaded_info.GetMethod("dex_location1", /* checksum */ 1, method_idx);
+ ASSERT_TRUE(loaded_pmi1 != nullptr);
+ ASSERT_TRUE(*loaded_pmi1 == pmi);
+ }
+ for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
+ std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi2 =
+ loaded_info.GetMethod("dex_location4", /* checksum */ 4, method_idx);
+ ASSERT_TRUE(loaded_pmi2 != nullptr);
+ ASSERT_TRUE(*loaded_pmi2 == pmi);
+ }
+}
+
} // namespace art
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index b8e6ebe8d8..666fb98354 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -33,7 +33,9 @@
#include "base/stl_util.h"
#include "class_linker-inl.h"
#include "dex/dex_file-inl.h"
+#include "dex/utf.h"
#include "fault_handler.h"
+#include "hidden_api.h"
#include "gc/accounting/card_table-inl.h"
#include "gc_root.h"
#include "indirect_reference_table-inl.h"
@@ -56,7 +58,6 @@
#include "safe_map.h"
#include "scoped_thread_state_change-inl.h"
#include "thread.h"
-#include "utf.h"
#include "well_known_classes.h"
namespace {
@@ -79,6 +80,19 @@ namespace art {
// things not rendering correctly. E.g. b/16858794
static constexpr bool kWarnJniAbort = false;
+static bool IsCallerInBootClassPath(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) {
+ ObjPtr<mirror::Class> klass = GetCallingClass(self, /* num_frames */ 1);
+ // If `klass` is null, it is an unattached native thread. Assume this is
+ // *not* boot class path.
+ return klass != nullptr && klass->IsBootStrapClassLoaded();
+}
+
+template<typename T>
+ALWAYS_INLINE static bool ShouldBlockAccessToMember(T* member, Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return hiddenapi::ShouldBlockAccessToMember(member, self, IsCallerInBootClassPath);
+}
+
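FindMethodID and FindFieldID below use these helpers to null out a resolved member that the hidden-API policy rejects, so callers fall into the ordinary NoSuchMethodError / NoSuchFieldError path. A self-contained sketch of that pattern, where the Method struct, lookup table and policy check are hypothetical stand-ins rather than ART APIs:

  #include <iostream>
  #include <map>
  #include <string>

  struct Method {
    std::string name;
    bool hidden;  // Stand-in for a hidden-API bit in the access flags.
  };

  std::map<std::string, Method> g_methods = {
      {"publicMethod", {"publicMethod", false}},
      {"hiddenMethod", {"hiddenMethod", true}},
  };

  Method* FindRawMethod(const std::string& name) {
    auto it = g_methods.find(name);
    return it == g_methods.end() ? nullptr : &it->second;
  }

  // Stand-in for hiddenapi::ShouldBlockAccessToMember: block hidden members
  // whenever the caller is not on the boot class path.
  bool ShouldBlock(const Method* m, bool caller_in_boot_class_path) {
    return m->hidden && !caller_in_boot_class_path;
  }

  Method* FindMethodChecked(const std::string& name, bool caller_in_boot_class_path) {
    Method* m = FindRawMethod(name);
    if (m != nullptr && ShouldBlock(m, caller_in_boot_class_path)) {
      // Pretend the member does not exist; the caller then throws the usual
      // "no such method" error instead of leaking the hidden member.
      m = nullptr;
    }
    return m;
  }

  int main() {
    std::cout << (FindMethodChecked("hiddenMethod", false) == nullptr) << "\n";  // 1: blocked
    std::cout << (FindMethodChecked("hiddenMethod", true) != nullptr) << "\n";   // 1: allowed
  }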
// Helpers to call instrumentation functions for fields. These take jobjects so we don't need to set
// up handles for the rare case where these actually do something. Once these functions return it is
// possible there will be a pending exception if the instrumentation happens to throw one.
@@ -238,6 +252,9 @@ static jmethodID FindMethodID(ScopedObjectAccess& soa, jclass jni_class,
} else {
method = c->FindClassMethod(name, sig, pointer_size);
}
+ if (method != nullptr && ShouldBlockAccessToMember(method, soa.Self())) {
+ method = nullptr;
+ }
if (method == nullptr || method->IsStatic() != is_static) {
ThrowNoSuchMethodError(soa, c, name, sig, is_static ? "static" : "non-static");
return nullptr;
@@ -314,6 +331,9 @@ static jfieldID FindFieldID(const ScopedObjectAccess& soa, jclass jni_class, con
} else {
field = c->FindInstanceField(name, field_type->GetDescriptor(&temp));
}
+ if (field != nullptr && ShouldBlockAccessToMember(field, soa.Self())) {
+ field = nullptr;
+ }
if (field == nullptr) {
soa.Self()->ThrowNewExceptionF("Ljava/lang/NoSuchFieldError;",
"no \"%s\" field \"%s\" in class \"%s\" or its superclasses",
diff --git a/runtime/leb128.h b/runtime/leb128.h
index 2bfed7f539..07eadc1ddf 100644
--- a/runtime/leb128.h
+++ b/runtime/leb128.h
@@ -55,6 +55,10 @@ static inline uint32_t DecodeUnsignedLeb128(const uint8_t** data) {
return static_cast<uint32_t>(result);
}
+static inline uint32_t DecodeUnsignedLeb128WithoutMovingCursor(const uint8_t* data) {
+ return DecodeUnsignedLeb128(&data);
+}
+
static inline bool DecodeUnsignedLeb128Checked(const uint8_t** data,
const void* end,
uint32_t* out) {
@@ -203,6 +207,34 @@ static inline uint32_t UnsignedLeb128Size(uint32_t data) {
return (x * 37) >> 8;
}
+static inline bool IsLeb128Terminator(const uint8_t* ptr) {
+ return *ptr <= 0x7f;
+}
+
+// Returns a pointer to the first byte of a Leb128 value assuming that:
+// (1) `end_ptr` points to the first byte after the Leb128 value, and
+// (2) there is another Leb128 value before this one.
+template <typename T>
+static inline T* ReverseSearchUnsignedLeb128(T* end_ptr) {
+ static_assert(std::is_same<typename std::remove_const<T>::type, uint8_t>::value,
+ "T must be a uint8_t");
+ T* ptr = end_ptr;
+
+ // Move one byte back, check that this is the terminating byte.
+ ptr--;
+ DCHECK(IsLeb128Terminator(ptr));
+
+ // Keep moving back while the previous byte is not a terminating byte.
+ // Fail after reading five bytes in case there isn't another Leb128 value
+ // before this one.
+ while (!IsLeb128Terminator(ptr - 1)) {
+ ptr--;
+ DCHECK_LE(static_cast<ptrdiff_t>(end_ptr - ptr), 5);
+ }
+
+ return ptr;
+}
+
// Returns the number of bytes needed to encode the value in signed LEB128.
static inline uint32_t SignedLeb128Size(int32_t data) {
// Like UnsignedLeb128Size(), but we need one bit beyond the highest bit that differs from sign.
@@ -241,7 +273,7 @@ static inline void EncodeUnsignedLeb128(Vector* dest, uint32_t value) {
static inline void UpdateUnsignedLeb128(uint8_t* dest, uint32_t value) {
const uint8_t* old_end = dest;
uint32_t old_value = DecodeUnsignedLeb128(&old_end);
- DCHECK_LE(value, old_value);
+ DCHECK_LE(UnsignedLeb128Size(value), UnsignedLeb128Size(old_value));
for (uint8_t* end = EncodeUnsignedLeb128(dest, value); end < old_end; end++) {
// Use longer encoding than necessary to fill the allocated space.
end[-1] |= 0x80;
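As a worked example of the unsigned LEB128 helpers touched here, a small sketch that assumes the functions above are in scope via leb128.h:

  #include <cstdint>

  void Leb128Example() {
    // Two back-to-back unsigned LEB128 values: 1 (one byte) and 300 (two bytes).
    // 300 = 0b1'0010'1100 -> low seven bits 0x2C with the continuation bit set
    // gives 0xAC, followed by 0x02.
    uint8_t buf[] = {0x01, 0xAC, 0x02};

    const uint8_t* cursor = buf;
    uint32_t first = DecodeUnsignedLeb128(&cursor);   // 1; cursor now at buf + 1.
    uint32_t second = DecodeUnsignedLeb128(&cursor);  // 300; cursor now at buf + 3.

    // Reverse search from one past the end of the second value finds its first byte,
    // relying on the value before it (the 0x01) acting as a terminator.
    const uint8_t* start = ReverseSearchUnsignedLeb128(buf + 3);      // buf + 1
    uint32_t again = DecodeUnsignedLeb128WithoutMovingCursor(start);  // 300

    (void)first; (void)second; (void)again;
  }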
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index 8abf8a6003..26acef06d6 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -396,6 +396,91 @@ MemMap* MemMap::MapDummy(const char* name, uint8_t* addr, size_t byte_count) {
return new MemMap(name, addr, byte_count, addr, page_aligned_byte_count, 0, true /* reuse */);
}
+template<typename A, typename B>
+static ptrdiff_t PointerDiff(A* a, B* b) {
+ return static_cast<ptrdiff_t>(reinterpret_cast<intptr_t>(a) - reinterpret_cast<intptr_t>(b));
+}
+
+bool MemMap::ReplaceWith(MemMap** source_ptr, /*out*/std::string* error) {
+#if !HAVE_MREMAP_SYSCALL
+ UNUSED(source_ptr);
+ *error = "Cannot perform atomic replace because we are missing the required mremap syscall";
+ return false;
+#else // !HAVE_MREMAP_SYSCALL
+ CHECK(source_ptr != nullptr);
+ CHECK(*source_ptr != nullptr);
+ if (!MemMap::kCanReplaceMapping) {
+ *error = "Unable to perform atomic replace due to runtime environment!";
+ return false;
+ }
+ MemMap* source = *source_ptr;
+ // Neither mapping may be a 'reuse' mapping.
+ if (source->reuse_ || reuse_) {
+ *error = "One or both mappings is not a real mmap!";
+ return false;
+ }
+ // TODO Support redzones.
+ if (source->redzone_size_ != 0 || redzone_size_ != 0) {
+ *error = "source and dest have different redzone sizes";
+ return false;
+ }
+ // Make sure they have the same offset from the actual mmap'd address
+ if (PointerDiff(BaseBegin(), Begin()) != PointerDiff(source->BaseBegin(), source->Begin())) {
+ *error =
+ "source starts at a different offset from the mmap. Cannot atomically replace mappings";
+ return false;
+ }
+ // mremap doesn't allow the final [start, end] to overlap with the initial [start, end] (it's like
+ // memcpy but the check is explicit and actually done).
+ if (source->BaseBegin() > BaseBegin() &&
+ reinterpret_cast<uint8_t*>(BaseBegin()) + source->BaseSize() >
+ reinterpret_cast<uint8_t*>(source->BaseBegin())) {
+ *error = "destination memory pages overlap with source memory pages";
+ return false;
+ }
+ // Change the protection to match the new location.
+ int old_prot = source->GetProtect();
+ if (!source->Protect(GetProtect())) {
+ *error = "Could not change protections for source to those required for dest.";
+ return false;
+ }
+
+ // Do the mremap.
+ void* res = mremap(/*old_address*/source->BaseBegin(),
+ /*old_size*/source->BaseSize(),
+ /*new_size*/source->BaseSize(),
+ /*flags*/MREMAP_MAYMOVE | MREMAP_FIXED,
+ /*new_address*/BaseBegin());
+ if (res == MAP_FAILED) {
+ int saved_errno = errno;
+ // Wasn't able to move mapping. Change the protection of source back to the original one and
+ // return.
+ source->Protect(old_prot);
+ *error = std::string("Failed to mremap source to dest. Error was ") + strerror(saved_errno);
+ return false;
+ }
+ CHECK(res == BaseBegin());
+
+ // The new base_size is all the pages of the 'source' plus any remaining dest pages. We will unmap
+ // them later.
+ size_t new_base_size = std::max(source->base_size_, base_size_);
+
+ // Delete the old source, don't unmap it though (set reuse) since it is already gone.
+ *source_ptr = nullptr;
+ size_t source_size = source->size_;
+ source->already_unmapped_ = true;
+ delete source;
+ source = nullptr;
+
+ size_ = source_size;
+ base_size_ = new_base_size;
+ // Reduce base_size if needed (this will unmap the extra pages).
+ SetSize(source_size);
+
+ return true;
+#endif // !HAVE_MREMAP_SYSCALL
+}
+
MemMap* MemMap::MapFileAtAddress(uint8_t* expected_ptr,
size_t byte_count,
int prot,
@@ -499,9 +584,11 @@ MemMap::~MemMap() {
if (!reuse_) {
MEMORY_TOOL_MAKE_UNDEFINED(base_begin_, base_size_);
- int result = munmap(base_begin_, base_size_);
- if (result == -1) {
- PLOG(FATAL) << "munmap failed";
+ if (!already_unmapped_) {
+ int result = munmap(base_begin_, base_size_);
+ if (result == -1) {
+ PLOG(FATAL) << "munmap failed";
+ }
}
}
@@ -523,7 +610,7 @@ MemMap::~MemMap() {
MemMap::MemMap(const std::string& name, uint8_t* begin, size_t size, void* base_begin,
size_t base_size, int prot, bool reuse, size_t redzone_size)
: name_(name), begin_(begin), size_(size), base_begin_(base_begin), base_size_(base_size),
- prot_(prot), reuse_(reuse), redzone_size_(redzone_size) {
+ prot_(prot), reuse_(reuse), already_unmapped_(false), redzone_size_(redzone_size) {
if (size_ == 0) {
CHECK(begin_ == nullptr);
CHECK(base_begin_ == nullptr);
@@ -794,19 +881,21 @@ void MemMap::Shutdown() {
}
void MemMap::SetSize(size_t new_size) {
- if (new_size == base_size_) {
+ CHECK_LE(new_size, size_);
+ size_t new_base_size = RoundUp(new_size + static_cast<size_t>(PointerDiff(Begin(), BaseBegin())),
+ kPageSize);
+ if (new_base_size == base_size_) {
+ size_ = new_size;
return;
}
- CHECK_ALIGNED(new_size, kPageSize);
- CHECK_EQ(base_size_, size_) << "Unsupported";
- CHECK_LE(new_size, base_size_);
+ CHECK_LT(new_base_size, base_size_);
MEMORY_TOOL_MAKE_UNDEFINED(
reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(BaseBegin()) +
- new_size),
- base_size_ - new_size);
- CHECK_EQ(munmap(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(BaseBegin()) + new_size),
- base_size_ - new_size), 0) << new_size << " " << base_size_;
- base_size_ = new_size;
+ new_base_size),
+ base_size_ - new_base_size);
+ CHECK_EQ(munmap(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(BaseBegin()) + new_base_size),
+ base_size_ - new_base_size), 0) << new_base_size << " " << base_size_;
+ base_size_ = new_base_size;
size_ = new_size;
}
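The reworked SetSize() now rounds the requested byte size up to a whole number of pages, taking into account the offset of Begin() within the first mapped page, and only unmaps the tail beyond that. A standalone sketch of the same arithmetic, with an illustrative 64-byte offset (for anonymous maps the offset is normally zero):

  #include <cstddef>
  #include <iostream>

  constexpr size_t kPageSize = 4096;

  constexpr size_t RoundUpToPage(size_t x) {
    return (x + kPageSize - 1) & ~(kPageSize - 1);
  }

  int main() {
    size_t begin_offset = 64;  // Begin() - BaseBegin().
    size_t new_size = 8000;    // Requested usable size in bytes.
    size_t new_base_size = RoundUpToPage(new_size + begin_offset);
    std::cout << new_base_size << "\n";  // 8192: two pages stay mapped, the rest is unmapped.
  }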
@@ -927,9 +1016,6 @@ void* MemMap::MapInternal(void* addr,
UNUSED(low_4gb);
#endif
DCHECK_ALIGNED(length, kPageSize);
- if (low_4gb) {
- DCHECK_EQ(flags & MAP_FIXED, 0);
- }
// TODO:
// A page allocator would be a useful abstraction here, as
// 1) It is doubtful that MAP_32BIT on x86_64 is doing the right job for us
diff --git a/runtime/mem_map.h b/runtime/mem_map.h
index 5603963eac..0ecb414614 100644
--- a/runtime/mem_map.h
+++ b/runtime/mem_map.h
@@ -39,8 +39,12 @@ namespace art {
#ifdef __linux__
static constexpr bool kMadviseZeroes = true;
+#define HAVE_MREMAP_SYSCALL true
#else
static constexpr bool kMadviseZeroes = false;
+// We cannot ever perform MemMap::ReplaceWith on non-linux hosts since the syscall is not
+// present.
+#define HAVE_MREMAP_SYSCALL false
#endif
// Used to keep track of mmap segments.
@@ -52,6 +56,32 @@ static constexpr bool kMadviseZeroes = false;
// Otherwise, calls might see uninitialized values.
class MemMap {
public:
+ static constexpr bool kCanReplaceMapping = HAVE_MREMAP_SYSCALL;
+
+ // Replace the data in this memmap with the data in the memmap pointed to by source. The caller
+ // relinquishes ownership of the source mmap.
+ //
+ // For the call to be successful:
+ // * The range [dest->Begin(), dest->Begin() + source->Size()] must not overlap with
+ // [source->Begin(), source->End()].
+ // * Neither source nor dest may be 'reused' mappings (they must own all the pages associated
+ // with them).
+ // * kCanReplaceMapping must be true.
+ // * Neither source nor dest may use manual redzones.
+ // * Both source and dest must have the same offset from the nearest page boundary.
+ // * mremap must succeed when called on the mappings.
+ //
+ // If this call succeeds it will return true and:
+ // * Deallocates *source.
+ // * Sets *source to nullptr.
+ // * Leaves the protection of this unchanged.
+ // * Sets the size of this to the size of the source.
+ // * Replaces the data in this with the data from source.
+ //
+ // If this call fails it will return false and make no changes to *source or this. The ownership
+ // of the source mmap is returned to the caller.
+ bool ReplaceWith(/*in-out*/MemMap** source, /*out*/std::string* error);
+
// Request an anonymous region of length 'byte_count' and a requested base address.
// Use null as the requested base address if you don't care.
// "reuse" allows re-mapping an address range from an existing mapping.
@@ -246,6 +276,9 @@ class MemMap {
// unmapping.
const bool reuse_;
+ // When already_unmapped_ is true the destructor will not call munmap.
+ bool already_unmapped_;
+
const size_t redzone_size_;
#if USE_ART_LOW_4G_ALLOCATOR
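The contract above is exercised by the new tests in mem_map_test.cc below; condensed, a successful call looks roughly like this (a sketch modeled on those tests, with error handling elided):

  std::string error_msg;
  std::unique_ptr<MemMap> dest(MemMap::MapAnonymous(
      "replace-dest", nullptr, kPageSize, PROT_READ, false, false, &error_msg));
  MemMap* source = MemMap::MapAnonymous(
      "replace-source", nullptr, kPageSize, PROT_READ | PROT_WRITE, false, false, &error_msg);

  if (dest->ReplaceWith(&source, &error_msg)) {
    // Success: source is now nullptr, its pages live at dest->Begin(), and dest
    // keeps its original PROT_READ protection.
  } else {
    // Failure: both mappings are unchanged and the caller still owns source.
    delete source;
  }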
diff --git a/runtime/mem_map_test.cc b/runtime/mem_map_test.cc
index a4ebb16d09..3adbf18a7a 100644
--- a/runtime/mem_map_test.cc
+++ b/runtime/mem_map_test.cc
@@ -19,6 +19,7 @@
#include <sys/mman.h>
#include <memory>
+#include <random>
#include "base/memory_tool.h"
#include "base/unix_file/fd_file.h"
@@ -36,6 +37,25 @@ class MemMapTest : public CommonRuntimeTest {
return mem_map->base_size_;
}
+ static bool IsAddressMapped(void* addr) {
+ bool res = msync(addr, 1, MS_SYNC) == 0;
+ if (!res && errno != ENOMEM) {
+ PLOG(FATAL) << "Unexpected error occurred on msync";
+ }
+ return res;
+ }
+
+ static std::vector<uint8_t> RandomData(size_t size) {
+ std::random_device rd;
+ std::uniform_int_distribution<uint8_t> dist;
+ std::vector<uint8_t> res;
+ res.resize(size);
+ for (size_t i = 0; i < size; i++) {
+ res[i] = dist(rd);
+ }
+ return res;
+ }
+
static uint8_t* GetValidMapAddress(size_t size, bool low_4gb) {
// Find a valid map address and unmap it before returning.
std::string error_msg;
@@ -143,6 +163,186 @@ TEST_F(MemMapTest, Start) {
}
#endif
+// We need mremap to be able to test ReplaceMapping at all
+#if HAVE_MREMAP_SYSCALL
+TEST_F(MemMapTest, ReplaceMapping_SameSize) {
+ std::string error_msg;
+ std::unique_ptr<MemMap> dest(MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
+ nullptr,
+ kPageSize,
+ PROT_READ,
+ false,
+ false,
+ &error_msg));
+ ASSERT_TRUE(dest != nullptr);
+ MemMap* source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
+ nullptr,
+ kPageSize,
+ PROT_WRITE | PROT_READ,
+ false,
+ false,
+ &error_msg);
+ ASSERT_TRUE(source != nullptr);
+ void* source_addr = source->Begin();
+ void* dest_addr = dest->Begin();
+ ASSERT_TRUE(IsAddressMapped(source_addr));
+ ASSERT_TRUE(IsAddressMapped(dest_addr));
+
+ std::vector<uint8_t> data = RandomData(kPageSize);
+ memcpy(source->Begin(), data.data(), data.size());
+
+ ASSERT_TRUE(dest->ReplaceWith(&source, &error_msg)) << error_msg;
+
+ ASSERT_FALSE(IsAddressMapped(source_addr));
+ ASSERT_TRUE(IsAddressMapped(dest_addr));
+ ASSERT_TRUE(source == nullptr);
+
+ ASSERT_EQ(dest->Size(), static_cast<size_t>(kPageSize));
+
+ ASSERT_EQ(memcmp(dest->Begin(), data.data(), dest->Size()), 0);
+}
+
+TEST_F(MemMapTest, ReplaceMapping_MakeLarger) {
+ std::string error_msg;
+ std::unique_ptr<MemMap> dest(MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
+ nullptr,
+ 5 * kPageSize, // Need to make it larger
+ // initially so we know
+ // there won't be mappings
+ // in the way when we move
+ // source.
+ PROT_READ,
+ false,
+ false,
+ &error_msg));
+ ASSERT_TRUE(dest != nullptr);
+ MemMap* source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
+ nullptr,
+ 3 * kPageSize,
+ PROT_WRITE | PROT_READ,
+ false,
+ false,
+ &error_msg);
+ ASSERT_TRUE(source != nullptr);
+ uint8_t* source_addr = source->Begin();
+ uint8_t* dest_addr = dest->Begin();
+ ASSERT_TRUE(IsAddressMapped(source_addr));
+
+ // Fill the source with random data.
+ std::vector<uint8_t> data = RandomData(3 * kPageSize);
+ memcpy(source->Begin(), data.data(), data.size());
+
+ // Make the dest smaller so that we know we'll have space.
+ dest->SetSize(kPageSize);
+
+ ASSERT_TRUE(IsAddressMapped(dest_addr));
+ ASSERT_FALSE(IsAddressMapped(dest_addr + 2 * kPageSize));
+ ASSERT_EQ(dest->Size(), static_cast<size_t>(kPageSize));
+
+ ASSERT_TRUE(dest->ReplaceWith(&source, &error_msg)) << error_msg;
+
+ ASSERT_FALSE(IsAddressMapped(source_addr));
+ ASSERT_EQ(dest->Size(), static_cast<size_t>(3 * kPageSize));
+ ASSERT_TRUE(IsAddressMapped(dest_addr));
+ ASSERT_TRUE(IsAddressMapped(dest_addr + 2 * kPageSize));
+ ASSERT_TRUE(source == nullptr);
+
+ ASSERT_EQ(memcmp(dest->Begin(), data.data(), dest->Size()), 0);
+}
+
+TEST_F(MemMapTest, ReplaceMapping_MakeSmaller) {
+ std::string error_msg;
+ std::unique_ptr<MemMap> dest(MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
+ nullptr,
+ 3 * kPageSize,
+ PROT_READ,
+ false,
+ false,
+ &error_msg));
+ ASSERT_TRUE(dest != nullptr);
+ MemMap* source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
+ nullptr,
+ kPageSize,
+ PROT_WRITE | PROT_READ,
+ false,
+ false,
+ &error_msg);
+ ASSERT_TRUE(source != nullptr);
+ uint8_t* source_addr = source->Begin();
+ uint8_t* dest_addr = dest->Begin();
+ ASSERT_TRUE(IsAddressMapped(source_addr));
+ ASSERT_TRUE(IsAddressMapped(dest_addr));
+ ASSERT_TRUE(IsAddressMapped(dest_addr + 2 * kPageSize));
+ ASSERT_EQ(dest->Size(), static_cast<size_t>(3 * kPageSize));
+
+ std::vector<uint8_t> data = RandomData(kPageSize);
+ memcpy(source->Begin(), data.data(), kPageSize);
+
+ ASSERT_TRUE(dest->ReplaceWith(&source, &error_msg)) << error_msg;
+
+ ASSERT_FALSE(IsAddressMapped(source_addr));
+ ASSERT_EQ(dest->Size(), static_cast<size_t>(kPageSize));
+ ASSERT_TRUE(IsAddressMapped(dest_addr));
+ ASSERT_FALSE(IsAddressMapped(dest_addr + 2 * kPageSize));
+ ASSERT_TRUE(source == nullptr);
+
+ ASSERT_EQ(memcmp(dest->Begin(), data.data(), dest->Size()), 0);
+}
+
+TEST_F(MemMapTest, ReplaceMapping_FailureOverlap) {
+ std::string error_msg;
+ std::unique_ptr<MemMap> dest(
+ MemMap::MapAnonymous(
+ "MapAnonymousEmpty-atomic-replace-dest",
+ nullptr,
+ 3 * kPageSize, // Need to make it larger initially so we know there won't be mappings in
+ // the way when we move source.
+ PROT_READ | PROT_WRITE,
+ false,
+ false,
+ &error_msg));
+ ASSERT_TRUE(dest != nullptr);
+ // Resize down to 1 page so we can remap the rest.
+ dest->SetSize(kPageSize);
+ // Create source from the last 2 pages
+ MemMap* source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
+ dest->Begin() + kPageSize,
+ 2 * kPageSize,
+ PROT_WRITE | PROT_READ,
+ false,
+ false,
+ &error_msg);
+ ASSERT_TRUE(source != nullptr);
+ MemMap* orig_source = source;
+ ASSERT_EQ(dest->Begin() + kPageSize, source->Begin());
+ uint8_t* source_addr = source->Begin();
+ uint8_t* dest_addr = dest->Begin();
+ ASSERT_TRUE(IsAddressMapped(source_addr));
+
+ // Fill the source and dest with random data.
+ std::vector<uint8_t> data = RandomData(2 * kPageSize);
+ memcpy(source->Begin(), data.data(), data.size());
+ std::vector<uint8_t> dest_data = RandomData(kPageSize);
+ memcpy(dest->Begin(), dest_data.data(), dest_data.size());
+
+ ASSERT_TRUE(IsAddressMapped(dest_addr));
+ ASSERT_EQ(dest->Size(), static_cast<size_t>(kPageSize));
+
+ ASSERT_FALSE(dest->ReplaceWith(&source, &error_msg)) << error_msg;
+
+ ASSERT_TRUE(source == orig_source);
+ ASSERT_TRUE(IsAddressMapped(source_addr));
+ ASSERT_TRUE(IsAddressMapped(dest_addr));
+ ASSERT_EQ(source->Size(), data.size());
+ ASSERT_EQ(dest->Size(), dest_data.size());
+
+ ASSERT_EQ(memcmp(source->Begin(), data.data(), data.size()), 0);
+ ASSERT_EQ(memcmp(dest->Begin(), dest_data.data(), dest_data.size()), 0);
+
+ delete source;
+}
+#endif // HAVE_MREMAP_SYSCALL
+
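The IsAddressMapped() helper above relies on msync(2) failing with ENOMEM when the probed address is not backed by any mapping. The same probe works outside the test fixture; a minimal sketch (Linux/POSIX only, and the probed address must be page-aligned or msync reports EINVAL instead):

  #include <cerrno>
  #include <cstdio>
  #include <sys/mman.h>

  // Returns true if the page containing `addr` is currently mapped.
  bool ProbeMapped(void* addr) {
    if (msync(addr, 1, MS_SYNC) == 0) {
      return true;
    }
    if (errno != ENOMEM) {
      std::perror("msync");  // Unexpected failure; ENOMEM is the "not mapped" case.
    }
    return false;
  }

  int main() {
    void* page = mmap(nullptr, 4096, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    std::printf("%d\n", ProbeMapped(page));  // 1
    munmap(page, 4096);
    std::printf("%d\n", ProbeMapped(page));  // 0 (unless another mapping reused the address)
  }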
TEST_F(MemMapTest, MapAnonymousEmpty) {
CommonInit();
std::string error_msg;
diff --git a/runtime/method_handles.cc b/runtime/method_handles.cc
index 88f30a8900..2701ec66a4 100644
--- a/runtime/method_handles.cc
+++ b/runtime/method_handles.cc
@@ -231,7 +231,7 @@ bool ConvertJValueCommon(
StackHandleScope<2> hs(Thread::Current());
Handle<mirror::Class> h_to(hs.NewHandle(to));
Handle<mirror::Object> h_obj(hs.NewHandle(src_value.GetL()));
- if (h_obj != nullptr && !to->IsAssignableFrom(h_obj->GetClass())) {
+ if (UNLIKELY(!h_obj.IsNull() && !to->IsAssignableFrom(h_obj->GetClass()))) {
ThrowClassCastException(h_to.Get(), h_obj->GetClass());
return false;
}
@@ -246,7 +246,7 @@ bool ConvertJValueCommon(
Primitive::Type type;
if (!GetUnboxedPrimitiveType(to, &type)) {
ObjPtr<mirror::Class> boxed_from_class = GetBoxedPrimitiveClass(from_type);
- if (boxed_from_class->IsSubClass(to)) {
+ if (LIKELY(boxed_from_class->IsSubClass(to))) {
type = from_type;
} else {
ThrowWrongMethodTypeException(callee_type.Get(), callsite_type.Get());
@@ -259,7 +259,7 @@ bool ConvertJValueCommon(
return false;
}
- if (!ConvertPrimitiveValueNoThrow(from_type, type, src_value, value)) {
+ if (UNLIKELY(!ConvertPrimitiveValueNoThrow(from_type, type, src_value, value))) {
ThrowWrongMethodTypeException(callee_type.Get(), callsite_type.Get());
return false;
}
@@ -274,7 +274,7 @@ bool ConvertJValueCommon(
DCHECK(IsPrimitiveType(to_type));
ObjPtr<mirror::Object> from_obj(src_value.GetL());
- if (UNLIKELY(from_obj == nullptr)) {
+ if (UNLIKELY(from_obj.IsNull())) {
ThrowNullPointerException(
StringPrintf("Expected to unbox a '%s' primitive type but was returned null",
from->PrettyDescriptor().c_str()).c_str());
@@ -289,7 +289,14 @@ bool ConvertJValueCommon(
}
if (UNLIKELY(!ConvertPrimitiveValueNoThrow(unboxed_type, to_type, unboxed_value, value))) {
- ThrowClassCastException(from, to);
+ if (from->IsAssignableFrom(GetBoxedPrimitiveClass(to_type))) {
+ // CallSite may be Number, but the Number object is
+ // incompatible, e.g. Number (Integer) for a short.
+ ThrowClassCastException(from, to);
+ } else {
+ // CallSite is incompatible, e.g. Integer for a short.
+ ThrowWrongMethodTypeException(callee_type.Get(), callsite_type.Get());
+ }
return false;
}
@@ -408,7 +415,7 @@ static inline bool MethodHandleInvokeMethod(ArtMethod* called_method,
const InstructionOperands* const operands,
JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
// Compute method information.
- CodeItemDataAccessor accessor(called_method);
+ CodeItemDataAccessor accessor(called_method->DexInstructionData());
// Number of registers for the callee's call frame. Note that for non-exact
// invokes, we always derive this information from the callee method. We
@@ -550,7 +557,7 @@ static inline bool MethodHandleInvokeTransform(ArtMethod* called_method,
// - One for the only method argument (an EmulatedStackFrame).
static constexpr size_t kNumRegsForTransform = 2;
- CodeItemDataAccessor accessor(called_method);
+ CodeItemDataAccessor accessor(called_method->DexInstructionData());
DCHECK_EQ(kNumRegsForTransform, accessor.RegistersSize());
DCHECK_EQ(kNumRegsForTransform, accessor.InsSize());
@@ -1034,7 +1041,7 @@ static inline bool MethodHandleInvokeExactInternal(
}
// Compute method information.
- CodeItemDataAccessor accessor(called_method);
+ CodeItemDataAccessor accessor(called_method->DexInstructionData());
uint16_t num_regs;
size_t num_input_regs;
size_t first_dest_reg;
diff --git a/runtime/method_handles_test.cc b/runtime/method_handles_test.cc
new file mode 100644
index 0000000000..a9473421cb
--- /dev/null
+++ b/runtime/method_handles_test.cc
@@ -0,0 +1,382 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "method_handles.h"
+
+#include "class_linker-inl.h"
+#include "common_runtime_test.h"
+#include "handle_scope-inl.h"
+#include "jvalue-inl.h"
+#include "mirror/method_type.h"
+#include "mirror/object_array-inl.h"
+#include "reflection.h"
+#include "scoped_thread_state_change-inl.h"
+#include "thread-current-inl.h"
+
+namespace art {
+
+namespace {
+ bool IsClassCastException(ObjPtr<mirror::Throwable> throwable)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return throwable->GetClass()->DescriptorEquals("Ljava/lang/ClassCastException;");
+ }
+
+ bool IsNullPointerException(ObjPtr<mirror::Throwable> throwable)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return throwable->GetClass()->DescriptorEquals("Ljava/lang/NullPointerException;");
+ }
+
+ bool IsWrongMethodTypeException(ObjPtr<mirror::Throwable> throwable)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return throwable->GetClass()->DescriptorEquals("Ljava/lang/invoke/WrongMethodTypeException;");
+ }
+
+ static mirror::MethodType* CreateVoidMethodType(Thread* self,
+ Handle<mirror::Class> parameter_type)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ ClassLinker* cl = Runtime::Current()->GetClassLinker();
+ StackHandleScope<2> hs(self);
+ ObjPtr<mirror::Class> class_type = mirror::Class::GetJavaLangClass();
+ ObjPtr<mirror::Class> class_array_type = cl->FindArrayClass(self, &class_type);
+ auto parameter_types = hs.NewHandle(
+ mirror::ObjectArray<mirror::Class>::Alloc(self, class_array_type, 1));
+ parameter_types->Set(0, parameter_type.Get());
+ Handle<mirror::Class> void_class = hs.NewHandle(cl->FindPrimitiveClass('V'));
+ return mirror::MethodType::Create(self, void_class, parameter_types);
+ }
+
+ static bool TryConversion(Thread* self,
+ Handle<mirror::Class> from,
+ Handle<mirror::Class> to,
+ JValue* value)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ StackHandleScope<2> hs(self);
+ Handle<mirror::MethodType> from_mt = hs.NewHandle(CreateVoidMethodType(self, from));
+ Handle<mirror::MethodType> to_mt = hs.NewHandle(CreateVoidMethodType(self, to));
+ return ConvertJValueCommon(from_mt, to_mt, from.Get(), to.Get(), value);
+ }
+} // namespace
+
+class MethodHandlesTest : public CommonRuntimeTest {};
+
+//
+// Primitive -> Primitive Conversions
+//
+
+TEST_F(MethodHandlesTest, SupportedPrimitiveWideningBI) {
+ ScopedObjectAccess soa(Thread::Current());
+ ClassLinker* cl = Runtime::Current()->GetClassLinker();
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::Class> from = hs.NewHandle(cl->FindPrimitiveClass('B'));
+ Handle<mirror::Class> to = hs.NewHandle(cl->FindPrimitiveClass('I'));
+ JValue value = JValue::FromPrimitive(static_cast<int8_t>(3));
+ ASSERT_TRUE(TryConversion(soa.Self(), from, to, &value));
+ ASSERT_EQ(3, value.GetI());
+ ASSERT_FALSE(soa.Self()->IsExceptionPending());
+}
+
+TEST_F(MethodHandlesTest, SupportedPrimitiveWideningCJ) {
+ ScopedObjectAccess soa(Thread::Current());
+ ClassLinker* cl = Runtime::Current()->GetClassLinker();
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::Class> from = hs.NewHandle(cl->FindPrimitiveClass('C'));
+ Handle<mirror::Class> to = hs.NewHandle(cl->FindPrimitiveClass('J'));
+ uint16_t raw_value = 0x8000;
+ JValue value = JValue::FromPrimitive(raw_value);
+ ASSERT_TRUE(TryConversion(soa.Self(), from, to, &value));
+ ASSERT_FALSE(soa.Self()->IsExceptionPending());
+ ASSERT_EQ(static_cast<int64_t>(raw_value), value.GetJ());
+}
+
+TEST_F(MethodHandlesTest, SupportedPrimitiveWideningIF) {
+ ScopedObjectAccess soa(Thread::Current());
+ ClassLinker* cl = Runtime::Current()->GetClassLinker();
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::Class> from = hs.NewHandle(cl->FindPrimitiveClass('I'));
+ Handle<mirror::Class> to = hs.NewHandle(cl->FindPrimitiveClass('F'));
+ JValue value = JValue::FromPrimitive(-16);
+ ASSERT_TRUE(TryConversion(soa.Self(), from, to, &value));
+ ASSERT_FALSE(soa.Self()->IsExceptionPending());
+ ASSERT_FLOAT_EQ(-16.0f, value.GetF());
+}
+
+TEST_F(MethodHandlesTest, UnsupportedPrimitiveWideningBC) {
+ ScopedObjectAccess soa(Thread::Current());
+ ClassLinker* cl = Runtime::Current()->GetClassLinker();
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::Class> from = hs.NewHandle(cl->FindPrimitiveClass('B'));
+ Handle<mirror::Class> to = hs.NewHandle(cl->FindPrimitiveClass('C'));
+ JValue value;
+ value.SetB(0);
+ ASSERT_FALSE(TryConversion(soa.Self(), from, to, &value));
+ ASSERT_TRUE(soa.Self()->IsExceptionPending());
+ ASSERT_TRUE(IsWrongMethodTypeException(soa.Self()->GetException()));
+ soa.Self()->ClearException();
+}
+
+TEST_F(MethodHandlesTest, UnsupportedPrimitiveWideningSC) {
+ ScopedObjectAccess soa(Thread::Current());
+ ClassLinker* cl = Runtime::Current()->GetClassLinker();
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::Class> from = hs.NewHandle(cl->FindPrimitiveClass('S'));
+ Handle<mirror::Class> to = hs.NewHandle(cl->FindPrimitiveClass('C'));
+ JValue value;
+ value.SetS(0x1234);
+ ASSERT_FALSE(TryConversion(soa.Self(), from, to, &value));
+ ASSERT_TRUE(soa.Self()->IsExceptionPending());
+ ASSERT_TRUE(IsWrongMethodTypeException(soa.Self()->GetException()));
+ soa.Self()->ClearException();
+}
+
+TEST_F(MethodHandlesTest, UnsupportedPrimitiveWideningDJ) {
+ ScopedObjectAccess soa(Thread::Current());
+ ClassLinker* cl = Runtime::Current()->GetClassLinker();
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::Class> from = hs.NewHandle(cl->FindPrimitiveClass('D'));
+ Handle<mirror::Class> to = hs.NewHandle(cl->FindPrimitiveClass('J'));
+ JValue value;
+ value.SetD(1e72);
+ ASSERT_FALSE(TryConversion(soa.Self(), from, to, &value));
+ ASSERT_TRUE(soa.Self()->IsExceptionPending());
+ ASSERT_TRUE(IsWrongMethodTypeException(soa.Self()->GetException()));
+ soa.Self()->ClearException();
+}
+
+TEST_F(MethodHandlesTest, UnsupportedPrimitiveWideningZI) {
+ ScopedObjectAccess soa(Thread::Current());
+ ClassLinker* cl = Runtime::Current()->GetClassLinker();
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::Class> from = hs.NewHandle(cl->FindPrimitiveClass('Z'));
+ Handle<mirror::Class> to = hs.NewHandle(cl->FindPrimitiveClass('I'));
+ JValue value;
+ value.SetZ(true);
+ ASSERT_FALSE(TryConversion(soa.Self(), from, to, &value));
+ ASSERT_TRUE(soa.Self()->IsExceptionPending());
+ ASSERT_TRUE(IsWrongMethodTypeException(soa.Self()->GetException()));
+ soa.Self()->ClearException();
+}
+
+//
+// Reference -> Reference Conversions
+//
+
+TEST_F(MethodHandlesTest, SupportedReferenceCast) {
+ ScopedObjectAccess soa(Thread::Current());
+ ClassLinker* cl = Runtime::Current()->GetClassLinker();
+ StackHandleScope<3> hs(soa.Self());
+ static const int32_t kInitialValue = 101;
+ JValue value = JValue::FromPrimitive(kInitialValue);
+ Handle<mirror::Object> boxed_value = hs.NewHandle(BoxPrimitive(Primitive::kPrimInt, value).Ptr());
+ Handle<mirror::Class> from = hs.NewHandle(boxed_value->GetClass());
+ Handle<mirror::Class> to = hs.NewHandle(cl->FindSystemClass(soa.Self(), "Ljava/lang/Number;"));
+ value.SetL(boxed_value.Get());
+ ASSERT_TRUE(TryConversion(soa.Self(), from, to, &value));
+ ASSERT_FALSE(soa.Self()->IsExceptionPending());
+ JValue unboxed_value;
+ ASSERT_TRUE(UnboxPrimitiveForResult(value.GetL(), cl->FindPrimitiveClass('I'), &unboxed_value));
+ ASSERT_EQ(kInitialValue, unboxed_value.GetI());
+}
+
+TEST_F(MethodHandlesTest, UnsupportedReferenceCast) {
+ ScopedObjectAccess soa(Thread::Current());
+ ClassLinker* cl = Runtime::Current()->GetClassLinker();
+ StackHandleScope<3> hs(soa.Self());
+ JValue value = JValue::FromPrimitive(3.733e2);
+ Handle<mirror::Object> boxed_value =
+ hs.NewHandle(BoxPrimitive(Primitive::kPrimDouble, value).Ptr());
+ Handle<mirror::Class> from = hs.NewHandle(boxed_value->GetClass());
+ Handle<mirror::Class> to = hs.NewHandle(cl->FindSystemClass(soa.Self(), "Ljava/lang/Integer;"));
+ value.SetL(boxed_value.Get());
+ ASSERT_FALSE(soa.Self()->IsExceptionPending());
+ ASSERT_FALSE(TryConversion(soa.Self(), from, to, &value));
+ ASSERT_TRUE(soa.Self()->IsExceptionPending());
+ ASSERT_TRUE(IsClassCastException(soa.Self()->GetException()));
+ soa.Self()->ClearException();
+}
+
+//
+// Primitive -> Reference Conversions
+//
+
+TEST_F(MethodHandlesTest, SupportedPrimitiveConversionPrimitiveToBoxed) {
+ ScopedObjectAccess soa(Thread::Current());
+ ClassLinker* cl = Runtime::Current()->GetClassLinker();
+ StackHandleScope<2> hs(soa.Self());
+ const int32_t kInitialValue = 1;
+ JValue value = JValue::FromPrimitive(kInitialValue);
+ Handle<mirror::Class> from = hs.NewHandle(cl->FindPrimitiveClass('I'));
+ Handle<mirror::Class> to = hs.NewHandle(cl->FindSystemClass(soa.Self(), "Ljava/lang/Integer;"));
+ ASSERT_TRUE(TryConversion(soa.Self(), from, to, &value));
+ ASSERT_FALSE(soa.Self()->IsExceptionPending());
+ JValue unboxed_to_value;
+ ASSERT_TRUE(UnboxPrimitiveForResult(value.GetL(), from.Get(), &unboxed_to_value));
+ ASSERT_EQ(kInitialValue, unboxed_to_value.GetI());
+}
+
+TEST_F(MethodHandlesTest, SupportedPrimitiveConversionPrimitiveToBoxedSuper) {
+ ScopedObjectAccess soa(Thread::Current());
+ ClassLinker* cl = Runtime::Current()->GetClassLinker();
+ StackHandleScope<2> hs(soa.Self());
+ const int32_t kInitialValue = 1;
+ JValue value = JValue::FromPrimitive(kInitialValue);
+ Handle<mirror::Class> from = hs.NewHandle(cl->FindPrimitiveClass('I'));
+ Handle<mirror::Class> to = hs.NewHandle(cl->FindSystemClass(soa.Self(), "Ljava/lang/Number;"));
+ ASSERT_TRUE(TryConversion(soa.Self(), from, to, &value));
+ ASSERT_FALSE(soa.Self()->IsExceptionPending());
+ JValue unboxed_to_value;
+ ASSERT_TRUE(UnboxPrimitiveForResult(value.GetL(), from.Get(), &unboxed_to_value));
+ ASSERT_EQ(kInitialValue, unboxed_to_value.GetI());
+}
+
+TEST_F(MethodHandlesTest, UnsupportedPrimitiveConversionNotBoxable) {
+ ScopedObjectAccess soa(Thread::Current());
+ ClassLinker* cl = Runtime::Current()->GetClassLinker();
+ StackHandleScope<2> hs(soa.Self());
+ const int32_t kInitialValue = 1;
+ JValue value = JValue::FromPrimitive(kInitialValue);
+ Handle<mirror::Class> from = hs.NewHandle(cl->FindPrimitiveClass('I'));
+ Handle<mirror::Class> to = hs.NewHandle(cl->FindSystemClass(soa.Self(), "Ljava/lang/Runtime;"));
+ ASSERT_FALSE(TryConversion(soa.Self(), from, to, &value));
+ ASSERT_TRUE(soa.Self()->IsExceptionPending());
+ ASSERT_TRUE(IsWrongMethodTypeException(soa.Self()->GetException()));
+ soa.Self()->ClearException();
+}
+
+TEST_F(MethodHandlesTest, UnsupportedPrimitiveConversionPrimitiveToBoxedWider) {
+ ScopedObjectAccess soa(Thread::Current());
+ ClassLinker* cl = Runtime::Current()->GetClassLinker();
+ StackHandleScope<2> hs(soa.Self());
+ const int32_t kInitialValue = 1;
+ JValue value = JValue::FromPrimitive(kInitialValue);
+ Handle<mirror::Class> from = hs.NewHandle(cl->FindPrimitiveClass('I'));
+ Handle<mirror::Class> to = hs.NewHandle(cl->FindSystemClass(soa.Self(), "Ljava/lang/Long;"));
+ ASSERT_FALSE(TryConversion(soa.Self(), from, to, &value));
+ ASSERT_TRUE(soa.Self()->IsExceptionPending());
+ ASSERT_TRUE(IsWrongMethodTypeException(soa.Self()->GetException()));
+ soa.Self()->ClearException();
+}
+
+TEST_F(MethodHandlesTest, UnsupportedPrimitiveConversionPrimitiveToBoxedNarrower) {
+ ScopedObjectAccess soa(Thread::Current());
+ ClassLinker* cl = Runtime::Current()->GetClassLinker();
+ StackHandleScope<2> hs(soa.Self());
+ const int32_t kInitialValue = 1;
+ JValue value = JValue::FromPrimitive(kInitialValue);
+ Handle<mirror::Class> from = hs.NewHandle(cl->FindPrimitiveClass('I'));
+ Handle<mirror::Class> to = hs.NewHandle(cl->FindSystemClass(soa.Self(), "Ljava/lang/Byte;"));
+ ASSERT_FALSE(TryConversion(soa.Self(), from, to, &value));
+ ASSERT_TRUE(soa.Self()->IsExceptionPending());
+ ASSERT_TRUE(IsWrongMethodTypeException(soa.Self()->GetException()));
+ soa.Self()->ClearException();
+}
+
+//
+// Reference -> Primitive Conversions
+//
+
+TEST_F(MethodHandlesTest, SupportedBoxedToPrimitiveConversion) {
+ ScopedObjectAccess soa(Thread::Current());
+ ClassLinker* cl = Runtime::Current()->GetClassLinker();
+ StackHandleScope<3> hs(soa.Self());
+ const int32_t kInitialValue = 101;
+ JValue value = JValue::FromPrimitive(kInitialValue);
+ Handle<mirror::Object> boxed_value = hs.NewHandle(BoxPrimitive(Primitive::kPrimInt, value).Ptr());
+ Handle<mirror::Class> from = hs.NewHandle(cl->FindSystemClass(soa.Self(), "Ljava/lang/Integer;"));
+ Handle<mirror::Class> to = hs.NewHandle(cl->FindPrimitiveClass('I'));
+ value.SetL(boxed_value.Get());
+ ASSERT_TRUE(TryConversion(soa.Self(), from, to, &value));
+ ASSERT_FALSE(soa.Self()->IsExceptionPending());
+ ASSERT_EQ(kInitialValue, value.GetI());
+}
+
+TEST_F(MethodHandlesTest, SupportedBoxedToWiderPrimitiveConversion) {
+ ScopedObjectAccess soa(Thread::Current());
+ ClassLinker* cl = Runtime::Current()->GetClassLinker();
+ StackHandleScope<3> hs(soa.Self());
+ static const int32_t kInitialValue = 101;
+ JValue value = JValue::FromPrimitive(kInitialValue);
+ Handle<mirror::Object> boxed_value = hs.NewHandle(BoxPrimitive(Primitive::kPrimInt, value).Ptr());
+ Handle<mirror::Class> from = hs.NewHandle(cl->FindSystemClass(soa.Self(), "Ljava/lang/Integer;"));
+ Handle<mirror::Class> to = hs.NewHandle(cl->FindPrimitiveClass('J'));
+ value.SetL(boxed_value.Get());
+ ASSERT_TRUE(TryConversion(soa.Self(), from, to, &value));
+ ASSERT_EQ(kInitialValue, value.GetJ());
+}
+
+TEST_F(MethodHandlesTest, UnsupportedNullBoxedToPrimitiveConversion) {
+ ScopedObjectAccess soa(Thread::Current());
+ ClassLinker* cl = Runtime::Current()->GetClassLinker();
+ StackHandleScope<3> hs(soa.Self());
+ JValue value = JValue::FromPrimitive(101);
+ ScopedNullHandle<mirror::Object> boxed_value;
+ Handle<mirror::Class> from = hs.NewHandle(cl->FindSystemClass(soa.Self(), "Ljava/lang/Integer;"));
+ Handle<mirror::Class> to = hs.NewHandle(cl->FindPrimitiveClass('I'));
+ value.SetL(boxed_value.Get());
+ ASSERT_FALSE(TryConversion(soa.Self(), from, to, &value));
+ ASSERT_TRUE(soa.Self()->IsExceptionPending());
+ ASSERT_TRUE(IsNullPointerException(soa.Self()->GetException()));
+ soa.Self()->ClearException();
+}
+
+TEST_F(MethodHandlesTest, UnsupportedNotBoxReferenceToPrimitiveConversion) {
+ ScopedObjectAccess soa(Thread::Current());
+ ClassLinker* cl = Runtime::Current()->GetClassLinker();
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::Class> from = hs.NewHandle(cl->FindSystemClass(soa.Self(), "Ljava/lang/Class;"));
+ Handle<mirror::Class> to = hs.NewHandle(cl->FindPrimitiveClass('I'));
+ // Set the value to be converted to a reference that is not a boxed primitive.
+ JValue value;
+ value.SetL(cl->FindPrimitiveClass('V'));
+ ASSERT_FALSE(TryConversion(soa.Self(), from, to, &value));
+ ASSERT_TRUE(soa.Self()->IsExceptionPending());
+ ASSERT_TRUE(IsWrongMethodTypeException(soa.Self()->GetException()));
+ soa.Self()->ClearException();
+}
+
+TEST_F(MethodHandlesTest, UnsupportedBoxedToNarrowerPrimitiveConversionNoCast) {
+ ScopedObjectAccess soa(Thread::Current());
+ ClassLinker* cl = Runtime::Current()->GetClassLinker();
+ StackHandleScope<3> hs(soa.Self());
+ static const int32_t kInitialValue = 101;
+ JValue value = JValue::FromPrimitive(kInitialValue);
+ Handle<mirror::Object> boxed_value = hs.NewHandle(BoxPrimitive(Primitive::kPrimInt, value).Ptr());
+ Handle<mirror::Class> from = hs.NewHandle(cl->FindSystemClass(soa.Self(), "Ljava/lang/Integer;"));
+ Handle<mirror::Class> to = hs.NewHandle(cl->FindPrimitiveClass('S'));
+ value.SetL(boxed_value.Get());
+ ASSERT_FALSE(TryConversion(soa.Self(), from, to, &value));
+ ASSERT_TRUE(soa.Self()->IsExceptionPending());
+ ASSERT_TRUE(IsWrongMethodTypeException(soa.Self()->GetException()));
+ soa.Self()->ClearException();
+}
+
+TEST_F(MethodHandlesTest, UnsupportedBoxedToNarrowerPrimitiveConversionWithCast) {
+ ScopedObjectAccess soa(Thread::Current());
+ ClassLinker* cl = Runtime::Current()->GetClassLinker();
+ StackHandleScope<3> hs(soa.Self());
+ static const double kInitialValue = 1e77;
+ JValue value = JValue::FromPrimitive(kInitialValue);
+ Handle<mirror::Object> boxed_value =
+ hs.NewHandle(BoxPrimitive(Primitive::kPrimDouble, value).Ptr());
+ Handle<mirror::Class> from = hs.NewHandle(cl->FindSystemClass(soa.Self(), "Ljava/lang/Number;"));
+ Handle<mirror::Class> to = hs.NewHandle(cl->FindPrimitiveClass('F'));
+ value.SetL(boxed_value.Get());
+ ASSERT_FALSE(TryConversion(soa.Self(), from, to, &value));
+ ASSERT_TRUE(soa.Self()->IsExceptionPending());
+ ASSERT_TRUE(IsClassCastException(soa.Self()->GetException()));
+ soa.Self()->ClearException();
+}
+
+} // namespace art
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 302a5e622e..36388eb3aa 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -26,11 +26,12 @@
#include "class_linker.h"
#include "class_loader.h"
#include "common_throws.h"
-#include "dex_cache.h"
#include "dex/dex_file-inl.h"
+#include "dex/invoke_type.h"
+#include "dex_cache.h"
#include "gc/heap-inl.h"
+#include "hidden_api.h"
#include "iftable.h"
-#include "invoke_type.h"
#include "subtype_check.h"
#include "object-inl.h"
#include "object_array.h"
@@ -1143,6 +1144,10 @@ inline bool Class::CanAccessMember(ObjPtr<Class> access_to, uint32_t member_flag
if (this == access_to) {
return true;
}
+ // Do not allow non-boot class path classes to access hidden APIs.
+ if (hiddenapi::ShouldBlockAccessToMember(member_flags, this)) {
+ return false;
+ }
// Public members are trivially accessible
if (member_flags & kAccPublic) {
return true;
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 55c588930e..ced7c7c908 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -25,10 +25,10 @@
#include "class_status.h"
#include "dex/dex_file.h"
#include "dex/dex_file_types.h"
+#include "dex/modifiers.h"
#include "gc/allocator_type.h"
#include "gc_root.h"
#include "imtable.h"
-#include "modifiers.h"
#include "object.h"
#include "object_array.h"
#include "primitive.h"
@@ -81,9 +81,9 @@ class MANAGED Class FINAL : public Object {
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ClassStatus GetStatus() REQUIRES_SHARED(Locks::mutator_lock_) {
// Avoid including "subtype_check_bits_and_status.h" to get the field.
- // The ClassStatus is always in the least-significant bits of status_.
+ // The ClassStatus is always in the 4 most-significant bits of status_.
return enum_cast<ClassStatus>(
- static_cast<uint32_t>(GetField32Volatile<kVerifyFlags>(StatusOffset())) & 0xff);
+ static_cast<uint32_t>(GetField32Volatile<kVerifyFlags>(StatusOffset())) >> (32 - 4));
}
// This is static because 'this' may be moved by GC.
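With the status nibble moved to the top of the 32-bit word, extraction is a plain right shift by 28. A standalone sketch of the bit layout (the concrete values are illustrative; the low bits carry the packed SubtypeCheck state mentioned above):

  #include <cstdint>
  #include <iostream>

  int main() {
    // Pack an arbitrary 4-bit status (here 11) into the most-significant bits and
    // fill the rest with unrelated state.
    uint32_t raw = (11u << (32 - 4)) | 0x00ABCDEFu;  // 0xB0ABCDEF
    uint32_t status = raw >> (32 - 4);
    std::cout << status << "\n";  // 11
  }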
diff --git a/runtime/mirror/field.h b/runtime/mirror/field.h
index 6845575d18..dd09be331a 100644
--- a/runtime/mirror/field.h
+++ b/runtime/mirror/field.h
@@ -19,8 +19,8 @@
#include "accessible_object.h"
#include "base/enums.h"
+#include "dex/modifiers.h"
#include "gc_root.h"
-#include "modifiers.h"
#include "obj_ptr.h"
#include "object.h"
#include "primitive.h"
diff --git a/runtime/mirror/method_handles_lookup.cc b/runtime/mirror/method_handles_lookup.cc
index a390a2ef53..039bbf2932 100644
--- a/runtime/mirror/method_handles_lookup.cc
+++ b/runtime/mirror/method_handles_lookup.cc
@@ -17,11 +17,11 @@
#include "method_handles_lookup.h"
#include "class-inl.h"
+#include "dex/modifiers.h"
#include "gc_root-inl.h"
#include "handle_scope.h"
#include "jni_internal.h"
#include "mirror/method_handle_impl.h"
-#include "modifiers.h"
#include "object-inl.h"
#include "well_known_classes.h"
diff --git a/runtime/mirror/string-inl.h b/runtime/mirror/string-inl.h
index 24c75ec0d8..8c2a49c5f6 100644
--- a/runtime/mirror/string-inl.h
+++ b/runtime/mirror/string-inl.h
@@ -24,11 +24,11 @@
#include "base/bit_utils.h"
#include "class.h"
#include "common_throws.h"
+#include "dex/utf.h"
#include "gc/heap-inl.h"
#include "globals.h"
#include "runtime.h"
#include "thread.h"
-#include "utf.h"
#include "utils.h"
namespace art {
diff --git a/runtime/mirror/string.cc b/runtime/mirror/string.cc
index 82ff6ddead..cad84ceecb 100644
--- a/runtime/mirror/string.cc
+++ b/runtime/mirror/string.cc
@@ -21,6 +21,7 @@
#include "base/array_ref.h"
#include "base/stl_util.h"
#include "class-inl.h"
+#include "dex/utf-inl.h"
#include "gc/accounting/card_table-inl.h"
#include "gc_root-inl.h"
#include "handle_scope-inl.h"
@@ -29,7 +30,6 @@
#include "runtime.h"
#include "string-inl.h"
#include "thread.h"
-#include "utf-inl.h"
namespace art {
namespace mirror {
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 325591fb53..0c9c65a401 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -1378,7 +1378,7 @@ void Monitor::VisitLocks(StackVisitor* stack_visitor, void (*callback)(mirror::O
// Is there any reason to believe there's any synchronization in this method?
CHECK(m->GetCodeItem() != nullptr) << m->PrettyMethod();
- CodeItemDataAccessor accessor(m);
+ CodeItemDataAccessor accessor(m->DexInstructionData());
if (accessor.TriesSize() == 0) {
return; // No "tries" implies no synchronization, so no held locks to report.
}
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index a992b5cb5b..6ea9a7ad62 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -27,8 +27,10 @@
#include <class_loader_context.h>
#include "common_throws.h"
#include "compiler_filter.h"
+#include "dex/art_dex_file_loader.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_file_loader.h"
+#include "jit/debugger_interface.h"
#include "jni_internal.h"
#include "mirror/class_loader.h"
#include "mirror/object-inl.h"
@@ -188,12 +190,13 @@ static const DexFile* CreateDexFile(JNIEnv* env, std::unique_ptr<MemMap> dex_mem
dex_mem_map->Begin(),
dex_mem_map->End());
std::string error_message;
- std::unique_ptr<const DexFile> dex_file(DexFileLoader::Open(location,
- 0,
- std::move(dex_mem_map),
- /* verify */ true,
- /* verify_location */ true,
- &error_message));
+ const ArtDexFileLoader dex_file_loader;
+ std::unique_ptr<const DexFile> dex_file(dex_file_loader.Open(location,
+ 0,
+ std::move(dex_mem_map),
+ /* verify */ true,
+ /* verify_location */ true,
+ &error_message));
if (dex_file == nullptr) {
ScopedObjectAccess soa(env);
ThrowWrappedIOException("%s", error_message.c_str());
@@ -329,6 +332,7 @@ static jboolean DexFile_closeDexFile(JNIEnv* env, jclass, jobject cookie) {
int32_t i = kDexFileIndexStart; // Oat file is at index 0.
for (const DexFile* dex_file : dex_files) {
if (dex_file != nullptr) {
+ DeregisterDexFileForNative(soa.Self(), dex_file->Begin());
// Only delete the dex file if the dex cache is not found to prevent runtime crashes if there
// are calls to DexFile.close while the ART DexFile is still in use.
if (!class_linker->IsDexFileRegistered(soa.Self(), *dex_file)) {
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 400518df20..57a429cf1e 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -74,6 +74,10 @@ static void VMRuntime_startJitCompilation(JNIEnv*, jobject) {
static void VMRuntime_disableJitCompilation(JNIEnv*, jobject) {
}
+static jboolean VMRuntime_hasUsedHiddenApi(JNIEnv*, jobject) {
+ return Runtime::Current()->HasPendingHiddenApiWarning() ? JNI_TRUE : JNI_FALSE;
+}
+
static jobject VMRuntime_newNonMovableArray(JNIEnv* env, jobject, jclass javaElementClass,
jint length) {
ScopedFastNativeObjectAccess soa(env);
@@ -670,6 +674,7 @@ static JNINativeMethod gMethods[] = {
NATIVE_METHOD(VMRuntime, clearGrowthLimit, "()V"),
NATIVE_METHOD(VMRuntime, concurrentGC, "()V"),
NATIVE_METHOD(VMRuntime, disableJitCompilation, "()V"),
+ NATIVE_METHOD(VMRuntime, hasUsedHiddenApi, "()Z"),
NATIVE_METHOD(VMRuntime, getTargetHeapUtilization, "()F"),
FAST_NATIVE_METHOD(VMRuntime, isDebuggerActive, "()Z"),
FAST_NATIVE_METHOD(VMRuntime, isNativeDebuggable, "()Z"),
diff --git a/runtime/native/dalvik_system_VMStack.cc b/runtime/native/dalvik_system_VMStack.cc
index 3e8040bfa5..ed0eb97da1 100644
--- a/runtime/native/dalvik_system_VMStack.cc
+++ b/runtime/native/dalvik_system_VMStack.cc
@@ -160,12 +160,22 @@ static jobjectArray VMStack_getThreadStackTrace(JNIEnv* env, jclass, jobject jav
return Thread::InternalStackTraceToStackTraceElementArray(soa, trace);
}
+static jobjectArray VMStack_getAnnotatedThreadStackTrace(JNIEnv* env, jclass, jobject javaThread) {
+ ScopedFastNativeObjectAccess soa(env);
+ auto fn = [](Thread* thread, const ScopedFastNativeObjectAccess& soaa)
+ REQUIRES_SHARED(Locks::mutator_lock_) -> jobjectArray {
+ return thread->CreateAnnotatedStackTrace(soaa);
+ };
+ return GetThreadStack(soa, javaThread, fn);
+}
+
static JNINativeMethod gMethods[] = {
FAST_NATIVE_METHOD(VMStack, fillStackTraceElements, "(Ljava/lang/Thread;[Ljava/lang/StackTraceElement;)I"),
FAST_NATIVE_METHOD(VMStack, getCallingClassLoader, "()Ljava/lang/ClassLoader;"),
FAST_NATIVE_METHOD(VMStack, getClosestUserClassLoader, "()Ljava/lang/ClassLoader;"),
FAST_NATIVE_METHOD(VMStack, getStackClass2, "()Ljava/lang/Class;"),
FAST_NATIVE_METHOD(VMStack, getThreadStackTrace, "(Ljava/lang/Thread;)[Ljava/lang/StackTraceElement;"),
+ FAST_NATIVE_METHOD(VMStack, getAnnotatedThreadStackTrace, "(Ljava/lang/Thread;)[Ldalvik/system/AnnotatedStackTraceElement;"),
};
void register_dalvik_system_VMStack(JNIEnv* env) {
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index fd80aaeaf7..648a464b6e 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -173,6 +173,8 @@ enum {
DEBUG_JAVA_DEBUGGABLE = 1 << 8,
DISABLE_VERIFIER = 1 << 9,
ONLY_USE_SYSTEM_OAT_FILES = 1 << 10,
+ DISABLE_HIDDEN_API_CHECKS = 1 << 11,
+ DEBUG_GENERATE_MINI_DEBUG_INFO = 1 << 12,
};
static uint32_t EnableDebugFeatures(uint32_t runtime_flags) {
@@ -209,12 +211,6 @@ static uint32_t EnableDebugFeatures(uint32_t runtime_flags) {
runtime_flags &= ~DEBUG_ENABLE_SAFEMODE;
}
- const bool generate_debug_info = (runtime_flags & DEBUG_GENERATE_DEBUG_INFO) != 0;
- if (generate_debug_info) {
- runtime->AddCompilerOption("--generate-debug-info");
- runtime_flags &= ~DEBUG_GENERATE_DEBUG_INFO;
- }
-
// This is for backwards compatibility with Dalvik.
runtime_flags &= ~DEBUG_ENABLE_ASSERT;
@@ -228,6 +224,7 @@ static uint32_t EnableDebugFeatures(uint32_t runtime_flags) {
bool needs_non_debuggable_classes = false;
if ((runtime_flags & DEBUG_JAVA_DEBUGGABLE) != 0) {
runtime->AddCompilerOption("--debuggable");
+ runtime_flags |= DEBUG_GENERATE_MINI_DEBUG_INFO;
runtime->SetJavaDebuggable(true);
// Deoptimize the boot image as it may be non-debuggable.
runtime->DeoptimizeBootImage();
@@ -240,11 +237,23 @@ static uint32_t EnableDebugFeatures(uint32_t runtime_flags) {
if ((runtime_flags & DEBUG_NATIVE_DEBUGGABLE) != 0) {
runtime->AddCompilerOption("--debuggable");
- runtime->AddCompilerOption("--generate-debug-info");
+ runtime_flags |= DEBUG_GENERATE_DEBUG_INFO;
runtime->SetNativeDebuggable(true);
runtime_flags &= ~DEBUG_NATIVE_DEBUGGABLE;
}
+ if ((runtime_flags & DEBUG_GENERATE_MINI_DEBUG_INFO) != 0) {
+ // Generate minimal native debug information to allow backtracing.
+ runtime->AddCompilerOption("--generate-mini-debug-info");
+ runtime_flags &= ~DEBUG_GENERATE_MINI_DEBUG_INFO;
+ }
+
+ if ((runtime_flags & DEBUG_GENERATE_DEBUG_INFO) != 0) {
+ // Generate all native debug information we can (e.g. line-numbers).
+ runtime->AddCompilerOption("--generate-debug-info");
+ runtime_flags &= ~DEBUG_GENERATE_DEBUG_INFO;
+ }
+
return runtime_flags;
}
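EnableDebugFeatures() follows a consume-what-you-understand pattern: test a flag, apply its side effect, clear the bit, and leave any unknown bits set so the caller can warn about them. A minimal standalone sketch of that pattern with hypothetical flag names:

  #include <cstdint>
  #include <cstdio>

  enum : uint32_t {
    FLAG_A = 1u << 0,
    FLAG_B = 1u << 1,
  };

  uint32_t ConsumeFlags(uint32_t flags) {
    if ((flags & FLAG_A) != 0) {
      std::puts("enabling feature A");
      flags &= ~FLAG_A;
    }
    if ((flags & FLAG_B) != 0) {
      std::puts("enabling feature B");
      flags &= ~FLAG_B;
    }
    return flags;  // Any bits still set here were not recognized.
  }

  int main() {
    uint32_t leftover = ConsumeFlags(FLAG_A | (1u << 7));
    if (leftover != 0) {
      std::printf("Unknown bits set in runtime_flags: %#x\n", leftover);
    }
  }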
@@ -273,6 +282,7 @@ static void ZygoteHooks_nativePostForkChild(JNIEnv* env,
// Our system thread ID, etc, has changed so reset Thread state.
thread->InitAfterFork();
runtime_flags = EnableDebugFeatures(runtime_flags);
+ bool do_hidden_api_checks = true;
if ((runtime_flags & DISABLE_VERIFIER) != 0) {
Runtime::Current()->DisableVerifier();
@@ -284,6 +294,11 @@ static void ZygoteHooks_nativePostForkChild(JNIEnv* env,
runtime_flags &= ~ONLY_USE_SYSTEM_OAT_FILES;
}
+ if ((runtime_flags & DISABLE_HIDDEN_API_CHECKS) != 0) {
+ do_hidden_api_checks = false;
+ runtime_flags &= ~DISABLE_HIDDEN_API_CHECKS;
+ }
+
if (runtime_flags != 0) {
LOG(ERROR) << StringPrintf("Unknown bits set in runtime_flags: %#x", runtime_flags);
}
@@ -331,6 +346,13 @@ static void ZygoteHooks_nativePostForkChild(JNIEnv* env,
}
}
+ DCHECK(!is_system_server || !do_hidden_api_checks)
+ << "SystemServer should be forked with DISABLE_HIDDEN_API_CHECKS";
+ Runtime::Current()->SetHiddenApiChecksEnabled(do_hidden_api_checks);
+
+ // Clear the hidden API warning flag, in case it was set.
+ Runtime::Current()->SetPendingHiddenApiWarning(false);
+
if (instruction_set != nullptr && !is_system_server) {
ScopedUtfChars isa_string(env, instruction_set);
InstructionSet isa = GetInstructionSetFromString(isa_string.c_str());
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index 7b999c04af..2091a27ffd 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -25,6 +25,8 @@
#include "common_throws.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_file_annotations.h"
+#include "dex/utf.h"
+#include "hidden_api.h"
#include "jni_internal.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
@@ -42,11 +44,80 @@
#include "reflection.h"
#include "scoped_fast_native_object_access-inl.h"
#include "scoped_thread_state_change-inl.h"
-#include "utf.h"
#include "well_known_classes.h"
namespace art {
+// Returns true if the first non-ClassClass caller up the stack is in boot class path.
+static bool IsCallerInBootClassPath(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) {
+ // Walk the stack and find the first frame not from java.lang.Class.
+ // This is very expensive. Save this check for last.
+ struct FirstNonClassClassCallerVisitor : public StackVisitor {
+ explicit FirstNonClassClassCallerVisitor(Thread* thread)
+ : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
+ caller(nullptr) {
+ }
+
+ bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
+ ArtMethod* m = GetMethod();
+ if (m == nullptr) {
+ // Attached native thread. Assume this is *not* boot class path.
+ caller = nullptr;
+ return false;
+ } else if (m->IsRuntimeMethod()) {
+ // Internal runtime method, continue walking the stack.
+ return true;
+ } else if (m->GetDeclaringClass()->IsClassClass()) {
+ return true;
+ } else {
+ caller = m;
+ return false;
+ }
+ }
+
+ ArtMethod* caller;
+ };
+
+ FirstNonClassClassCallerVisitor visitor(self);
+ visitor.WalkStack();
+ return visitor.caller != nullptr &&
+ visitor.caller->GetDeclaringClass()->IsBootStrapClassLoaded();
+}
+
+// Returns true if the first non-ClassClass caller up the stack is not allowed to
+// access hidden APIs. This can be *very* expensive. Never call this in a loop.
+ALWAYS_INLINE static bool ShouldEnforceHiddenApi(Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return Runtime::Current()->AreHiddenApiChecksEnabled() &&
+ !IsCallerInBootClassPath(self);
+}
+
+// Returns true if the first non-ClassClass caller up the stack should not be
+// allowed access to `member`.
+template<typename T>
+ALWAYS_INLINE static bool ShouldBlockAccessToMember(T* member, Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return hiddenapi::ShouldBlockAccessToMember(member, self, IsCallerInBootClassPath);
+}
+
+// Returns true if a class member should be discoverable with reflection given
+// the criteria. Some reflection calls only return public members
+// (public_only == true), some members should be hidden from non-boot class path
+// callers (enforce_hidden_api == true).
+ALWAYS_INLINE static bool IsDiscoverable(bool public_only,
+ bool enforce_hidden_api,
+ uint32_t access_flags) {
+ if (public_only && ((access_flags & kAccPublic) == 0)) {
+ return false;
+ }
+
+ if (enforce_hidden_api && hiddenapi::GetMemberAction(access_flags) == hiddenapi::kDeny) {
+ return false;
+ }
+
+ return true;
+}
+
ALWAYS_INLINE static inline ObjPtr<mirror::Class> DecodeClass(
const ScopedFastNativeObjectAccess& soa, jobject java_class)
REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -164,17 +235,16 @@ static mirror::ObjectArray<mirror::Field>* GetDeclaredFields(
IterationRange<StrideIterator<ArtField>> ifields = klass->GetIFields();
IterationRange<StrideIterator<ArtField>> sfields = klass->GetSFields();
size_t array_size = klass->NumInstanceFields() + klass->NumStaticFields();
- if (public_only) {
- // Lets go subtract all the non public fields.
- for (ArtField& field : ifields) {
- if (!field.IsPublic()) {
- --array_size;
- }
+ bool enforce_hidden_api = ShouldEnforceHiddenApi(self);
+  // Let's subtract all the non-discoverable fields.
+ for (ArtField& field : ifields) {
+ if (!IsDiscoverable(public_only, enforce_hidden_api, field.GetAccessFlags())) {
+ --array_size;
}
- for (ArtField& field : sfields) {
- if (!field.IsPublic()) {
- --array_size;
- }
+ }
+ for (ArtField& field : sfields) {
+ if (!IsDiscoverable(public_only, enforce_hidden_api, field.GetAccessFlags())) {
+ --array_size;
}
}
size_t array_idx = 0;
@@ -184,7 +254,7 @@ static mirror::ObjectArray<mirror::Field>* GetDeclaredFields(
return nullptr;
}
for (ArtField& field : ifields) {
- if (!public_only || field.IsPublic()) {
+ if (IsDiscoverable(public_only, enforce_hidden_api, field.GetAccessFlags())) {
auto* reflect_field = mirror::Field::CreateFromArtField<kRuntimePointerSize>(self,
&field,
force_resolve);
@@ -199,7 +269,7 @@ static mirror::ObjectArray<mirror::Field>* GetDeclaredFields(
}
}
for (ArtField& field : sfields) {
- if (!public_only || field.IsPublic()) {
+ if (IsDiscoverable(public_only, enforce_hidden_api, field.GetAccessFlags())) {
auto* reflect_field = mirror::Field::CreateFromArtField<kRuntimePointerSize>(self,
&field,
force_resolve);
@@ -354,8 +424,13 @@ static jobject Class_getPublicFieldRecursive(JNIEnv* env, jobject javaThis, jstr
ThrowNullPointerException("name == null");
return nullptr;
}
- return soa.AddLocalReference<jobject>(
- GetPublicFieldRecursive(soa.Self(), DecodeClass(soa, javaThis), name_string));
+
+ mirror::Field* field = GetPublicFieldRecursive(
+ soa.Self(), DecodeClass(soa, javaThis), name_string);
+ if (field == nullptr || ShouldBlockAccessToMember(field->GetArtField(), soa.Self())) {
+ return nullptr;
+ }
+ return soa.AddLocalReference<jobject>(field);
}
static jobject Class_getDeclaredField(JNIEnv* env, jobject javaThis, jstring name) {
@@ -369,7 +444,7 @@ static jobject Class_getDeclaredField(JNIEnv* env, jobject javaThis, jstring nam
Handle<mirror::Class> h_klass = hs.NewHandle(DecodeClass(soa, javaThis));
Handle<mirror::Field> result =
hs.NewHandle(GetDeclaredField(soa.Self(), h_klass.Get(), h_string.Get()));
- if (result == nullptr) {
+ if (result == nullptr || ShouldBlockAccessToMember(result->GetArtField(), soa.Self())) {
std::string name_str = h_string->ToModifiedUtf8();
if (name_str == "value" && h_klass->IsStringClass()) {
// We log the error for this specific case, as the user might just swallow the exception.
@@ -399,24 +474,32 @@ static jobject Class_getDeclaredConstructorInternal(
soa.Self(),
DecodeClass(soa, javaThis),
soa.Decode<mirror::ObjectArray<mirror::Class>>(args));
+ if (result == nullptr || ShouldBlockAccessToMember(result->GetArtMethod(), soa.Self())) {
+ return nullptr;
+ }
return soa.AddLocalReference<jobject>(result);
}
-static ALWAYS_INLINE inline bool MethodMatchesConstructor(ArtMethod* m, bool public_only)
+static ALWAYS_INLINE inline bool MethodMatchesConstructor(
+ ArtMethod* m, bool public_only, bool enforce_hidden_api)
REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(m != nullptr);
- return (!public_only || m->IsPublic()) && !m->IsStatic() && m->IsConstructor();
+ return m->IsConstructor() &&
+ !m->IsStatic() &&
+ IsDiscoverable(public_only, enforce_hidden_api, m->GetAccessFlags());
}
static jobjectArray Class_getDeclaredConstructorsInternal(
JNIEnv* env, jobject javaThis, jboolean publicOnly) {
ScopedFastNativeObjectAccess soa(env);
StackHandleScope<2> hs(soa.Self());
+ bool public_only = (publicOnly != JNI_FALSE);
+ bool enforce_hidden_api = ShouldEnforceHiddenApi(soa.Self());
Handle<mirror::Class> h_klass = hs.NewHandle(DecodeClass(soa, javaThis));
size_t constructor_count = 0;
// Two pass approach for speed.
for (auto& m : h_klass->GetDirectMethods(kRuntimePointerSize)) {
- constructor_count += MethodMatchesConstructor(&m, publicOnly != JNI_FALSE) ? 1u : 0u;
+ constructor_count += MethodMatchesConstructor(&m, public_only, enforce_hidden_api) ? 1u : 0u;
}
auto h_constructors = hs.NewHandle(mirror::ObjectArray<mirror::Constructor>::Alloc(
soa.Self(), mirror::Constructor::ArrayClass(), constructor_count));
@@ -426,7 +509,7 @@ static jobjectArray Class_getDeclaredConstructorsInternal(
}
constructor_count = 0;
for (auto& m : h_klass->GetDirectMethods(kRuntimePointerSize)) {
- if (MethodMatchesConstructor(&m, publicOnly != JNI_FALSE)) {
+ if (MethodMatchesConstructor(&m, public_only, enforce_hidden_api)) {
DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
DCHECK(!Runtime::Current()->IsActiveTransaction());
auto* constructor = mirror::Constructor::CreateFromArtMethod<kRuntimePointerSize, false>(
@@ -452,6 +535,9 @@ static jobject Class_getDeclaredMethodInternal(JNIEnv* env, jobject javaThis,
DecodeClass(soa, javaThis),
soa.Decode<mirror::String>(name),
soa.Decode<mirror::ObjectArray<mirror::Class>>(args));
+ if (result == nullptr || ShouldBlockAccessToMember(result->GetArtMethod(), soa.Self())) {
+ return nullptr;
+ }
return soa.AddLocalReference<jobject>(result);
}
@@ -459,13 +545,17 @@ static jobjectArray Class_getDeclaredMethodsUnchecked(JNIEnv* env, jobject javaT
jboolean publicOnly) {
ScopedFastNativeObjectAccess soa(env);
StackHandleScope<2> hs(soa.Self());
+
+ bool enforce_hidden_api = ShouldEnforceHiddenApi(soa.Self());
+ bool public_only = (publicOnly != JNI_FALSE);
+
Handle<mirror::Class> klass = hs.NewHandle(DecodeClass(soa, javaThis));
size_t num_methods = 0;
- for (auto& m : klass->GetDeclaredMethods(kRuntimePointerSize)) {
- auto modifiers = m.GetAccessFlags();
+ for (ArtMethod& m : klass->GetDeclaredMethods(kRuntimePointerSize)) {
+ uint32_t modifiers = m.GetAccessFlags();
// Add non-constructor declared methods.
- if ((publicOnly == JNI_FALSE || (modifiers & kAccPublic) != 0) &&
- (modifiers & kAccConstructor) == 0) {
+ if ((modifiers & kAccConstructor) == 0 &&
+ IsDiscoverable(public_only, enforce_hidden_api, modifiers)) {
++num_methods;
}
}
@@ -476,10 +566,10 @@ static jobjectArray Class_getDeclaredMethodsUnchecked(JNIEnv* env, jobject javaT
return nullptr;
}
num_methods = 0;
- for (auto& m : klass->GetDeclaredMethods(kRuntimePointerSize)) {
- auto modifiers = m.GetAccessFlags();
- if ((publicOnly == JNI_FALSE || (modifiers & kAccPublic) != 0) &&
- (modifiers & kAccConstructor) == 0) {
+ for (ArtMethod& m : klass->GetDeclaredMethods(kRuntimePointerSize)) {
+ uint32_t modifiers = m.GetAccessFlags();
+ if ((modifiers & kAccConstructor) == 0 &&
+ IsDiscoverable(public_only, enforce_hidden_api, modifiers)) {
DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
DCHECK(!Runtime::Current()->IsActiveTransaction());
auto* method =
@@ -693,11 +783,11 @@ static jobject Class_newInstance(JNIEnv* env, jobject javaThis) {
return nullptr;
}
}
- auto* constructor = klass->GetDeclaredConstructor(
+ ArtMethod* constructor = klass->GetDeclaredConstructor(
soa.Self(),
ScopedNullHandle<mirror::ObjectArray<mirror::Class>>(),
kRuntimePointerSize);
- if (UNLIKELY(constructor == nullptr)) {
+ if (UNLIKELY(constructor == nullptr) || ShouldBlockAccessToMember(constructor, soa.Self())) {
soa.Self()->ThrowNewExceptionF("Ljava/lang/InstantiationException;",
"%s has no zero argument constructor",
klass->PrettyClass().c_str());
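
The java_lang_Class changes funnel all reflection filtering through one predicate that combines the public-only check with the hidden-API decision. A minimal sketch of that shape, with the hidden-API verdict reduced to a hypothetical access-flag bit (the real code consults hiddenapi::GetMemberAction and the caller's class loader):

#include <cstdint>
#include <cstddef>
#include <vector>

// kPublic matches the Java access-flag bit; the "deny" bit is a stand-in used
// only in this sketch for the hiddenapi::kDeny decision.
constexpr uint32_t kPublic = 0x0001;
constexpr uint32_t kHiddenDeny = 0x8000;  // hypothetical marker

bool IsDiscoverable(bool public_only, bool enforce_hidden_api, uint32_t access_flags) {
  if (public_only && (access_flags & kPublic) == 0) {
    return false;  // caller asked for public members only
  }
  if (enforce_hidden_api && (access_flags & kHiddenDeny) != 0) {
    return false;  // caller is outside the boot class path and the member is denied
  }
  return true;
}

// Two-pass usage mirroring GetDeclaredFields: count first, then fill the array.
size_t CountDiscoverable(const std::vector<uint32_t>& field_flags,
                         bool public_only,
                         bool enforce_hidden_api) {
  size_t n = 0;
  for (uint32_t flags : field_flags) {
    if (IsDiscoverable(public_only, enforce_hidden_api, flags)) {
      ++n;
    }
  }
  return n;
}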
diff --git a/runtime/native_stack_dump.cc b/runtime/native_stack_dump.cc
index fa46709422..099d77edaa 100644
--- a/runtime/native_stack_dump.cc
+++ b/runtime/native_stack_dump.cc
@@ -333,15 +333,15 @@ void DumpNativeStack(std::ostream& os,
os << prefix << StringPrintf("#%02zu pc ", it->num);
bool try_addr2line = false;
if (!BacktraceMap::IsValid(it->map)) {
- os << StringPrintf(Is64BitInstructionSet(kRuntimeISA) ? "%016" PRIxPTR " ???"
- : "%08" PRIxPTR " ???",
+ os << StringPrintf(Is64BitInstructionSet(kRuntimeISA) ? "%016" PRIx64 " ???"
+ : "%08" PRIx64 " ???",
it->pc);
} else {
- os << StringPrintf(Is64BitInstructionSet(kRuntimeISA) ? "%016" PRIxPTR " "
- : "%08" PRIxPTR " ",
+ os << StringPrintf(Is64BitInstructionSet(kRuntimeISA) ? "%016" PRIx64 " "
+ : "%08" PRIx64 " ",
it->rel_pc);
if (it->map.name.empty()) {
- os << StringPrintf("<anonymous:%" PRIxPTR ">", it->map.start);
+ os << StringPrintf("<anonymous:%" PRIx64 ">", it->map.start);
} else {
os << it->map.name;
}
@@ -361,7 +361,7 @@ void DumpNativeStack(std::ostream& os,
PcIsWithinQuickCode(current_method, it->pc)) {
const void* start_of_code = current_method->GetEntryPointFromQuickCompiledCode();
os << current_method->JniLongName() << "+"
- << (it->pc - reinterpret_cast<uintptr_t>(start_of_code));
+ << (it->pc - reinterpret_cast<uint64_t>(start_of_code));
} else {
os << "???";
}
@@ -393,6 +393,10 @@ void DumpKernelStack(std::ostream& os, pid_t tid, const char* prefix, bool inclu
std::vector<std::string> kernel_stack_frames;
Split(kernel_stack, '\n', &kernel_stack_frames);
+ if (kernel_stack_frames.empty()) {
+ os << prefix << "(" << kernel_stack_filename << " is empty)\n";
+ return;
+ }
// We skip the last stack frame because it's always equivalent to "[<ffffffff>] 0xffffffff",
// which looking at the source appears to be the kernel's way of saying "that's all, folks!".
kernel_stack_frames.pop_back();
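
The format-specifier changes above follow the backtrace map fields widening to 64-bit values, so PRIx64 replaces PRIxPTR. A tiny standalone example of printing such an address with the <cinttypes> macros (the value is made up):

#include <cinttypes>
#include <cstdint>
#include <cstdio>

int main() {
  uint64_t pc = 0x7f3a1c00d4ull;  // example value only
  bool is_64bit = true;
  std::printf(is_64bit ? "%016" PRIx64 " ???\n"
                       : "%08" PRIx64 " ???\n",
              pc);
  return 0;
}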
diff --git a/runtime/oat.h b/runtime/oat.h
index 6d4f18bdb1..8f81010a06 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,8 +32,8 @@ class InstructionSetFeatures;
class PACKED(4) OatHeader {
public:
static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
- // Last oat version changed reason: 4-bit ClassStatus.
- static constexpr uint8_t kOatVersion[] = { '1', '3', '6', '\0' };
+ // Last oat version changed reason: Math.pow() intrinsic.
+ static constexpr uint8_t kOatVersion[] = { '1', '3', '8', '\0' };
static constexpr const char* kImageLocationKey = "image-location";
static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index df07a191bc..dc4bae3415 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -43,9 +43,11 @@
#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/unix_file/fd_file.h"
+#include "dex/art_dex_file_loader.h"
#include "dex/dex_file_loader.h"
#include "dex/dex_file_types.h"
#include "dex/standard_dex_file.h"
+#include "dex/utf-inl.h"
#include "elf_file.h"
#include "elf_utils.h"
#include "gc_root.h"
@@ -59,7 +61,6 @@
#include "os.h"
#include "runtime.h"
#include "type_lookup_table.h"
-#include "utf-inl.h"
#include "utils.h"
#include "vdex_file.h"
@@ -194,10 +195,6 @@ OatFileBase* OatFileBase::OpenOatFile(const std::string& vdex_filename,
ret->PreLoad();
- if (!ret->LoadVdex(vdex_filename, writable, low_4gb, error_msg)) {
- return nullptr;
- }
-
if (!ret->Load(elf_filename,
oat_file_begin,
writable,
@@ -211,6 +208,10 @@ OatFileBase* OatFileBase::OpenOatFile(const std::string& vdex_filename,
return nullptr;
}
+ if (!ret->LoadVdex(vdex_filename, writable, low_4gb, error_msg)) {
+ return nullptr;
+ }
+
ret->PreSetup(elf_filename);
if (!ret->Setup(abs_dex_location, error_msg)) {
@@ -234,10 +235,6 @@ OatFileBase* OatFileBase::OpenOatFile(int vdex_fd,
std::string* error_msg) {
std::unique_ptr<OatFileBase> ret(new kOatFileBaseSubType(oat_location, executable));
- if (!ret->LoadVdex(vdex_fd, vdex_location, writable, low_4gb, error_msg)) {
- return nullptr;
- }
-
if (!ret->Load(oat_fd,
oat_file_begin,
writable,
@@ -251,6 +248,10 @@ OatFileBase* OatFileBase::OpenOatFile(int vdex_fd,
return nullptr;
}
+ if (!ret->LoadVdex(vdex_fd, vdex_location, writable, low_4gb, error_msg)) {
+ return nullptr;
+ }
+
ret->PreSetup(oat_location);
if (!ret->Setup(abs_dex_location, error_msg)) {
@@ -264,7 +265,14 @@ bool OatFileBase::LoadVdex(const std::string& vdex_filename,
bool writable,
bool low_4gb,
std::string* error_msg) {
- vdex_ = VdexFile::Open(vdex_filename, writable, low_4gb, /* unquicken*/ false, error_msg);
+ vdex_ = VdexFile::OpenAtAddress(vdex_begin_,
+ vdex_end_ - vdex_begin_,
+ vdex_begin_ != nullptr /* mmap_reuse */,
+ vdex_filename,
+ writable,
+ low_4gb,
+                                  /* unquicken */ false,
+ error_msg);
if (vdex_.get() == nullptr) {
*error_msg = StringPrintf("Failed to load vdex file '%s' %s",
vdex_filename.c_str(),
@@ -285,13 +293,16 @@ bool OatFileBase::LoadVdex(int vdex_fd,
if (rc == -1) {
PLOG(WARNING) << "Failed getting length of vdex file";
} else {
- vdex_ = VdexFile::Open(vdex_fd,
- s.st_size,
- vdex_filename,
- writable,
- low_4gb,
- false /* unquicken */,
- error_msg);
+ vdex_ = VdexFile::OpenAtAddress(vdex_begin_,
+ vdex_end_ - vdex_begin_,
+ vdex_begin_ != nullptr /* mmap_reuse */,
+ vdex_fd,
+ s.st_size,
+ vdex_filename,
+ writable,
+ low_4gb,
+ false /* unquicken */,
+ error_msg);
if (vdex_.get() == nullptr) {
*error_msg = "Failed opening vdex file.";
return false;
@@ -339,7 +350,7 @@ bool OatFileBase::ComputeFields(uint8_t* requested_base,
} else {
bss_end_ = const_cast<uint8_t*>(FindDynamicSymbolAddress("oatbsslastword", &symbol_error_msg));
if (bss_end_ == nullptr) {
- *error_msg = StringPrintf("Failed to find oatbasslastword symbol in '%s'", file_path.c_str());
+ *error_msg = StringPrintf("Failed to find oatbsslastword symbol in '%s'", file_path.c_str());
return false;
}
// Readjust to be non-inclusive upper bound.
@@ -351,6 +362,20 @@ bool OatFileBase::ComputeFields(uint8_t* requested_base,
bss_roots_ = const_cast<uint8_t*>(FindDynamicSymbolAddress("oatbssroots", &symbol_error_msg));
}
+ vdex_begin_ = const_cast<uint8_t*>(FindDynamicSymbolAddress("oatdex", &symbol_error_msg));
+ if (vdex_begin_ == nullptr) {
+ // No .vdex section.
+ vdex_end_ = nullptr;
+ } else {
+ vdex_end_ = const_cast<uint8_t*>(FindDynamicSymbolAddress("oatdexlastword", &symbol_error_msg));
+ if (vdex_end_ == nullptr) {
+ *error_msg = StringPrintf("Failed to find oatdexlastword symbol in '%s'", file_path.c_str());
+ return false;
+ }
+ // Readjust to be non-inclusive upper bound.
+ vdex_end_ += sizeof(uint32_t);
+ }
+
return true;
}
@@ -595,14 +620,6 @@ bool OatFileBase::Setup(const char* abs_dex_location, std::string* error_msg) {
dex_file_location.c_str());
return false;
}
- if (UNLIKELY(dex_file_offset == 0U)) {
- *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zu for '%s' with zero dex "
- "file offset",
- GetLocation().c_str(),
- i,
- dex_file_location.c_str());
- return false;
- }
if (UNLIKELY(dex_file_offset > DexSize())) {
*error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zu for '%s' with dex file "
"offset %u > %zu",
@@ -613,20 +630,45 @@ bool OatFileBase::Setup(const char* abs_dex_location, std::string* error_msg) {
DexSize());
return false;
}
- if (UNLIKELY(DexSize() - dex_file_offset < sizeof(DexFile::Header))) {
- *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zu for '%s' with dex file "
- "offset %u of %zu but the size of dex file header is %zu",
- GetLocation().c_str(),
- i,
- dex_file_location.c_str(),
- dex_file_offset,
- DexSize(),
- sizeof(DexFile::Header));
- return false;
+ const uint8_t* dex_file_pointer = nullptr;
+ if (UNLIKELY(dex_file_offset == 0U)) {
+ if (uncompressed_dex_files_ == nullptr) {
+ uncompressed_dex_files_.reset(new std::vector<std::unique_ptr<const DexFile>>());
+ // No dex files, load it from location.
+ const ArtDexFileLoader dex_file_loader;
+ if (!dex_file_loader.Open(dex_file_location.c_str(),
+ dex_file_location,
+ /* verify */ false,
+ /* verify_checksum */ false,
+ error_msg,
+ uncompressed_dex_files_.get())) {
+ if (Runtime::Current() == nullptr) {
+ // If there's no runtime, we're running oatdump, so return
+ // a half constructed oat file that oatdump knows how to deal with.
+ LOG(WARNING) << "Could not find associated dex files of oat file. "
+ << "Oatdump will only dump the header.";
+ return true;
+ } else {
+ return false;
+ }
+ }
+ }
+ dex_file_pointer = uncompressed_dex_files_.get()->at(i)->Begin();
+ } else {
+ if (UNLIKELY(DexSize() - dex_file_offset < sizeof(DexFile::Header))) {
+ *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zu for '%s' with dex file "
+ "offset %u of %zu but the size of dex file header is %zu",
+ GetLocation().c_str(),
+ i,
+ dex_file_location.c_str(),
+ dex_file_offset,
+ DexSize(),
+ sizeof(DexFile::Header));
+ return false;
+ }
+ dex_file_pointer = DexBegin() + dex_file_offset;
}
- const uint8_t* dex_file_pointer = DexBegin() + dex_file_offset;
-
const bool valid_magic = DexFileLoader::IsMagicValid(dex_file_pointer);
if (UNLIKELY(!valid_magic)) {
*error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zu for '%s' with invalid "
@@ -647,7 +689,7 @@ bool OatFileBase::Setup(const char* abs_dex_location, std::string* error_msg) {
return false;
}
const DexFile::Header* header = reinterpret_cast<const DexFile::Header*>(dex_file_pointer);
- if (DexSize() - dex_file_offset < header->file_size_) {
+ if (dex_file_offset != 0 && (DexSize() - dex_file_offset < header->file_size_)) {
*error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zu for '%s' with dex file "
"offset %u and size %u truncated at %zu",
GetLocation().c_str(),
@@ -1272,13 +1314,15 @@ bool ElfOatFile::ElfFileOpen(File* file,
std::string OatFile::ResolveRelativeEncodedDexLocation(
const char* abs_dex_location, const std::string& rel_dex_location) {
- if (abs_dex_location != nullptr && rel_dex_location[0] != '/') {
+ // For host, we still do resolution as the rel_dex_location might be absolute
+ // for a target dex (for example /system/foo/foo.apk).
+ if (abs_dex_location != nullptr && (rel_dex_location[0] != '/' || !kIsTargetBuild)) {
// Strip :classes<N>.dex used for secondary multidex files.
std::string base = DexFileLoader::GetBaseLocation(rel_dex_location);
std::string multidex_suffix = DexFileLoader::GetMultiDexSuffix(rel_dex_location);
// Check if the base is a suffix of the provided abs_dex_location.
- std::string target_suffix = "/" + base;
+ std::string target_suffix = ((rel_dex_location[0] != '/') ? "/" : "") + base;
std::string abs_location(abs_dex_location);
if (abs_location.size() > target_suffix.size()) {
size_t pos = abs_location.size() - target_suffix.size();
@@ -1441,6 +1485,8 @@ OatFile::OatFile(const std::string& location, bool is_executable)
bss_methods_(nullptr),
bss_roots_(nullptr),
is_executable_(is_executable),
+ vdex_begin_(nullptr),
+ vdex_end_(nullptr),
secondary_lookup_lock_("OatFile secondary lookup lock", kOatFileSecondaryLookupLock) {
CHECK(!location_.empty());
}
@@ -1471,6 +1517,14 @@ const uint8_t* OatFile::BssEnd() const {
return bss_end_;
}
+const uint8_t* OatFile::VdexBegin() const {
+ return vdex_begin_;
+}
+
+const uint8_t* OatFile::VdexEnd() const {
+ return vdex_end_;
+}
+
const uint8_t* OatFile::DexBegin() const {
return vdex_->Begin();
}
@@ -1500,21 +1554,6 @@ ArrayRef<GcRoot<mirror::Object>> OatFile::GetBssGcRoots() const {
}
}
-uint32_t OatFile::GetDebugInfoOffset(const DexFile& dex_file, uint32_t debug_info_off) {
- // Note that although the specification says that 0 should be used if there
- // is no debug information, some applications incorrectly use 0xFFFFFFFF.
- // The following check also handles debug_info_off == 0.
- if (debug_info_off < dex_file.Size() || debug_info_off == 0xFFFFFFFF) {
- return debug_info_off;
- }
- const OatFile::OatDexFile* oat_dex_file = dex_file.GetOatDexFile();
- if (oat_dex_file == nullptr || (oat_dex_file->GetOatFile() == nullptr)) {
- return debug_info_off;
- }
- return oat_dex_file->GetOatFile()->GetVdexFile()->GetDebugInfoOffset(
- dex_file, debug_info_off);
-}
-
const OatFile::OatDexFile* OatFile::GetOatDexFile(const char* dex_location,
const uint32_t* dex_location_checksum,
std::string* error_msg) const {
@@ -1614,7 +1653,12 @@ OatFile::OatDexFile::OatDexFile(const OatFile* oat_file,
if (lookup_table_data_ + TypeLookupTable::RawDataLength(num_class_defs) > GetOatFile()->End()) {
LOG(WARNING) << "found truncated lookup table in " << dex_file_location_;
} else {
- lookup_table_ = TypeLookupTable::Open(dex_file_pointer_, lookup_table_data_, num_class_defs);
+ const uint8_t* dex_data = dex_file_pointer_;
+ // TODO: Clean this up to create the type lookup table after the dex file has been created?
+ if (CompactDexFile::IsMagicValid(dex_header->magic_)) {
+ dex_data += dex_header->data_off_;
+ }
+ lookup_table_ = TypeLookupTable::Open(dex_data, lookup_table_data_, num_class_defs);
}
}
}
@@ -1632,14 +1676,15 @@ std::unique_ptr<const DexFile> OatFile::OatDexFile::OpenDexFile(std::string* err
ScopedTrace trace(__PRETTY_FUNCTION__);
static constexpr bool kVerify = false;
static constexpr bool kVerifyChecksum = false;
- return DexFileLoader::Open(dex_file_pointer_,
- FileSize(),
- dex_file_location_,
- dex_file_location_checksum_,
- this,
- kVerify,
- kVerifyChecksum,
- error_msg);
+ const ArtDexFileLoader dex_file_loader;
+ return dex_file_loader.Open(dex_file_pointer_,
+ FileSize(),
+ dex_file_location_,
+ dex_file_location_checksum_,
+ this,
+ kVerify,
+ kVerifyChecksum,
+ error_msg);
}
uint32_t OatFile::OatDexFile::GetOatClassOffset(uint16_t class_def_index) const {
@@ -1693,9 +1738,17 @@ const DexFile::ClassDef* OatFile::OatDexFile::FindClassDef(const DexFile& dex_fi
size_t hash) {
const OatFile::OatDexFile* oat_dex_file = dex_file.GetOatDexFile();
DCHECK_EQ(ComputeModifiedUtf8Hash(descriptor), hash);
+ bool used_lookup_table = false;
+ const DexFile::ClassDef* lookup_table_classdef = nullptr;
if (LIKELY((oat_dex_file != nullptr) && (oat_dex_file->GetTypeLookupTable() != nullptr))) {
+ used_lookup_table = true;
const uint32_t class_def_idx = oat_dex_file->GetTypeLookupTable()->Lookup(descriptor, hash);
- return (class_def_idx != dex::kDexNoIndex) ? &dex_file.GetClassDef(class_def_idx) : nullptr;
+ lookup_table_classdef = (class_def_idx != dex::kDexNoIndex)
+ ? &dex_file.GetClassDef(class_def_idx)
+ : nullptr;
+ if (!kIsDebugBuild) {
+ return lookup_table_classdef;
+ }
}
// Fast path for rare no class defs case.
const uint32_t num_class_defs = dex_file.NumClassDefs();
@@ -1705,7 +1758,11 @@ const DexFile::ClassDef* OatFile::OatDexFile::FindClassDef(const DexFile& dex_fi
const DexFile::TypeId* type_id = dex_file.FindTypeId(descriptor);
if (type_id != nullptr) {
dex::TypeIndex type_idx = dex_file.GetIndexForTypeId(*type_id);
- return dex_file.FindClassDef(type_idx);
+ const DexFile::ClassDef* found_class_def = dex_file.FindClassDef(type_idx);
+ if (kIsDebugBuild && used_lookup_table) {
+ DCHECK_EQ(found_class_def, lookup_table_classdef);
+ }
+ return found_class_def;
}
return nullptr;
}
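
The ResolveRelativeEncodedDexLocation hunk now also accepts an absolute encoded location on host and matches it as a suffix of the given absolute path. A simplified sketch of that suffix match, ignoring the multidex and kIsTargetBuild handling of the real code:

#include <iostream>
#include <string>

// Assumes rel is non-empty, as in the real code.
std::string Resolve(const std::string& abs_dex_location, const std::string& rel) {
  std::string target_suffix = (rel[0] != '/') ? "/" + rel : rel;
  if (abs_dex_location.size() > target_suffix.size()) {
    size_t pos = abs_dex_location.size() - target_suffix.size();
    if (abs_dex_location.compare(pos, std::string::npos, target_suffix) == 0) {
      return abs_dex_location;  // encoded location refers to the same file
    }
  }
  return rel;
}

int main() {
  std::cout << Resolve("/data/app/foo/base.apk", "base.apk") << "\n";              // resolves
  std::cout << Resolve("/data/app/foo/base.apk", "/system/foo/foo.apk") << "\n";   // kept as-is
  return 0;
}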
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index 02318b68b7..46c692e568 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -28,12 +28,12 @@
#include "compiler_filter.h"
#include "dex/dex_file.h"
#include "dex/dex_file_layout.h"
+#include "dex/utf.h"
#include "index_bss_mapping.h"
#include "mirror/object.h"
#include "oat.h"
#include "os.h"
#include "type_lookup_table.h"
-#include "utf.h"
#include "utils.h"
namespace art {
@@ -115,10 +115,6 @@ class OatFile {
const char* abs_dex_location,
std::string* error_msg);
- // Return the actual debug info offset for an offset that might be actually pointing to
- // dequickening info. The returned debug info offset is the one originally in the the dex file.
- static uint32_t GetDebugInfoOffset(const DexFile& dex_file, uint32_t debug_info_off);
-
virtual ~OatFile();
bool IsExecutable() const {
@@ -279,6 +275,10 @@ class OatFile {
return BssEnd() - BssBegin();
}
+ size_t VdexSize() const {
+ return VdexEnd() - VdexBegin();
+ }
+
size_t BssMethodsOffset() const {
// Note: This is used only for symbolizer and needs to return a valid .bss offset.
return (bss_methods_ != nullptr) ? bss_methods_ - BssBegin() : BssRootsOffset();
@@ -299,6 +299,9 @@ class OatFile {
const uint8_t* BssBegin() const;
const uint8_t* BssEnd() const;
+ const uint8_t* VdexBegin() const;
+ const uint8_t* VdexEnd() const;
+
const uint8_t* DexBegin() const;
const uint8_t* DexEnd() const;
@@ -325,6 +328,11 @@ class OatFile {
return vdex_.get();
}
+ // Whether the OatFile embeds the Dex code.
+ bool ContainsDexCode() const {
+ return uncompressed_dex_files_ == nullptr;
+ }
+
protected:
OatFile(const std::string& filename, bool executable);
@@ -358,6 +366,12 @@ class OatFile {
// Was this oat_file loaded executable?
const bool is_executable_;
+ // Pointer to the .vdex section, if present, otherwise null.
+ uint8_t* vdex_begin_;
+
+ // Pointer to the end of the .vdex section, if present, otherwise null.
+ uint8_t* vdex_end_;
+
// Owning storage for the OatDexFile objects.
std::vector<const OatDexFile*> oat_dex_files_storage_;
@@ -390,6 +404,10 @@ class OatFile {
// elements. std::list<> and std::deque<> satisfy this requirement, std::vector<> doesn't.
mutable std::list<std::string> string_cache_ GUARDED_BY(secondary_lookup_lock_);
+ // Cache of dex files mapped directly from a location, in case the OatFile does
+ // not embed the dex code.
+ std::unique_ptr<std::vector<std::unique_ptr<const DexFile>>> uncompressed_dex_files_;
+
friend class gc::collector::DummyOatFile; // For modifying begin_ and end_.
friend class OatClass;
friend class art::OatDexFile;
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index 240030cf5b..15a5954396 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -28,6 +28,7 @@
#include "base/stl_util.h"
#include "class_linker.h"
#include "compiler_filter.h"
+#include "dex/art_dex_file_loader.h"
#include "dex/dex_file_loader.h"
#include "exec_utils.h"
#include "gc/heap.h"
@@ -71,9 +72,12 @@ std::ostream& operator << (std::ostream& stream, const OatFileAssistant::OatStat
OatFileAssistant::OatFileAssistant(const char* dex_location,
const InstructionSet isa,
- bool load_executable)
+ bool load_executable,
+ bool only_load_system_executable)
: OatFileAssistant(dex_location,
- isa, load_executable,
+ isa,
+ load_executable,
+ only_load_system_executable,
-1 /* vdex_fd */,
-1 /* oat_fd */,
-1 /* zip_fd */) {}
@@ -82,11 +86,13 @@ OatFileAssistant::OatFileAssistant(const char* dex_location,
OatFileAssistant::OatFileAssistant(const char* dex_location,
const InstructionSet isa,
bool load_executable,
+ bool only_load_system_executable,
int vdex_fd,
int oat_fd,
int zip_fd)
: isa_(isa),
load_executable_(load_executable),
+ only_load_system_executable_(only_load_system_executable),
odex_(this, /*is_oat_location*/ false),
oat_(this, /*is_oat_location*/ true),
zip_fd_(zip_fd) {
@@ -869,10 +875,11 @@ const std::vector<uint32_t>* OatFileAssistant::GetRequiredDexChecksums() {
required_dex_checksums_found_ = false;
cached_required_dex_checksums_.clear();
std::string error_msg;
- if (DexFileLoader::GetMultiDexChecksums(dex_location_.c_str(),
- &cached_required_dex_checksums_,
- &error_msg,
- zip_fd_)) {
+ const ArtDexFileLoader dex_file_loader;
+ if (dex_file_loader.GetMultiDexChecksums(dex_location_.c_str(),
+ &cached_required_dex_checksums_,
+ &error_msg,
+ zip_fd_)) {
required_dex_checksums_found_ = true;
has_original_dex_files_ = true;
} else {
@@ -1120,6 +1127,10 @@ const OatFile* OatFileAssistant::OatFileInfo::GetFile() {
if (!load_attempted_) {
load_attempted_ = true;
if (filename_provided_) {
+ bool executable = oat_file_assistant_->load_executable_;
+ if (executable && oat_file_assistant_->only_load_system_executable_) {
+ executable = LocationIsOnSystem(filename_.c_str());
+ }
std::string error_msg;
if (use_fd_) {
if (oat_fd_ >= 0 && vdex_fd_ >= 0) {
@@ -1128,7 +1139,7 @@ const OatFile* OatFileAssistant::OatFileInfo::GetFile() {
filename_.c_str(),
nullptr,
nullptr,
- oat_file_assistant_->load_executable_,
+ executable,
false /* low_4gb */,
oat_file_assistant_->dex_location_.c_str(),
&error_msg));
@@ -1138,7 +1149,7 @@ const OatFile* OatFileAssistant::OatFileInfo::GetFile() {
filename_.c_str(),
nullptr,
nullptr,
- oat_file_assistant_->load_executable_,
+ executable,
false /* low_4gb */,
oat_file_assistant_->dex_location_.c_str(),
&error_msg));
diff --git a/runtime/oat_file_assistant.h b/runtime/oat_file_assistant.h
index 6c01c1e880..a6140304c2 100644
--- a/runtime/oat_file_assistant.h
+++ b/runtime/oat_file_assistant.h
@@ -119,9 +119,13 @@ class OatFileAssistant {
//
// load_executable should be true if the caller intends to try and load
// executable code for this dex location.
+ //
+  // only_load_system_executable should be true if the caller intends for only
+  // oat files from /system to be loaded as executable.
OatFileAssistant(const char* dex_location,
const InstructionSet isa,
- bool load_executable);
+ bool load_executable,
+ bool only_load_system_executable = false);
// Similar to this(const char*, const InstructionSet, bool), however, if a valid zip_fd is
// provided, vdex, oat, and zip files will be read from vdex_fd, oat_fd and zip_fd respectively.
@@ -129,6 +133,7 @@ class OatFileAssistant {
OatFileAssistant(const char* dex_location,
const InstructionSet isa,
bool load_executable,
+ bool only_load_system_executable,
int vdex_fd,
int oat_fd,
int zip_fd);
@@ -487,6 +492,9 @@ class OatFileAssistant {
// Whether we will attempt to load oat files executable.
bool load_executable_ = false;
+ // Whether only oat files on /system are loaded executable.
+ const bool only_load_system_executable_ = false;
+
// Cached value of the required dex checksums.
// This should be accessed only by the GetRequiredDexChecksums() method.
std::vector<uint32_t> cached_required_dex_checksums_;
diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc
index a98da0f029..50f5e7a0d5 100644
--- a/runtime/oat_file_assistant_test.cc
+++ b/runtime/oat_file_assistant_test.cc
@@ -246,6 +246,7 @@ TEST_F(OatFileAssistantTest, GetDexOptNeededWithFd) {
OatFileAssistant oat_file_assistant(dex_location.c_str(),
kRuntimeISA,
false,
+ false,
vdex_fd.get(),
odex_fd.get(),
zip_fd.get());
@@ -285,6 +286,7 @@ TEST_F(OatFileAssistantTest, GetDexOptNeededWithInvalidOdexFd) {
OatFileAssistant oat_file_assistant(dex_location.c_str(),
kRuntimeISA,
false,
+ false,
vdex_fd.get(),
-1 /* oat_fd */,
zip_fd.get());
@@ -319,6 +321,7 @@ TEST_F(OatFileAssistantTest, GetDexOptNeededWithInvalidVdexFd) {
OatFileAssistant oat_file_assistant(dex_location.c_str(),
kRuntimeISA,
false,
+ false,
-1 /* vdex_fd */,
odex_fd.get(),
zip_fd.get());
@@ -342,6 +345,7 @@ TEST_F(OatFileAssistantTest, GetDexOptNeededWithInvalidOdexVdexFd) {
OatFileAssistant oat_file_assistant(dex_location.c_str(),
kRuntimeISA,
false,
+ false,
-1 /* vdex_fd */,
-1 /* oat_fd */,
zip_fd);
@@ -1439,6 +1443,60 @@ TEST_F(OatFileAssistantTest, GetDexOptNeededWithUpToDateContextRelative) {
default_filter, false, false, relative_context.get()));
}
+TEST_F(OatFileAssistantTest, SystemOdex) {
+ std::string dex_location = GetScratchDir() + "/OatUpToDate.jar";
+ std::string odex_location = GetScratchDir() + "/OatUpToDate.odex";
+ std::string system_location = GetAndroidRoot() + "/OatUpToDate.jar";
+
+ std::string error_msg;
+
+ Copy(GetDexSrc1(), dex_location);
+ EXPECT_FALSE(LocationIsOnSystem(dex_location.c_str()));
+
+ {
+ OatFileAssistant oat_file_assistant(dex_location.c_str(),
+ kRuntimeISA,
+ true,
+ false);
+ int status = oat_file_assistant.MakeUpToDate(false, kSpecialSharedLibraryContext, &error_msg);
+ EXPECT_EQ(OatFileAssistant::kUpdateSucceeded, status) << error_msg;
+ EXPECT_TRUE(oat_file_assistant.GetBestOatFile()->IsExecutable());
+ }
+
+ {
+ OatFileAssistant oat_file_assistant(dex_location.c_str(),
+ kRuntimeISA,
+ true,
+ true);
+ int status = oat_file_assistant.MakeUpToDate(false, kSpecialSharedLibraryContext, &error_msg);
+ EXPECT_EQ(OatFileAssistant::kUpdateSucceeded, status) << error_msg;
+ EXPECT_FALSE(oat_file_assistant.GetBestOatFile()->IsExecutable());
+ }
+
+ Copy(GetDexSrc1(), system_location);
+ EXPECT_TRUE(LocationIsOnSystem(system_location.c_str()));
+
+ {
+ OatFileAssistant oat_file_assistant(system_location.c_str(),
+ kRuntimeISA,
+ true,
+ false);
+ int status = oat_file_assistant.MakeUpToDate(false, kSpecialSharedLibraryContext, &error_msg);
+ EXPECT_EQ(OatFileAssistant::kUpdateSucceeded, status) << error_msg;
+ EXPECT_TRUE(oat_file_assistant.GetBestOatFile()->IsExecutable());
+ }
+
+ {
+ OatFileAssistant oat_file_assistant(system_location.c_str(),
+ kRuntimeISA,
+ true,
+ true);
+ int status = oat_file_assistant.MakeUpToDate(false, kSpecialSharedLibraryContext, &error_msg);
+ EXPECT_EQ(OatFileAssistant::kUpdateSucceeded, status) << error_msg;
+ EXPECT_TRUE(oat_file_assistant.GetBestOatFile()->IsExecutable());
+ }
+}
+
// TODO: More Tests:
// * Test class linker falls back to unquickened dex for DexNoOat
// * Test class linker falls back to unquickened dex for MultiDexNoOat
diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc
index 29b9bfcf7f..e4194442d3 100644
--- a/runtime/oat_file_manager.cc
+++ b/runtime/oat_file_manager.cc
@@ -31,6 +31,7 @@
#include "base/systrace.h"
#include "class_linker.h"
#include "class_loader_context.h"
+#include "dex/art_dex_file_loader.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_file_loader.h"
#include "dex/dex_file_tracking_registrar.h"
@@ -40,6 +41,7 @@
#include "jni_internal.h"
#include "mirror/class_loader.h"
#include "mirror/object-inl.h"
+#include "oat_file.h"
#include "oat_file_assistant.h"
#include "obj_ptr-inl.h"
#include "scoped_thread_state_change-inl.h"
@@ -54,15 +56,11 @@ using android::base::StringPrintf;
// If true, we attempt to load the application image if it exists.
static constexpr bool kEnableAppImage = true;
-static bool OatFileIsOnSystem(const std::unique_ptr<const OatFile>& oat_file) {
- UniqueCPtr<const char[]> path(realpath(oat_file->GetLocation().c_str(), nullptr));
- return path != nullptr && android::base::StartsWith(oat_file->GetLocation(),
- GetAndroidRoot().c_str());
-}
-
const OatFile* OatFileManager::RegisterOatFile(std::unique_ptr<const OatFile> oat_file) {
WriterMutexLock mu(Thread::Current(), *Locks::oat_file_manager_lock_);
- CHECK(!only_use_system_oat_files_ || OatFileIsOnSystem(oat_file))
+ CHECK(!only_use_system_oat_files_ ||
+ LocationIsOnSystem(oat_file->GetLocation().c_str()) ||
+ !oat_file->IsExecutable())
<< "Registering a non /system oat file: " << oat_file->GetLocation();
DCHECK(oat_file != nullptr);
if (kIsDebugBuild) {
@@ -422,7 +420,8 @@ std::vector<std::unique_ptr<const DexFile>> OatFileManager::OpenDexFilesFromOat(
OatFileAssistant oat_file_assistant(dex_location,
kRuntimeISA,
- !runtime->IsAotCompiler());
+ !runtime->IsAotCompiler(),
+ only_use_system_oat_files_);
// Lock the target oat location to avoid races generating and loading the
// oat file.
@@ -435,8 +434,7 @@ std::vector<std::unique_ptr<const DexFile>> OatFileManager::OpenDexFilesFromOat(
const OatFile* source_oat_file = nullptr;
- // No point in trying to make up-to-date if we can only use system oat files.
- if (!only_use_system_oat_files_ && !oat_file_assistant.IsUpToDate()) {
+ if (!oat_file_assistant.IsUpToDate()) {
// Update the oat file on disk if we can, based on the --compiler-filter
// option derived from the current runtime options.
// This may fail, but that's okay. Best effort is all that matters here.
@@ -472,9 +470,7 @@ std::vector<std::unique_ptr<const DexFile>> OatFileManager::OpenDexFilesFromOat(
// Get the oat file on disk.
std::unique_ptr<const OatFile> oat_file(oat_file_assistant.GetBestOatFile().release());
- if (oat_file != nullptr && only_use_system_oat_files_ && !OatFileIsOnSystem(oat_file)) {
- // If the oat file is not on /system, don't use it.
- } else if ((class_loader != nullptr || dex_elements != nullptr) && oat_file != nullptr) {
+ if ((class_loader != nullptr || dex_elements != nullptr) && oat_file != nullptr) {
// Prevent oat files from being loaded if no class_loader or dex_elements are provided.
// This can happen when the deprecated DexFile.<init>(String) is called directly, and it
// could load oat files without checking the classpath, which would be incorrect.
@@ -527,8 +523,14 @@ std::vector<std::unique_ptr<const DexFile>> OatFileManager::OpenDexFilesFromOat(
if (source_oat_file != nullptr) {
bool added_image_space = false;
if (source_oat_file->IsExecutable()) {
- std::unique_ptr<gc::space::ImageSpace> image_space =
- kEnableAppImage ? oat_file_assistant.OpenImageSpace(source_oat_file) : nullptr;
+      // We need to throw away the image space if we are debuggable but the oat-file source of the
+      // image is not; otherwise we might get classes with inlined methods or other such things.
+ std::unique_ptr<gc::space::ImageSpace> image_space;
+ if (kEnableAppImage && (!runtime->IsJavaDebuggable() || source_oat_file->IsDebuggable())) {
+ image_space = oat_file_assistant.OpenImageSpace(source_oat_file);
+ } else {
+ image_space = nullptr;
+ }
if (image_space != nullptr) {
ScopedObjectAccess soa(self);
StackHandleScope<1> hs(self);
@@ -606,12 +608,13 @@ std::vector<std::unique_ptr<const DexFile>> OatFileManager::OpenDexFilesFromOat(
if (oat_file_assistant.HasOriginalDexFiles()) {
if (Runtime::Current()->IsDexFileFallbackEnabled()) {
static constexpr bool kVerifyChecksum = true;
- if (!DexFileLoader::Open(dex_location,
- dex_location,
- Runtime::Current()->IsVerificationEnabled(),
- kVerifyChecksum,
- /*out*/ &error_msg,
- &dex_files)) {
+ const ArtDexFileLoader dex_file_loader;
+ if (!dex_file_loader.Open(dex_location,
+ dex_location,
+ Runtime::Current()->IsVerificationEnabled(),
+ kVerifyChecksum,
+ /*out*/ &error_msg,
+ &dex_files)) {
LOG(WARNING) << error_msg;
error_msgs->push_back("Failed to open dex files from " + std::string(dex_location)
+ " because: " + error_msg);
diff --git a/runtime/oat_file_manager.h b/runtime/oat_file_manager.h
index dd6b7ba2ff..038474e31f 100644
--- a/runtime/oat_file_manager.h
+++ b/runtime/oat_file_manager.h
@@ -127,6 +127,9 @@ class OatFileManager {
std::set<std::unique_ptr<const OatFile>> oat_files_ GUARDED_BY(Locks::oat_file_manager_lock_);
bool have_non_pic_oat_file_;
+
+ // Only use the compiled code in an OAT file when the file is on /system. If the OAT file
+ // is not on /system, don't load it "executable".
bool only_use_system_oat_files_;
DISALLOW_COPY_AND_ASSIGN(OatFileManager);
diff --git a/runtime/oat_file_test.cc b/runtime/oat_file_test.cc
index 7bf0f84596..8d864018ab 100644
--- a/runtime/oat_file_test.cc
+++ b/runtime/oat_file_test.cc
@@ -21,11 +21,13 @@
#include <gtest/gtest.h>
#include "common_runtime_test.h"
+#include "dexopt_test.h"
#include "scoped_thread_state_change-inl.h"
+#include "vdex_file.h"
namespace art {
-class OatFileTest : public CommonRuntimeTest {
+class OatFileTest : public DexoptTest {
};
TEST_F(OatFileTest, ResolveRelativeEncodedDexLocation) {
@@ -62,4 +64,28 @@ TEST_F(OatFileTest, ResolveRelativeEncodedDexLocation) {
"/data/app/foo/base.apk", "o/base.apk"));
}
+TEST_F(OatFileTest, LoadOat) {
+ std::string dex_location = GetScratchDir() + "/LoadOat.jar";
+
+ Copy(GetDexSrc1(), dex_location);
+ GenerateOatForTest(dex_location.c_str(), CompilerFilter::kSpeed);
+
+ std::string oat_location;
+ std::string error_msg;
+ ASSERT_TRUE(OatFileAssistant::DexLocationToOatFilename(
+ dex_location, kRuntimeISA, &oat_location, &error_msg)) << error_msg;
+ std::unique_ptr<OatFile> odex_file(OatFile::Open(oat_location.c_str(),
+ oat_location.c_str(),
+ nullptr,
+ nullptr,
+ false,
+ /*low_4gb*/false,
+ dex_location.c_str(),
+ &error_msg));
+ ASSERT_TRUE(odex_file.get() != nullptr);
+
+ // Check that the vdex file was loaded in the reserved space of odex file.
+ EXPECT_EQ(odex_file->GetVdexFile()->Begin(), odex_file->VdexBegin());
+}
+
} // namespace art
diff --git a/runtime/obj_ptr.h b/runtime/obj_ptr.h
index 70e767acf6..14fdba31d9 100644
--- a/runtime/obj_ptr.h
+++ b/runtime/obj_ptr.h
@@ -37,7 +37,7 @@ constexpr bool kObjPtrPoisoningValidateOnCopy = false;
template<class MirrorType>
class ObjPtr {
static constexpr size_t kCookieShift =
- sizeof(kHeapReferenceSize) * kBitsPerByte - kObjectAlignmentShift;
+ kHeapReferenceSize * kBitsPerByte - kObjectAlignmentShift;
static constexpr size_t kCookieBits = sizeof(uintptr_t) * kBitsPerByte - kCookieShift;
static constexpr uintptr_t kCookieMask = (static_cast<uintptr_t>(1u) << kCookieBits) - 1;
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 3ac3d03e90..92eb703338 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -327,6 +327,11 @@ std::unique_ptr<RuntimeParser> ParsedOptions::MakeParser(bool ignore_unrecognize
.WithType<bool>()
.WithValueMap({{"false", false}, {"true", true}})
.IntoKey(M::SlowDebug)
+ .Define("-Xtarget-sdk-version:_")
+ .WithType<int>()
+ .IntoKey(M::TargetSdkVersion)
+ .Define("-Xno-hidden-api-checks")
+ .IntoKey(M::NoHiddenApiChecks)
.Ignore({
"-ea", "-da", "-enableassertions", "-disableassertions", "--runtime-arg", "-esa",
"-dsa", "-enablesystemassertions", "-disablesystemassertions", "-Xrs", "-Xint:_",
diff --git a/runtime/primitive.h b/runtime/primitive.h
index 5b163d8cbe..38ad68d13d 100644
--- a/runtime/primitive.h
+++ b/runtime/primitive.h
@@ -140,12 +140,30 @@ class Primitive {
// Returns the descriptor corresponding to the boxed type of |type|.
static const char* BoxedDescriptor(Type type);
- // Return true if |type| is an numeric type.
+  // Returns true if |type| is a numeric type.
static constexpr bool IsNumericType(Type type) {
switch (type) {
case Primitive::Type::kPrimNot: return false;
case Primitive::Type::kPrimBoolean: return false;
case Primitive::Type::kPrimByte: return true;
+ case Primitive::Type::kPrimChar: return true;
+ case Primitive::Type::kPrimShort: return true;
+ case Primitive::Type::kPrimInt: return true;
+ case Primitive::Type::kPrimLong: return true;
+ case Primitive::Type::kPrimFloat: return true;
+ case Primitive::Type::kPrimDouble: return true;
+ case Primitive::Type::kPrimVoid: return false;
+ }
+ LOG(FATAL) << "Invalid type " << static_cast<int>(type);
+ UNREACHABLE();
+ }
+
+  // Returns true if |type| is a signed numeric type.
+ static constexpr bool IsSignedNumericType(Type type) {
+ switch (type) {
+ case Primitive::Type::kPrimNot: return false;
+ case Primitive::Type::kPrimBoolean: return false;
+ case Primitive::Type::kPrimByte: return true;
case Primitive::Type::kPrimChar: return false;
case Primitive::Type::kPrimShort: return true;
case Primitive::Type::kPrimInt: return true;
@@ -158,17 +176,39 @@ class Primitive {
UNREACHABLE();
}
+ // Returns the number of bits required to hold the largest
+ // positive number that can be represented by |type|.
+ static constexpr size_t BitsRequiredForLargestValue(Type type) {
+ switch (type) {
+ case Primitive::Type::kPrimNot: return 0u;
+ case Primitive::Type::kPrimBoolean: return 1u;
+ case Primitive::Type::kPrimByte: return 7u;
+ case Primitive::Type::kPrimChar: return 16u;
+ case Primitive::Type::kPrimShort: return 15u;
+ case Primitive::Type::kPrimInt: return 31u;
+ case Primitive::Type::kPrimLong: return 63u;
+ case Primitive::Type::kPrimFloat: return 128u;
+ case Primitive::Type::kPrimDouble: return 1024u;
+ case Primitive::Type::kPrimVoid: return 0u;
+ }
+ }
+
// Returns true if it is possible to widen type |from| to type |to|. Both |from| and
// |to| should be numeric primitive types.
static bool IsWidenable(Type from, Type to) {
- static_assert(Primitive::Type::kPrimByte < Primitive::Type::kPrimShort, "Bad ordering");
- static_assert(Primitive::Type::kPrimShort < Primitive::Type::kPrimInt, "Bad ordering");
- static_assert(Primitive::Type::kPrimInt < Primitive::Type::kPrimLong, "Bad ordering");
- static_assert(Primitive::Type::kPrimLong < Primitive::Type::kPrimFloat, "Bad ordering");
- static_assert(Primitive::Type::kPrimFloat < Primitive::Type::kPrimDouble, "Bad ordering");
- // Widening is only applicable between numeric types, like byte
- // and int. Non-numeric types, such as boolean, cannot be widened.
- return IsNumericType(from) && IsNumericType(to) && from <= to;
+ if (!IsNumericType(from) || !IsNumericType(to)) {
+ // Widening is only applicable between numeric types.
+ return false;
+ }
+ if (IsSignedNumericType(from) && !IsSignedNumericType(to)) {
+ // Nowhere to store the sign bit in |to|.
+ return false;
+ }
+ if (BitsRequiredForLargestValue(from) > BitsRequiredForLargestValue(to)) {
+ // The from,to pair corresponds to a narrowing.
+ return false;
+ }
+ return true;
}
static bool Is64BitType(Type type) {
diff --git a/runtime/primitive_test.cc b/runtime/primitive_test.cc
new file mode 100644
index 0000000000..e433b15b61
--- /dev/null
+++ b/runtime/primitive_test.cc
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "primitive.h"
+
+#include "gtest/gtest.h"
+
+namespace art {
+
+namespace {
+
+void CheckPrimitiveTypeWidensTo(Primitive::Type from,
+ const std::vector<Primitive::Type>& expected_to_types) {
+ std::vector<Primitive::Type> actual_to_types;
+ int last = static_cast<int>(Primitive::Type::kPrimLast);
+ for (int i = 0; i <= last; ++i) {
+ Primitive::Type to = static_cast<Primitive::Type>(i);
+ if (Primitive::IsWidenable(from, to)) {
+ actual_to_types.push_back(to);
+ }
+ }
+ EXPECT_EQ(expected_to_types, actual_to_types);
+}
+
+} // namespace
+
+TEST(PrimitiveTest, NotWidensTo) {
+ const std::vector<Primitive::Type> to_types = {};
+ CheckPrimitiveTypeWidensTo(Primitive::Type::kPrimNot, to_types);
+}
+
+TEST(PrimitiveTest, BooleanWidensTo) {
+ const std::vector<Primitive::Type> to_types = {};
+ CheckPrimitiveTypeWidensTo(Primitive::Type::kPrimBoolean, to_types);
+}
+
+TEST(PrimitiveTest, ByteWidensTo) {
+ const std::vector<Primitive::Type> to_types = {
+ Primitive::Type::kPrimByte,
+ Primitive::Type::kPrimShort,
+ Primitive::Type::kPrimInt,
+ Primitive::Type::kPrimLong,
+ Primitive::Type::kPrimFloat,
+ Primitive::Type::kPrimDouble,
+ };
+ CheckPrimitiveTypeWidensTo(Primitive::Type::kPrimByte, to_types);
+}
+
+TEST(PrimitiveTest, CharWidensTo) {
+ const std::vector<Primitive::Type> to_types = {
+ Primitive::Type::kPrimChar,
+ Primitive::Type::kPrimInt,
+ Primitive::Type::kPrimLong,
+ Primitive::Type::kPrimFloat,
+ Primitive::Type::kPrimDouble,
+ };
+ CheckPrimitiveTypeWidensTo(Primitive::Type::kPrimChar, to_types);
+}
+
+TEST(PrimitiveTest, ShortWidensTo) {
+ const std::vector<Primitive::Type> to_types = {
+ Primitive::Type::kPrimShort,
+ Primitive::Type::kPrimInt,
+ Primitive::Type::kPrimLong,
+ Primitive::Type::kPrimFloat,
+ Primitive::Type::kPrimDouble,
+ };
+ CheckPrimitiveTypeWidensTo(Primitive::Type::kPrimShort, to_types);
+}
+
+TEST(PrimitiveTest, IntWidensTo) {
+ const std::vector<Primitive::Type> to_types = {
+ Primitive::Type::kPrimInt,
+ Primitive::Type::kPrimLong,
+ Primitive::Type::kPrimFloat,
+ Primitive::Type::kPrimDouble,
+ };
+ CheckPrimitiveTypeWidensTo(Primitive::Type::kPrimInt, to_types);
+}
+
+TEST(PrimitiveTest, LongWidensTo) {
+ const std::vector<Primitive::Type> to_types = {
+ Primitive::Type::kPrimLong,
+ Primitive::Type::kPrimFloat,
+ Primitive::Type::kPrimDouble,
+ };
+ CheckPrimitiveTypeWidensTo(Primitive::Type::kPrimLong, to_types);
+}
+
+TEST(PrimitiveTest, FloatWidensTo) {
+ const std::vector<Primitive::Type> to_types = {
+ Primitive::Type::kPrimFloat,
+ Primitive::Type::kPrimDouble,
+ };
+ CheckPrimitiveTypeWidensTo(Primitive::Type::kPrimFloat, to_types);
+}
+
+TEST(PrimitiveTest, DoubleWidensTo) {
+ const std::vector<Primitive::Type> to_types = {
+ Primitive::Type::kPrimDouble,
+ };
+ CheckPrimitiveTypeWidensTo(Primitive::Type::kPrimDouble, to_types);
+}
+
+TEST(PrimitiveTest, VoidWidensTo) {
+ const std::vector<Primitive::Type> to_types = {};
+ CheckPrimitiveTypeWidensTo(Primitive::Type::kPrimVoid, to_types);
+}
+
+} // namespace art
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 3a7640fa8b..006405f095 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -222,7 +222,7 @@ void QuickExceptionHandler::SetCatchEnvironmentForOptimizedHandler(StackVisitor*
self_->DumpStack(LOG_STREAM(INFO) << "Setting catch phis: ");
}
- CodeItemDataAccessor accessor(handler_method_);
+ CodeItemDataAccessor accessor(handler_method_->DexInstructionData());
const size_t number_of_vregs = accessor.RegistersSize();
CodeInfo code_info = handler_method_header_->GetOptimizedCodeInfo();
CodeInfoEncoding encoding = code_info.ExtractEncoding();
@@ -360,7 +360,7 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor {
const size_t frame_id = GetFrameId();
ShadowFrame* new_frame = GetThread()->FindDebuggerShadowFrame(frame_id);
const bool* updated_vregs;
- CodeItemDataAccessor accessor(method);
+ CodeItemDataAccessor accessor(method->DexInstructionData());
const size_t num_regs = accessor.RegistersSize();
if (new_frame == nullptr) {
new_frame = ShadowFrame::CreateDeoptimizedFrame(num_regs, nullptr, method, GetDexPc());
@@ -408,7 +408,7 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor {
uintptr_t native_pc_offset = method_header->NativeQuickPcOffset(GetCurrentQuickFramePc());
CodeInfoEncoding encoding = code_info.ExtractEncoding();
StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
- CodeItemDataAccessor accessor(m);
+ CodeItemDataAccessor accessor(m->DexInstructionData());
const size_t number_of_vregs = accessor.RegistersSize();
uint32_t register_mask = code_info.GetRegisterMaskOf(encoding, stack_map);
BitMemoryRegion stack_mask = code_info.GetStackMaskOf(encoding, stack_map);
diff --git a/runtime/quicken_info.h b/runtime/quicken_info.h
index ce11f3c19b..52eca61c06 100644
--- a/runtime/quicken_info.h
+++ b/runtime/quicken_info.h
@@ -17,15 +17,93 @@
#ifndef ART_RUNTIME_QUICKEN_INFO_H_
#define ART_RUNTIME_QUICKEN_INFO_H_
+#include "base/array_ref.h"
#include "dex/dex_instruction.h"
+#include "leb128.h"
namespace art {
-// QuickenInfoTable is a table of 16 bit dex indices. There is one slot fo every instruction that is
-// possibly dequickenable.
+// Table for getting the offset of quicken info. Doesn't have one slot for each index, so a
+// combination of iteration and indexing is required to get the quicken info for a given dex method
+// index.
+class QuickenInfoOffsetTableAccessor {
+ public:
+ using TableType = uint32_t;
+ static constexpr uint32_t kElementsPerIndex = 16;
+
+ class Builder {
+ public:
+ explicit Builder(std::vector<uint8_t>* out_data) : out_data_(out_data) {}
+
+ void AddOffset(uint32_t index) {
+ out_data_->insert(out_data_->end(),
+ reinterpret_cast<const uint8_t*>(&index),
+ reinterpret_cast<const uint8_t*>(&index + 1));
+ }
+
+ private:
+ std::vector<uint8_t>* const out_data_;
+ };
+
+ // The table only covers every kElementsPerIndex indices.
+ static bool IsCoveredIndex(uint32_t index) {
+ return index % kElementsPerIndex == 0;
+ }
+
+ explicit QuickenInfoOffsetTableAccessor(const uint8_t* data, uint32_t max_index)
+ : table_(reinterpret_cast<const uint32_t*>(data)),
+ num_indices_(RoundUp(max_index, kElementsPerIndex) / kElementsPerIndex) {}
+
+ size_t SizeInBytes() const {
+ return NumIndices() * sizeof(table_[0]);
+ }
+
+ uint32_t NumIndices() const {
+ return num_indices_;
+ }
+
+ // Returns the offset for the index at or before the desired index. If the offset is for an index
+ // before the desired one, remainder is how many elements to traverse to reach the desired index.
+ TableType ElementOffset(uint32_t index, uint32_t* remainder) const {
+ *remainder = index % kElementsPerIndex;
+ return table_[index / kElementsPerIndex];
+ }
+
+ const uint8_t* DataEnd() const {
+ return reinterpret_cast<const uint8_t*>(table_ + NumIndices());
+ }
+
+ static uint32_t Alignment() {
+ return alignof(TableType);
+ }
+
+ private:
+ const TableType* table_;
+ uint32_t num_indices_;
+};
+
+// QuickenInfoTable is a table of 16 bit dex indices. There is one slot for every instruction that
+// is possibly dequickenable.
class QuickenInfoTable {
public:
- explicit QuickenInfoTable(const uint8_t* data) : data_(data) {}
+ class Builder {
+ public:
+ Builder(std::vector<uint8_t>* out_data, size_t num_elements) : out_data_(out_data) {
+ EncodeUnsignedLeb128(out_data_, num_elements);
+ }
+
+ void AddIndex(uint16_t index) {
+ out_data_->push_back(static_cast<uint8_t>(index));
+ out_data_->push_back(static_cast<uint8_t>(index >> 8));
+ }
+
+ private:
+ std::vector<uint8_t>* const out_data_;
+ };
+
+ explicit QuickenInfoTable(ArrayRef<const uint8_t> data)
+ : data_(data.data()),
+ num_elements_(!data.empty() ? DecodeUnsignedLeb128(&data_) : 0u) {}
bool IsNull() const {
return data_ == nullptr;
@@ -44,8 +122,18 @@ class QuickenInfoTable {
return bytes / sizeof(uint16_t);
}
+ static size_t SizeInBytes(ArrayRef<const uint8_t> data) {
+ QuickenInfoTable table(data);
+ return table.data_ + table.NumIndices() * 2 - data.data();
+ }
+
+ uint32_t NumIndices() const {
+ return num_elements_;
+ }
+
private:
- const uint8_t* const data_;
+ const uint8_t* data_;
+ const uint32_t num_elements_;
DISALLOW_COPY_AND_ASSIGN(QuickenInfoTable);
};
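
The new offset table keeps one entry per kElementsPerIndex (16) method indices, so a lookup jumps to the nearest stored offset and then walks forward to the requested index. A simplified standalone sketch of that two-step lookup, using fixed-size records where the real code measures each LEB128-prefixed table with SizeInBytes:

#include <cstdint>
#include <cstdio>
#include <vector>

constexpr uint32_t kElementsPerIndex = 16;

// Hypothetical fixed-size records; the real tables are variable length, so the
// forward walk would read each table and add its actual size instead.
struct Record { uint32_t size_in_bytes; };

uint32_t FindOffset(const std::vector<uint32_t>& offset_table,
                    const std::vector<Record>& records,
                    uint32_t index) {
  uint32_t remainder = index % kElementsPerIndex;
  uint32_t offset = offset_table[index / kElementsPerIndex];
  // Walk forward over the records between the covered index and the target.
  for (uint32_t i = index - remainder; i < index; ++i) {
    offset += records[i].size_in_bytes;
  }
  return offset;
}

int main() {
  std::vector<Record> records(40, Record{8u});
  std::vector<uint32_t> offset_table = {0u, 16u * 8u, 32u * 8u};  // one entry per 16 records
  std::printf("offset of record 21 = %u\n", FindOffset(offset_table, records, 21u));  // 168
  return 0;
}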
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 38c2bfd96f..25d83dff86 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -69,6 +69,7 @@
#include "class_linker-inl.h"
#include "compiler_callbacks.h"
#include "debugger.h"
+#include "dex/art_dex_file_loader.h"
#include "dex/dex_file_loader.h"
#include "elf_file.h"
#include "entrypoints/runtime_asm_entrypoints.h"
@@ -249,7 +250,7 @@ Runtime::Runtime()
preinitialization_transactions_(),
verify_(verifier::VerifyMode::kNone),
allow_dex_file_fallback_(true),
- target_sdk_version_(0),
+ target_sdk_version_(kUnsetSdkVersion),
implicit_null_checks_(false),
implicit_so_checks_(false),
implicit_suspend_checks_(false),
@@ -264,6 +265,10 @@ Runtime::Runtime()
oat_file_manager_(nullptr),
is_low_memory_mode_(false),
safe_mode_(false),
+ do_hidden_api_checks_(true),
+ pending_hidden_api_warning_(false),
+ dedupe_hidden_api_warnings_(true),
+ always_set_hidden_api_warning_flag_(false),
dump_native_stack_on_sig_quit_(true),
pruned_dalvik_cache_(false),
// Initially assume we perceive jank in case the process state is never updated.
@@ -1041,6 +1046,7 @@ static size_t OpenDexFiles(const std::vector<std::string>& dex_filenames,
if (!image_location.empty() && OpenDexFilesFromImage(image_location, dex_files, &failure_count)) {
return failure_count;
}
+ const ArtDexFileLoader dex_file_loader;
failure_count = 0;
for (size_t i = 0; i < dex_filenames.size(); i++) {
const char* dex_filename = dex_filenames[i].c_str();
@@ -1051,12 +1057,12 @@ static size_t OpenDexFiles(const std::vector<std::string>& dex_filenames,
LOG(WARNING) << "Skipping non-existent dex file '" << dex_filename << "'";
continue;
}
- if (!DexFileLoader::Open(dex_filename,
- dex_location,
- Runtime::Current()->IsVerificationEnabled(),
- kVerifyChecksum,
- &error_msg,
- dex_files)) {
+ if (!dex_file_loader.Open(dex_filename,
+ dex_location,
+ Runtime::Current()->IsVerificationEnabled(),
+ kVerifyChecksum,
+ &error_msg,
+ dex_files)) {
LOG(WARNING) << "Failed to open .dex from file '" << dex_filename << "': " << error_msg;
++failure_count;
}
@@ -1164,6 +1170,14 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
verify_ = runtime_options.GetOrDefault(Opt::Verify);
allow_dex_file_fallback_ = !runtime_options.Exists(Opt::NoDexFileFallback);
+ target_sdk_version_ = runtime_options.GetOrDefault(Opt::TargetSdkVersion);
+
+ // Check whether to enforce hidden API access checks. Zygote needs to be exempt
+ // but checks may be enabled for forked processes (see dalvik_system_ZygoteHooks).
+ if (is_zygote_ || runtime_options.Exists(Opt::NoHiddenApiChecks)) {
+ do_hidden_api_checks_ = false;
+ }
+
no_sig_chain_ = runtime_options.Exists(Opt::NoSigChain);
force_native_bridge_ = runtime_options.Exists(Opt::ForceNativeBridge);
@@ -1242,7 +1256,20 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
jdwp_provider_ = runtime_options.GetOrDefault(Opt::JdwpProvider);
switch (jdwp_provider_) {
case JdwpProvider::kNone: {
- LOG(WARNING) << "Disabling all JDWP support.";
+ LOG(INFO) << "Disabling all JDWP support.";
+ if (!jdwp_options_.empty()) {
+ bool has_transport = jdwp_options_.find("transport") != std::string::npos;
+ const char* transport_internal = !has_transport ? "transport=dt_android_adb," : "";
+ std::string adb_connection_args =
+ std::string(" -XjdwpProvider:adbconnection -XjdwpOptions:") + jdwp_options_;
+ LOG(WARNING) << "Jdwp options given when jdwp is disabled! You probably want to enable "
+ << "jdwp with one of:" << std::endl
+ << " -XjdwpProvider:internal "
+ << "-XjdwpOptions:" << transport_internal << jdwp_options_ << std::endl
+ << " -Xplugin:libopenjdkjvmti" << (kIsDebugBuild ? "d" : "") << ".so "
+ << "-agentpath:libjdwp.so=" << jdwp_options_ << std::endl
+ << (has_transport ? "" : adb_connection_args);
+ }
break;
}
case JdwpProvider::kInternal: {
@@ -1551,6 +1578,7 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
}
static bool EnsureJvmtiPlugin(Runtime* runtime,
+ bool allow_non_debuggable_tooling,
std::vector<Plugin>* plugins,
std::string* error_msg) {
constexpr const char* plugin_name = kIsDebugBuild ? "libopenjdkjvmtid.so" : "libopenjdkjvmti.so";
@@ -1562,9 +1590,9 @@ static bool EnsureJvmtiPlugin(Runtime* runtime,
}
}
- // Is the process debuggable? Otherwise, do not attempt to load the plugin.
- // TODO Support a crimped jvmti for non-debuggable runtimes.
- if (!runtime->IsJavaDebuggable()) {
+ // Is the process debuggable? Otherwise, do not attempt to load the plugin unless we are
+ // specifically allowed.
+ if (!allow_non_debuggable_tooling && !runtime->IsJavaDebuggable()) {
*error_msg = "Process is not debuggable.";
return false;
}
@@ -1585,9 +1613,12 @@ static bool EnsureJvmtiPlugin(Runtime* runtime,
// revisit this and make sure we're doing this on the right thread
// (and we synchronize access to any shared data structures like "agents_")
//
-void Runtime::AttachAgent(JNIEnv* env, const std::string& agent_arg, jobject class_loader) {
+void Runtime::AttachAgent(JNIEnv* env,
+ const std::string& agent_arg,
+ jobject class_loader,
+ bool allow_non_debuggable_tooling) {
std::string error_msg;
- if (!EnsureJvmtiPlugin(this, &plugins_, &error_msg)) {
+ if (!EnsureJvmtiPlugin(this, allow_non_debuggable_tooling, &plugins_, &error_msg)) {
LOG(WARNING) << "Could not load plugin: " << error_msg;
ScopedObjectAccess soa(Thread::Current());
ThrowIOException("%s", error_msg.c_str());
@@ -1840,7 +1871,13 @@ void Runtime::BlockSignals() {
bool Runtime::AttachCurrentThread(const char* thread_name, bool as_daemon, jobject thread_group,
bool create_peer) {
ScopedTrace trace(__FUNCTION__);
- return Thread::Attach(thread_name, as_daemon, thread_group, create_peer) != nullptr;
+ Thread* self = Thread::Attach(thread_name, as_daemon, thread_group, create_peer);
+ // Run ThreadGroup.add to notify the group that this thread is now started.
+ if (self != nullptr && create_peer && !IsAotCompiler()) {
+ ScopedObjectAccess soa(self);
+ self->NotifyThreadGroup(soa, thread_group);
+ }
+ return self != nullptr;
}
void Runtime::DetachCurrentThread() {
diff --git a/runtime/runtime.h b/runtime/runtime.h
index c8edabce09..7ab9be5c5b 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -520,6 +520,38 @@ class Runtime {
bool IsVerificationEnabled() const;
bool IsVerificationSoftFail() const;
+ void SetHiddenApiChecksEnabled(bool value) {
+ do_hidden_api_checks_ = value;
+ }
+
+ bool AreHiddenApiChecksEnabled() const {
+ return do_hidden_api_checks_;
+ }
+
+ void SetPendingHiddenApiWarning(bool value) {
+ pending_hidden_api_warning_ = value;
+ }
+
+ bool HasPendingHiddenApiWarning() const {
+ return pending_hidden_api_warning_;
+ }
+
+ void SetDedupeHiddenApiWarnings(bool value) {
+ dedupe_hidden_api_warnings_ = value;
+ }
+
+ bool ShouldDedupeHiddenApiWarnings() {
+ return dedupe_hidden_api_warnings_;
+ }
+
+ void AlwaysSetHiddenApiWarningFlag() {
+ always_set_hidden_api_warning_flag_ = true;
+ }
+
+ bool ShouldAlwaysSetHiddenApiWarningFlag() const {
+ return always_set_hidden_api_warning_flag_;
+ }
+
bool IsDexFileFallbackEnabled() const {
return allow_dex_file_fallback_;
}
@@ -661,7 +693,10 @@ class Runtime {
void AddSystemWeakHolder(gc::AbstractSystemWeakHolder* holder);
void RemoveSystemWeakHolder(gc::AbstractSystemWeakHolder* holder);
- void AttachAgent(JNIEnv* env, const std::string& agent_arg, jobject class_loader);
+ void AttachAgent(JNIEnv* env,
+ const std::string& agent_arg,
+ jobject class_loader,
+ bool allow_non_debuggable_tooling = false);
const std::list<std::unique_ptr<ti::Agent>>& GetAgents() const {
return agents_;
@@ -706,6 +741,8 @@ class Runtime {
return jdwp_provider_;
}
+ static constexpr int32_t kUnsetSdkVersion = 0u;
+
private:
static void InitPlatformSignalHandlers();
@@ -952,6 +989,22 @@ class Runtime {
// Whether the application should run in safe mode, that is, interpreter only.
bool safe_mode_;
+ // Whether access checks on hidden API should be performed.
+ bool do_hidden_api_checks_;
+
+ // Whether the application has used an API which is not restricted but about
+ // which we should issue a warning.
+ bool pending_hidden_api_warning_;
+
+ // Do not warn about the same hidden API access violation twice.
+ // This is only used for testing.
+ bool dedupe_hidden_api_warnings_;
+
+ // Hidden API can print warnings into the log and/or set a flag read by the
+ // framework to show a UI warning. If this flag is set, always set the flag
+ // when there is a warning. This is only used for testing.
+ bool always_set_hidden_api_warning_flag_;
+
// Whether threads should dump their native stack on SIGQUIT.
bool dump_native_stack_on_sig_quit_;
diff --git a/runtime/runtime_intrinsics.cc b/runtime/runtime_intrinsics.cc
index f710ebeb4c..3295a86e59 100644
--- a/runtime/runtime_intrinsics.cc
+++ b/runtime/runtime_intrinsics.cc
@@ -18,8 +18,8 @@
#include "art_method-inl.h"
#include "class_linker.h"
+#include "dex/invoke_type.h"
#include "intrinsics_enum.h"
-#include "invoke_type.h"
#include "mirror/class.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index 1dd3de5039..e78d952c1c 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -44,7 +44,7 @@ RUNTIME_OPTIONS_KEY (std::string, Image)
RUNTIME_OPTIONS_KEY (Unit, CheckJni)
RUNTIME_OPTIONS_KEY (Unit, JniOptsForceCopy)
RUNTIME_OPTIONS_KEY (std::string, JdwpOptions, "")
-RUNTIME_OPTIONS_KEY (JdwpProvider, JdwpProvider, JdwpProvider::kInternal)
+RUNTIME_OPTIONS_KEY (JdwpProvider, JdwpProvider, JdwpProvider::kNone)
RUNTIME_OPTIONS_KEY (MemoryKiB, MemoryMaximumSize, gc::Heap::kDefaultMaximumSize) // -Xmx
RUNTIME_OPTIONS_KEY (MemoryKiB, MemoryInitialSize, gc::Heap::kDefaultInitialSize) // -Xms
RUNTIME_OPTIONS_KEY (MemoryKiB, HeapGrowthLimit) // Default is 0 for unlimited
@@ -118,6 +118,8 @@ RUNTIME_OPTIONS_KEY (std::vector<std::string>, \
ImageCompilerOptions) // -Ximage-compiler-option ...
RUNTIME_OPTIONS_KEY (verifier::VerifyMode, \
Verify, verifier::VerifyMode::kEnable)
+RUNTIME_OPTIONS_KEY (int, TargetSdkVersion, Runtime::kUnsetSdkVersion)
+RUNTIME_OPTIONS_KEY (Unit, NoHiddenApiChecks)
RUNTIME_OPTIONS_KEY (std::string, NativeBridge)
RUNTIME_OPTIONS_KEY (unsigned int, ZygoteMaxFailedBoots, 10)
RUNTIME_OPTIONS_KEY (Unit, NoDexFileFallback)
diff --git a/runtime/stack.cc b/runtime/stack.cc
index dfdea28ae8..229238e0f7 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -154,7 +154,7 @@ mirror::Object* StackVisitor::GetThisObject() const {
return cur_shadow_frame_->GetVRegReference(0);
}
} else {
- CodeItemDataAccessor accessor(m);
+ CodeItemDataAccessor accessor(m->DexInstructionData());
if (!accessor.HasCodeItem()) {
UNIMPLEMENTED(ERROR) << "Failed to determine this object of abstract or proxy method: "
<< ArtMethod::PrettyMethod(m);
@@ -225,7 +225,7 @@ bool StackVisitor::GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKin
DCHECK_EQ(m, GetMethod());
// Can't be null or how would we compile its instructions?
DCHECK(m->GetCodeItem() != nullptr) << m->PrettyMethod();
- CodeItemDataAccessor accessor(m);
+ CodeItemDataAccessor accessor(m->DexInstructionData());
uint16_t number_of_dex_registers = accessor.RegistersSize();
DCHECK_LT(vreg, number_of_dex_registers);
const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
@@ -395,7 +395,7 @@ bool StackVisitor::SetVReg(ArtMethod* m,
uint16_t vreg,
uint32_t new_value,
VRegKind kind) {
- CodeItemDataAccessor accessor(m);
+ CodeItemDataAccessor accessor(m->DexInstructionData());
if (!accessor.HasCodeItem()) {
return false;
}
@@ -432,7 +432,7 @@ bool StackVisitor::SetVRegPair(ArtMethod* m,
LOG(FATAL) << "Expected long or double: kind_lo=" << kind_lo << ", kind_hi=" << kind_hi;
UNREACHABLE();
}
- CodeItemDataAccessor accessor(m);
+ CodeItemDataAccessor accessor(m->DexInstructionData());
if (!accessor.HasCodeItem()) {
return false;
}
diff --git a/runtime/string_reference.h b/runtime/string_reference.h
index 97661c6019..1ee5d6d53a 100644
--- a/runtime/string_reference.h
+++ b/runtime/string_reference.h
@@ -24,7 +24,7 @@
#include "dex/dex_file-inl.h"
#include "dex/dex_file_reference.h"
#include "dex/dex_file_types.h"
-#include "utf-inl.h"
+#include "dex/utf-inl.h"
namespace art {
diff --git a/runtime/subtype_check_bits.h b/runtime/subtype_check_bits.h
index 4305ff849a..462f203978 100644
--- a/runtime/subtype_check_bits.h
+++ b/runtime/subtype_check_bits.h
@@ -26,22 +26,22 @@ namespace art {
/**
* The SubtypeCheckBits memory layout (in bits):
*
- * Variable
- * |
- * <---- up to 23 bits ----> v +---> 1 bit
- * |
- * +-------------------------+--------+-----------+---++
- * | Bitstring | |
- * +-------------------------+--------+-----------+ |
- * | Path To Root | Next | (unused) | OF |
- * +---+---------------------+--------+ | |
- * | | | | | ... | | (0....0) | |
- * +---+---------------------+--------+-----------+----+
- * MSB LSB
+ * 1 bit Variable
+ * | |
+ * v v <---- up to 23 bits ---->
+ *
+ * +----+-----------+--------+-------------------------+
+ * | | Bitstring |
+ * + +-----------+--------+-------------------------+
+ * | OF | (unused) | Next | Path To Root |
+ * + | |--------+----+----------+----+----+
+ * | | (0....0) | | | ... | | |
+ * +----+-----------+--------+----+----------+----+----+
+ * MSB (most significant bit) LSB
*
* The bitstring takes up to 23 bits; anything exceeding that is truncated:
* - Path To Root is a list of chars, encoded as a BitString:
- * starting at the root (in MSB), each character is a sibling index unique to the parent,
+ * starting at the root (in LSB), each character is a sibling index unique to the parent,
* Paths longer than BitString::kCapacity are truncated to fit within the BitString.
* - Next is a single BitStringChar (immediately following Path To Root)
* When new children are assigned paths, they get allocated the parent's Next value.
@@ -57,8 +57,8 @@ namespace art {
* See subtype_check.h and subtype_check_info.h for more details.
*/
BITSTRUCT_DEFINE_START(SubtypeCheckBits, /*size*/ BitString::BitStructSizeOf() + 1u)
- BitStructUint</*lsb*/0, /*width*/1> overflow_;
- BitStructField<BitString, /*lsb*/1> bitstring_;
+ BitStructField<BitString, /*lsb*/ 0> bitstring_;
+ BitStructUint</*lsb*/ BitString::BitStructSizeOf(), /*width*/ 1> overflow_;
BITSTRUCT_DEFINE_END(SubtypeCheckBits);
} // namespace art
diff --git a/runtime/subtype_check_bits_and_status.h b/runtime/subtype_check_bits_and_status.h
index 11cb9b9d21..321a723985 100644
--- a/runtime/subtype_check_bits_and_status.h
+++ b/runtime/subtype_check_bits_and_status.h
@@ -19,6 +19,7 @@
#include "base/bit_struct.h"
#include "base/bit_utils.h"
+#include "base/casts.h"
#include "class_status.h"
#include "subtype_check_bits.h"
@@ -36,13 +37,13 @@ static constexpr size_t NonNumericBitSizeOf() {
}
/**
- * MSB LSB
- * +---------------------------------------------------+---------------+
- * | | |
- * | SubtypeCheckBits | ClassStatus |
- * | | |
- * +---------------------------------------------------+---------------+
- * <----- 24 bits -----> <-- 8 bits -->
+ * MSB (most significant bit) LSB
+ * +---------------+---------------------------------------------------+
+ * | | |
+ * | ClassStatus | SubtypeCheckBits |
+ * | | |
+ * +---------------+---------------------------------------------------+
+ * <-- 4 bits --> <----- 28 bits ----->
*
* Invariants:
*
@@ -53,20 +54,25 @@ static constexpr size_t NonNumericBitSizeOf() {
* This enables a highly efficient path comparison between any two labels:
*
* src <: target :=
- * src >> (32 - len(path-to-root(target))) == target >> (32 - len(path-to-root(target))
+ * (src & mask) == (target & mask) where mask := (1u << len(path-to-root(target))) - 1u
*
- * In the above example, the RHS operands are a function of the depth. Since the target
- * is known at compile time, it becomes:
- *
- * src >> #imm_target_shift == #imm
+ * In the above example, the `len()` (and thus `mask`) is a function of the depth.
+ * Since the target is known at compile time, it becomes
+ * (src & #imm_mask) == #imm
+ * or
+ * ((src - #imm) << #imm_shift_to_remove_high_bits) == 0
+ * or a similar expression chosen for the best performance or code size.
*
* (This requires that path-to-root in `target` is not truncated, i.e. it is in the Assigned state).
*/
-static constexpr size_t kClassStatusBitSize = 8u; // NonNumericBitSizeOf<ClassStatus>()
+static constexpr size_t kClassStatusBitSize = MinimumBitsToStore(enum_cast<>(ClassStatus::kLast));
+static_assert(kClassStatusBitSize == 4u, "ClassStatus should need 4 bits.");
BITSTRUCT_DEFINE_START(SubtypeCheckBitsAndStatus, BitSizeOf<BitString::StorageType>())
- BitStructField<ClassStatus, /*lsb*/0, /*width*/kClassStatusBitSize> status_;
- BitStructField<SubtypeCheckBits, /*lsb*/kClassStatusBitSize> subtype_check_info_;
- BitStructInt</*lsb*/0, /*width*/BitSizeOf<BitString::StorageType>()> int32_alias_;
+ BitStructField<SubtypeCheckBits, /*lsb*/ 0> subtype_check_info_;
+ BitStructField<ClassStatus,
+ /*lsb*/ SubtypeCheckBits::BitStructSizeOf(),
+ /*width*/ kClassStatusBitSize> status_;
+ BitStructInt</*lsb*/ 0, /*width*/ BitSizeOf<BitString::StorageType>()> int32_alias_;
BITSTRUCT_DEFINE_END(SubtypeCheckBitsAndStatus);
// Use the spare alignment from "ClassStatus" to store all the new SubtypeCheckInfo data.
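
A small sketch of the path-to-root comparison described in the comment above, with the bitstring now anchored at the LSB. The constants in main() are hypothetical values chosen only to illustrate the mask test; they are not taken from ART.

#include <cassert>
#include <cstdint>

// src is a subtype of target iff their encoded paths-to-root agree on the bits
// covered by the target's path: (src & mask) == (target & mask).
static bool PathToRootMatches(uint32_t src_bits, uint32_t target_bits, uint32_t target_path_bitlen) {
  const uint32_t mask =
      (target_path_bitlen >= 32) ? ~0u : ((1u << target_path_bitlen) - 1u);
  return (src_bits & mask) == (target_bits & mask);
}

int main() {
  // Hypothetical values: the target uses a 12-bit root character; the child keeps
  // those low 12 bits and adds a sibling index in the next (higher) bit positions.
  const uint32_t target = 0x005;
  const uint32_t child = (0x3u << 12) | 0x005;
  assert(PathToRootMatches(child, target, /*target_path_bitlen=*/ 12));
  assert(!PathToRootMatches(0x006, target, /*target_path_bitlen=*/ 12));
  return 0;
}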
diff --git a/runtime/subtype_check_info.h b/runtime/subtype_check_info.h
index 61d590bd59..08db77030e 100644
--- a/runtime/subtype_check_info.h
+++ b/runtime/subtype_check_info.h
@@ -296,8 +296,7 @@ struct SubtypeCheckInfo {
BitString::StorageType GetEncodedPathToRoot() const {
BitString::StorageType data = static_cast<BitString::StorageType>(GetPathToRoot());
// Bit strings are logically in the least-significant memory.
- // Shift it so the bits are all most-significant.
- return data << (BitSizeOf(data) - BitStructSizeOf<BitString>());
+ return data;
}
// Retrieve the path to root bitstring mask as a plain uintN_t that is amenable to
@@ -305,17 +304,7 @@ struct SubtypeCheckInfo {
BitString::StorageType GetEncodedPathToRootMask() const {
size_t num_bitchars = GetSafeDepth();
size_t bitlength = BitString::GetBitLengthTotalAtPosition(num_bitchars);
-
- BitString::StorageType mask_all =
- std::numeric_limits<BitString::StorageType>::max();
- BitString::StorageType mask_lsb =
- MaskLeastSignificant<BitString::StorageType>(
- BitSizeOf<BitString::StorageType>() - bitlength);
-
- BitString::StorageType result = mask_all & ~mask_lsb;
-
- // TODO: refactor above code into MaskMostSignificant?
- return result;
+ return MaskLeastSignificant<BitString::StorageType>(bitlength);
}
// Get the "Next" bitchar, assuming that there is one to get.
diff --git a/runtime/subtype_check_info_test.cc b/runtime/subtype_check_info_test.cc
index 338d75a285..91fcc07d65 100644
--- a/runtime/subtype_check_info_test.cc
+++ b/runtime/subtype_check_info_test.cc
@@ -65,7 +65,7 @@ size_t AsUint(const T& value) {
return uint_value;
}
-// Make max bistring, e.g. BitString[4095,7,255] for {12,3,8}
+// Make max bitstring, e.g. BitString[4095,15,2047] for {12,4,11}
template <size_t kCount = BitString::kCapacity>
BitString MakeBitStringMax() {
BitString bs{};
@@ -258,60 +258,62 @@ size_t LenForPos() { return BitString::GetBitLengthTotalAtPosition(kPos); }
TEST_F(SubtypeCheckInfoTest, EncodedPathToRoot) {
using StorageType = BitString::StorageType;
- SubtypeCheckInfo io =
+ SubtypeCheckInfo sci =
MakeSubtypeCheckInfo(/*path_to_root*/MakeBitStringMax(),
/*next*/BitStringChar{},
/*overflow*/false,
/*depth*/BitString::kCapacity);
- // 0b11111...000 where MSB == 1, and leading 1s = the maximum bitstring representation.
- EXPECT_EQ(MaxInt<StorageType>(LenForPos()) << (BitSizeOf<StorageType>() - LenForPos()),
- io.GetEncodedPathToRoot());
-
- EXPECT_EQ(MaxInt<StorageType>(LenForPos()) << (BitSizeOf<StorageType>() - LenForPos()),
- io.GetEncodedPathToRootMask());
-
- // 0b11111...000 where MSB == 1, and leading 1s = the maximum bitstring representation.
+ // 0b000...111 where LSB == 1, and trailing 1s = the maximum bitstring representation.
+ EXPECT_EQ(MaxInt<StorageType>(LenForPos()), sci.GetEncodedPathToRoot());
// The rest of this test is written assuming kCapacity == 3 for convenience.
// Please update the test if this changes.
ASSERT_EQ(3u, BitString::kCapacity);
ASSERT_EQ(12u, BitString::kBitSizeAtPosition[0]);
- ASSERT_EQ(3u, BitString::kBitSizeAtPosition[1]);
- ASSERT_EQ(8u, BitString::kBitSizeAtPosition[2]);
+ ASSERT_EQ(4u, BitString::kBitSizeAtPosition[1]);
+ ASSERT_EQ(11u, BitString::kBitSizeAtPosition[2]);
- SubtypeCheckInfo io2 =
+ SubtypeCheckInfo sci2 =
MakeSubtypeCheckInfoUnchecked(MakeBitStringMax<2u>(),
/*overflow*/false,
/*depth*/BitString::kCapacity);
-#define MAKE_ENCODED_PATH(pos0, pos1, pos2) (((pos0) << 3u << 8u << 9u) | ((pos1) << 8u << 9u) | ((pos2) << 9u))
+#define MAKE_ENCODED_PATH(pos0, pos1, pos2) \
+ (((pos0) << 0) | \
+ ((pos1) << BitString::kBitSizeAtPosition[0]) | \
+ ((pos2) << (BitString::kBitSizeAtPosition[0] + BitString::kBitSizeAtPosition[1])))
- EXPECT_EQ(MAKE_ENCODED_PATH(MaxInt<BitString::StorageType>(12), 0b111, 0b0), io2.GetEncodedPathToRoot());
- EXPECT_EQ(MAKE_ENCODED_PATH(MaxInt<BitString::StorageType>(12), 0b111, 0b11111111), io2.GetEncodedPathToRootMask());
+ EXPECT_EQ(MAKE_ENCODED_PATH(MaxInt<BitString::StorageType>(12), 0b1111, 0b0),
+ sci2.GetEncodedPathToRoot());
+ EXPECT_EQ(MAKE_ENCODED_PATH(MaxInt<BitString::StorageType>(12), 0b1111, 0b11111111111),
+ sci2.GetEncodedPathToRootMask());
- SubtypeCheckInfo io3 =
+ SubtypeCheckInfo sci3 =
MakeSubtypeCheckInfoUnchecked(MakeBitStringMax<2u>(),
/*overflow*/false,
/*depth*/BitString::kCapacity - 1u);
- EXPECT_EQ(MAKE_ENCODED_PATH(MaxInt<BitString::StorageType>(12), 0b111, 0b0), io3.GetEncodedPathToRoot());
- EXPECT_EQ(MAKE_ENCODED_PATH(MaxInt<BitString::StorageType>(12), 0b111, 0b0), io3.GetEncodedPathToRootMask());
+ EXPECT_EQ(MAKE_ENCODED_PATH(MaxInt<BitString::StorageType>(12), 0b1111, 0b0),
+ sci3.GetEncodedPathToRoot());
+ EXPECT_EQ(MAKE_ENCODED_PATH(MaxInt<BitString::StorageType>(12), 0b1111, 0b0),
+ sci3.GetEncodedPathToRootMask());
- SubtypeCheckInfo io4 =
+ SubtypeCheckInfo sci4 =
MakeSubtypeCheckInfoUnchecked(MakeBitString({0b1010101u}),
/*overflow*/false,
/*depth*/BitString::kCapacity - 2u);
- EXPECT_EQ(MAKE_ENCODED_PATH(0b1010101u, 0b000, 0b0), io4.GetEncodedPathToRoot());
- EXPECT_EQ(MAKE_ENCODED_PATH(MaxInt<BitString::StorageType>(12), 0b000, 0b0), io4.GetEncodedPathToRootMask());
+ EXPECT_EQ(MAKE_ENCODED_PATH(0b1010101u, 0b0000, 0b0), sci4.GetEncodedPathToRoot());
+ EXPECT_EQ(MAKE_ENCODED_PATH(MaxInt<BitString::StorageType>(12), 0b0000, 0b0),
+ sci4.GetEncodedPathToRootMask());
}
TEST_F(SubtypeCheckInfoTest, NewForRoot) {
- SubtypeCheckInfo io = SubtypeCheckInfo::CreateRoot();
- EXPECT_EQ(SubtypeCheckInfo::kAssigned, io.GetState()); // Root is always assigned.
- EXPECT_EQ(0u, GetPathToRoot(io).Length()); // Root's path length is 0.
- EXPECT_TRUE(HasNext(io)); // Root always has a "Next".
- EXPECT_EQ(MakeBitStringChar(1u), io.GetNext()); // Next>=1 to disambiguate from Uninitialized.
+ SubtypeCheckInfo sci = SubtypeCheckInfo::CreateRoot();
+ EXPECT_EQ(SubtypeCheckInfo::kAssigned, sci.GetState()); // Root is always assigned.
+ EXPECT_EQ(0u, GetPathToRoot(sci).Length()); // Root's path length is 0.
+ EXPECT_TRUE(HasNext(sci)); // Root always has a "Next".
+ EXPECT_EQ(MakeBitStringChar(1u), sci.GetNext()); // Next>=1 to disambiguate from Uninitialized.
}
TEST_F(SubtypeCheckInfoTest, CopyCleared) {
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 9f4e5441a5..9dc92f3788 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -2049,20 +2049,8 @@ void Thread::FinishStartup() {
// The thread counts as started from now on. We need to add it to the ThreadGroup. For regular
// threads, this is done in Thread.start() on the Java side.
- {
- // This is only ever done once. There's no benefit in caching the method.
- jmethodID thread_group_add = soa.Env()->GetMethodID(WellKnownClasses::java_lang_ThreadGroup,
- "add",
- "(Ljava/lang/Thread;)V");
- CHECK(thread_group_add != nullptr);
- ScopedLocalRef<jobject> thread_jobject(
- soa.Env(), soa.Env()->AddLocalReference<jobject>(Thread::Current()->GetPeer()));
- soa.Env()->CallNonvirtualVoidMethod(runtime->GetMainThreadGroup(),
- WellKnownClasses::java_lang_ThreadGroup,
- thread_group_add,
- thread_jobject.get());
- Thread::Current()->AssertNoPendingException();
- }
+ Thread::Current()->NotifyThreadGroup(soa, runtime->GetMainThreadGroup());
+ Thread::Current()->AssertNoPendingException();
}
void Thread::Shutdown() {
@@ -2076,6 +2064,28 @@ void Thread::Shutdown() {
}
}
+void Thread::NotifyThreadGroup(ScopedObjectAccessAlreadyRunnable& soa, jobject thread_group) {
+ ScopedLocalRef<jobject> thread_jobject(
+ soa.Env(), soa.Env()->AddLocalReference<jobject>(Thread::Current()->GetPeer()));
+ ScopedLocalRef<jobject> thread_group_jobject_scoped(
+ soa.Env(), nullptr);
+ jobject thread_group_jobject = thread_group;
+ if (thread_group == nullptr || kIsDebugBuild) {
+ // There is always a group set. Retrieve it.
+ thread_group_jobject_scoped.reset(
+ soa.Env()->GetObjectField(thread_jobject.get(),
+ WellKnownClasses::java_lang_Thread_group));
+ thread_group_jobject = thread_group_jobject_scoped.get();
+ if (kIsDebugBuild && thread_group != nullptr) {
+ CHECK(soa.Env()->IsSameObject(thread_group, thread_group_jobject));
+ }
+ }
+ soa.Env()->CallNonvirtualVoidMethod(thread_group_jobject,
+ WellKnownClasses::java_lang_ThreadGroup,
+ WellKnownClasses::java_lang_ThreadGroup_add,
+ thread_jobject.get());
+}
+
Thread::Thread(bool daemon)
: tls32_(daemon),
wait_monitor_(nullptr),
@@ -2743,6 +2753,199 @@ jobjectArray Thread::InternalStackTraceToStackTraceElementArray(
return result;
}
+jobjectArray Thread::CreateAnnotatedStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const {
+ // This code allocates. Do not allow it to operate with a pending exception.
+ if (IsExceptionPending()) {
+ return nullptr;
+ }
+
+ // If flip_function is not null, it means we have run a checkpoint
+ // before the thread wakes up to execute the flip function and the
+ // thread roots haven't been forwarded. So the following access to
+ // the roots (locks or methods in the frames) would be bad. Run it
+ // here. TODO: clean up.
+ // Note: copied from DumpJavaStack.
+ {
+ Thread* this_thread = const_cast<Thread*>(this);
+ Closure* flip_func = this_thread->GetFlipFunction();
+ if (flip_func != nullptr) {
+ flip_func->Run(this_thread);
+ }
+ }
+
+ class CollectFramesAndLocksStackVisitor : public MonitorObjectsStackVisitor {
+ public:
+ CollectFramesAndLocksStackVisitor(const ScopedObjectAccessAlreadyRunnable& soaa_in,
+ Thread* self,
+ Context* context)
+ : MonitorObjectsStackVisitor(self, context),
+ wait_jobject_(soaa_in.Env(), nullptr),
+ block_jobject_(soaa_in.Env(), nullptr),
+ soaa_(soaa_in) {}
+
+ protected:
+ VisitMethodResult StartMethod(ArtMethod* m, size_t frame_nr ATTRIBUTE_UNUSED)
+ OVERRIDE
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ ObjPtr<mirror::StackTraceElement> obj = CreateStackTraceElement(
+ soaa_, m, GetDexPc(/* abort on error */ false));
+ if (obj == nullptr) {
+ return VisitMethodResult::kEndStackWalk;
+ }
+ stack_trace_elements_.emplace_back(soaa_.Env(), soaa_.AddLocalReference<jobject>(obj.Ptr()));
+ return VisitMethodResult::kContinueMethod;
+ }
+
+ VisitMethodResult EndMethod(ArtMethod* m ATTRIBUTE_UNUSED) OVERRIDE {
+ lock_objects_.push_back({});
+ lock_objects_[lock_objects_.size() - 1].swap(frame_lock_objects_);
+
+ DCHECK_EQ(lock_objects_.size(), stack_trace_elements_.size());
+
+ return VisitMethodResult::kContinueMethod;
+ }
+
+ void VisitWaitingObject(mirror::Object* obj, ThreadState state ATTRIBUTE_UNUSED)
+ OVERRIDE
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ wait_jobject_.reset(soaa_.AddLocalReference<jobject>(obj));
+ }
+ void VisitSleepingObject(mirror::Object* obj)
+ OVERRIDE
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ wait_jobject_.reset(soaa_.AddLocalReference<jobject>(obj));
+ }
+ void VisitBlockedOnObject(mirror::Object* obj,
+ ThreadState state ATTRIBUTE_UNUSED,
+ uint32_t owner_tid ATTRIBUTE_UNUSED)
+ OVERRIDE
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ block_jobject_.reset(soaa_.AddLocalReference<jobject>(obj));
+ }
+ void VisitLockedObject(mirror::Object* obj)
+ OVERRIDE
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ frame_lock_objects_.emplace_back(soaa_.Env(), soaa_.AddLocalReference<jobject>(obj));
+ }
+
+ public:
+ std::vector<ScopedLocalRef<jobject>> stack_trace_elements_;
+ ScopedLocalRef<jobject> wait_jobject_;
+ ScopedLocalRef<jobject> block_jobject_;
+ std::vector<std::vector<ScopedLocalRef<jobject>>> lock_objects_;
+
+ private:
+ const ScopedObjectAccessAlreadyRunnable& soaa_;
+
+ std::vector<ScopedLocalRef<jobject>> frame_lock_objects_;
+ };
+
+ std::unique_ptr<Context> context(Context::Create());
+ CollectFramesAndLocksStackVisitor dumper(soa, const_cast<Thread*>(this), context.get());
+ dumper.WalkStack();
+
+ // There should not be a pending exception. Otherwise, return with it pending.
+ if (IsExceptionPending()) {
+ return nullptr;
+ }
+
+ // Now go and create Java arrays.
+
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+
+ StackHandleScope<6> hs(soa.Self());
+ mirror::Class* aste_array_class = class_linker->FindClass(
+ soa.Self(),
+ "[Ldalvik/system/AnnotatedStackTraceElement;",
+ ScopedNullHandle<mirror::ClassLoader>());
+ if (aste_array_class == nullptr) {
+ return nullptr;
+ }
+ Handle<mirror::Class> h_aste_array_class(hs.NewHandle<mirror::Class>(aste_array_class));
+
+ mirror::Class* o_array_class = class_linker->FindClass(soa.Self(),
+ "[Ljava/lang/Object;",
+ ScopedNullHandle<mirror::ClassLoader>());
+ if (o_array_class == nullptr) {
+ // This should not fail in a healthy runtime.
+ soa.Self()->AssertPendingException();
+ return nullptr;
+ }
+ Handle<mirror::Class> h_o_array_class(hs.NewHandle<mirror::Class>(o_array_class));
+
+ Handle<mirror::Class> h_aste_class(hs.NewHandle<mirror::Class>(
+ h_aste_array_class->GetComponentType()));
+ ArtField* stack_trace_element_field = h_aste_class->FindField(
+ soa.Self(), h_aste_class.Get(), "stackTraceElement", "Ljava/lang/StackTraceElement;");
+ DCHECK(stack_trace_element_field != nullptr);
+ ArtField* held_locks_field = h_aste_class->FindField(
+ soa.Self(), h_aste_class.Get(), "heldLocks", "[Ljava/lang/Object;");
+ DCHECK(held_locks_field != nullptr);
+ ArtField* blocked_on_field = h_aste_class->FindField(
+ soa.Self(), h_aste_class.Get(), "blockedOn", "Ljava/lang/Object;");
+ DCHECK(blocked_on_field != nullptr);
+
+ size_t length = dumper.stack_trace_elements_.size();
+ ObjPtr<mirror::ObjectArray<mirror::Object>> array =
+ mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), aste_array_class, length);
+ if (array == nullptr) {
+ soa.Self()->AssertPendingOOMException();
+ return nullptr;
+ }
+
+ ScopedLocalRef<jobjectArray> result(soa.Env(), soa.Env()->AddLocalReference<jobjectArray>(array));
+
+ MutableHandle<mirror::Object> handle(hs.NewHandle<mirror::Object>(nullptr));
+ MutableHandle<mirror::ObjectArray<mirror::Object>> handle2(
+ hs.NewHandle<mirror::ObjectArray<mirror::Object>>(nullptr));
+ for (size_t i = 0; i != length; ++i) {
+ handle.Assign(h_aste_class->AllocObject(soa.Self()));
+ if (handle == nullptr) {
+ soa.Self()->AssertPendingOOMException();
+ return nullptr;
+ }
+
+ // Set stack trace element.
+ stack_trace_element_field->SetObject<false>(
+ handle.Get(), soa.Decode<mirror::Object>(dumper.stack_trace_elements_[i].get()));
+
+ // Create locked-on array.
+ if (!dumper.lock_objects_[i].empty()) {
+ handle2.Assign(mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(),
+ h_o_array_class.Get(),
+ dumper.lock_objects_[i].size()));
+ if (handle2 == nullptr) {
+ soa.Self()->AssertPendingOOMException();
+ return nullptr;
+ }
+ int32_t j = 0;
+ for (auto& scoped_local : dumper.lock_objects_[i]) {
+ if (scoped_local == nullptr) {
+ continue;
+ }
+ handle2->Set(j, soa.Decode<mirror::Object>(scoped_local.get()));
+ DCHECK(!soa.Self()->IsExceptionPending());
+ j++;
+ }
+ held_locks_field->SetObject<false>(handle.Get(), handle2.Get());
+ }
+
+ // Set blocked-on object.
+ if (i == 0) {
+ if (dumper.block_jobject_ != nullptr) {
+ blocked_on_field->SetObject<false>(
+ handle.Get(), soa.Decode<mirror::Object>(dumper.block_jobject_.get()));
+ }
+ }
+
+ ScopedLocalRef<jobject> elem(soa.Env(), soa.AddLocalReference<jobject>(handle.Get()));
+ soa.Env()->SetObjectArrayElement(result.get(), i, elem.get());
+ DCHECK(!soa.Self()->IsExceptionPending());
+ }
+
+ return result.release();
+}
+
void Thread::ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...) {
va_list args;
va_start(args, fmt);
@@ -3425,7 +3628,7 @@ class ReferenceMapVisitor : public StackVisitor {
const CodeInfoEncoding& _encoding,
const StackMap& map,
RootVisitor& _visitor)
- : number_of_dex_registers(CodeItemDataAccessor(method).RegistersSize()),
+ : number_of_dex_registers(method->DexInstructionData().RegistersSize()),
code_info(_code_info),
encoding(_encoding),
dex_register_map(code_info.GetDexRegisterMapOf(map,
diff --git a/runtime/thread.h b/runtime/thread.h
index 1e89887c3e..295685e799 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -511,6 +511,12 @@ class Thread {
static void FinishStartup();
static void Shutdown();
+ // Notify this thread's thread-group that this thread has started.
+ // Note: the given thread-group is used as a fast path and verified in debug build. If the value
+ // is null, the thread's thread-group is loaded from the peer.
+ void NotifyThreadGroup(ScopedObjectAccessAlreadyRunnable& soa, jobject thread_group = nullptr)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
// JNI methods
JNIEnvExt* GetJniEnv() const {
return tlsPtr_.jni_env;
@@ -599,6 +605,9 @@ class Thread {
jobjectArray output_array = nullptr, int* stack_depth = nullptr)
REQUIRES_SHARED(Locks::mutator_lock_);
+ jobjectArray CreateAnnotatedStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
bool HasDebuggerShadowFrames() const {
return tlsPtr_.frame_id_to_shadow_frame != nullptr;
}
diff --git a/runtime/ti/agent.cc b/runtime/ti/agent.cc
index 62bdde6790..15c514e593 100644
--- a/runtime/ti/agent.cc
+++ b/runtime/ti/agent.cc
@@ -117,15 +117,18 @@ std::unique_ptr<Agent> AgentSpec::DoDlOpen(JNIEnv* env,
: JavaVMExt::GetLibrarySearchPath(env, class_loader));
bool needs_native_bridge = false;
+ std::string nativeloader_error_msg;
void* dlopen_handle = android::OpenNativeLibrary(env,
Runtime::Current()->GetTargetSdkVersion(),
name_.c_str(),
class_loader,
library_path.get(),
&needs_native_bridge,
- error_msg);
+ &nativeloader_error_msg);
if (dlopen_handle == nullptr) {
- *error_msg = StringPrintf("Unable to dlopen %s: %s", name_.c_str(), dlerror());
+ *error_msg = StringPrintf("Unable to dlopen %s: %s",
+ name_.c_str(),
+ nativeloader_error_msg.c_str());
*error = kLoadingError;
return nullptr;
}
diff --git a/runtime/type_lookup_table.cc b/runtime/type_lookup_table.cc
index 6eb3d83631..925a9089cb 100644
--- a/runtime/type_lookup_table.cc
+++ b/runtime/type_lookup_table.cc
@@ -21,7 +21,7 @@
#include "base/bit_utils.h"
#include "dex/dex_file-inl.h"
-#include "utf-inl.h"
+#include "dex/utf-inl.h"
#include "utils.h"
namespace art {
@@ -66,7 +66,7 @@ std::unique_ptr<TypeLookupTable> TypeLookupTable::Open(const uint8_t* dex_file_p
}
TypeLookupTable::TypeLookupTable(const DexFile& dex_file, uint8_t* storage)
- : dex_file_begin_(dex_file.Begin()),
+ : dex_data_begin_(dex_file.DataBegin()),
raw_data_length_(RawDataLength(dex_file.NumClassDefs())),
mask_(CalculateMask(dex_file.NumClassDefs())),
entries_(storage != nullptr ? reinterpret_cast<Entry*>(storage) : new Entry[mask_ + 1]),
@@ -106,7 +106,7 @@ TypeLookupTable::TypeLookupTable(const DexFile& dex_file, uint8_t* storage)
TypeLookupTable::TypeLookupTable(const uint8_t* dex_file_pointer,
const uint8_t* raw_data,
uint32_t num_class_defs)
- : dex_file_begin_(dex_file_pointer),
+ : dex_data_begin_(dex_file_pointer),
raw_data_length_(RawDataLength(num_class_defs)),
mask_(CalculateMask(num_class_defs)),
entries_(reinterpret_cast<Entry*>(const_cast<uint8_t*>(raw_data))),
diff --git a/runtime/type_lookup_table.h b/runtime/type_lookup_table.h
index 6a6f47fba2..a1f9519f18 100644
--- a/runtime/type_lookup_table.h
+++ b/runtime/type_lookup_table.h
@@ -18,8 +18,8 @@
#define ART_RUNTIME_TYPE_LOOKUP_TABLE_H_
#include "dex/dex_file_types.h"
+#include "dex/utf.h"
#include "leb128.h"
-#include "utf.h"
namespace art {
@@ -43,7 +43,7 @@ class TypeLookupTable {
// Method searches class_def_idx by class descriptor and its hash.
// If no data is found, the method returns dex::kDexNoIndex.
- ALWAYS_INLINE uint32_t Lookup(const char* str, uint32_t hash) const {
+ uint32_t Lookup(const char* str, uint32_t hash) const {
uint32_t pos = hash & GetSizeMask();
// Thanks to special insertion algorithm, the element at position pos can be empty or start of
// bucket.
@@ -127,8 +127,8 @@ class TypeLookupTable {
uint32_t num_class_defs);
bool IsStringsEquals(const char* str, uint32_t str_offset) const {
- const uint8_t* ptr = dex_file_begin_ + str_offset;
- CHECK(dex_file_begin_ != nullptr);
+ const uint8_t* ptr = dex_data_begin_ + str_offset;
+ CHECK(dex_data_begin_ != nullptr);
// Skip string length.
DecodeUnsignedLeb128(&ptr);
return CompareModifiedUtf8ToModifiedUtf8AsUtf16CodePointValues(
@@ -160,7 +160,7 @@ class TypeLookupTable {
// Find the last entry in a chain.
uint32_t FindLastEntryInBucket(uint32_t cur_pos) const;
- const uint8_t* dex_file_begin_;
+ const uint8_t* dex_data_begin_;
const uint32_t raw_data_length_;
const uint32_t mask_;
std::unique_ptr<Entry[]> entries_;
diff --git a/runtime/type_lookup_table_test.cc b/runtime/type_lookup_table_test.cc
index d04652a8e7..b6ab6da78c 100644
--- a/runtime/type_lookup_table_test.cc
+++ b/runtime/type_lookup_table_test.cc
@@ -20,8 +20,8 @@
#include "common_runtime_test.h"
#include "dex/dex_file-inl.h"
+#include "dex/utf-inl.h"
#include "scoped_thread_state_change-inl.h"
-#include "utf-inl.h"
namespace art {
diff --git a/runtime/utils.cc b/runtime/utils.cc
index bd4175f5fd..393b18e1b3 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -26,13 +26,12 @@
#include <memory>
+#include "android-base/file.h"
#include "android-base/stringprintf.h"
#include "android-base/strings.h"
-#include "base/file_utils.h"
-#include "dex/dex_file-inl.h"
+#include "dex/utf-inl.h"
#include "os.h"
-#include "utf-inl.h"
#if defined(__APPLE__)
#include <crt_externs.h>
@@ -46,6 +45,7 @@
namespace art {
+using android::base::ReadFileToString;
using android::base::StringAppendF;
using android::base::StringPrintf;
@@ -63,6 +63,7 @@ pid_t GetTid() {
std::string GetThreadName(pid_t tid) {
std::string result;
+ // TODO: make this less Linux-specific.
if (ReadFileToString(StringPrintf("/proc/self/task/%d/comm", tid), &result)) {
result.resize(result.size() - 1); // Lose the trailing '\n'.
} else {
@@ -124,41 +125,6 @@ std::string PrettyDescriptor(const char* descriptor) {
return result;
}
-std::string PrettyJavaAccessFlags(uint32_t access_flags) {
- std::string result;
- if ((access_flags & kAccPublic) != 0) {
- result += "public ";
- }
- if ((access_flags & kAccProtected) != 0) {
- result += "protected ";
- }
- if ((access_flags & kAccPrivate) != 0) {
- result += "private ";
- }
- if ((access_flags & kAccFinal) != 0) {
- result += "final ";
- }
- if ((access_flags & kAccStatic) != 0) {
- result += "static ";
- }
- if ((access_flags & kAccAbstract) != 0) {
- result += "abstract ";
- }
- if ((access_flags & kAccInterface) != 0) {
- result += "interface ";
- }
- if ((access_flags & kAccTransient) != 0) {
- result += "transient ";
- }
- if ((access_flags & kAccVolatile) != 0) {
- result += "volatile ";
- }
- if ((access_flags & kAccSynchronized) != 0) {
- result += "synchronized ";
- }
- return result;
-}
-
std::string PrettySize(int64_t byte_count) {
// The byte thresholds at which we display amounts. A byte count is displayed
// in unit U when kUnitThresholds[U] <= bytes < kUnitThresholds[U+1].
@@ -185,57 +151,6 @@ std::string PrettySize(int64_t byte_count) {
negative_str, byte_count / kBytesPerUnit[i], kUnitStrings[i]);
}
-static inline constexpr bool NeedsEscaping(uint16_t ch) {
- return (ch < ' ' || ch > '~');
-}
-
-std::string PrintableChar(uint16_t ch) {
- std::string result;
- result += '\'';
- if (NeedsEscaping(ch)) {
- StringAppendF(&result, "\\u%04x", ch);
- } else {
- result += static_cast<std::string::value_type>(ch);
- }
- result += '\'';
- return result;
-}
-
-std::string PrintableString(const char* utf) {
- std::string result;
- result += '"';
- const char* p = utf;
- size_t char_count = CountModifiedUtf8Chars(p);
- for (size_t i = 0; i < char_count; ++i) {
- uint32_t ch = GetUtf16FromUtf8(&p);
- if (ch == '\\') {
- result += "\\\\";
- } else if (ch == '\n') {
- result += "\\n";
- } else if (ch == '\r') {
- result += "\\r";
- } else if (ch == '\t') {
- result += "\\t";
- } else {
- const uint16_t leading = GetLeadingUtf16Char(ch);
-
- if (NeedsEscaping(leading)) {
- StringAppendF(&result, "\\u%04x", leading);
- } else {
- result += static_cast<std::string::value_type>(leading);
- }
-
- const uint32_t trailing = GetTrailingUtf16Char(ch);
- if (trailing != 0) {
- // All high surrogates will need escaping.
- StringAppendF(&result, "\\u%04x", trailing);
- }
- }
- }
- result += '"';
- return result;
-}
-
std::string GetJniShortName(const std::string& class_descriptor, const std::string& method) {
// Remove the leading 'L' and trailing ';'...
std::string class_name(class_descriptor);
@@ -611,6 +526,7 @@ void SetThreadName(const char* thread_name) {
void GetTaskStats(pid_t tid, char* state, int* utime, int* stime, int* task_cpu) {
*utime = *stime = *task_cpu = 0;
std::string stats;
+ // TODO: make this less Linux-specific.
if (!ReadFileToString(StringPrintf("/proc/self/task/%d/stat", tid), &stats)) {
return;
}
diff --git a/runtime/utils.h b/runtime/utils.h
index 789498ce09..443b0cc398 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -67,12 +67,6 @@ static inline uint32_t PointerToLowMemUInt32(const void* p) {
return intp & 0xFFFFFFFFU;
}
-std::string PrintableChar(uint16_t ch);
-
-// Returns an ASCII string corresponding to the given UTF-8 string.
-// Java escapes are used for non-ASCII characters.
-std::string PrintableString(const char* utf8);
-
// Used to implement PrettyClass, PrettyField, PrettyMethod, and PrettyTypeOf,
// one of which is probably more useful to you.
// Returns a human-readable equivalent of 'descriptor'. So "I" would be "int",
@@ -82,10 +76,6 @@ void AppendPrettyDescriptor(const char* descriptor, std::string* result);
std::string PrettyDescriptor(const char* descriptor);
std::string PrettyDescriptor(Primitive::Type type);
-// Returns a human-readable version of the Java part of the access flags, e.g., "private static "
-// (note the trailing whitespace).
-std::string PrettyJavaAccessFlags(uint32_t access_flags);
-
// Returns a human-readable size string such as "1MB".
std::string PrettySize(int64_t size_in_bytes);
@@ -289,6 +279,20 @@ static inline void CheckedCall(const Func& function, const char* what, Args... a
}
}
+// Hash bytes using a relatively fast hash.
+static inline size_t HashBytes(const uint8_t* data, size_t len) {
+ size_t hash = 0x811c9dc5;
+ for (uint32_t i = 0; i < len; ++i) {
+ hash = (hash * 16777619) ^ data[i];
+ }
+ hash += hash << 13;
+ hash ^= hash >> 7;
+ hash += hash << 3;
+ hash ^= hash >> 17;
+ hash += hash << 5;
+ return hash;
+}
+
} // namespace art
#endif // ART_RUNTIME_UTILS_H_
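
The HashBytes helper added above combines an FNV-1 style loop (offset basis 0x811c9dc5, prime 16777619) with a final bit-avalanche pass. A standalone restatement for reference, assuming nothing beyond what the patch shows; the function and variable names here are illustrative.

#include <cstdint>
#include <cstdio>
#include <cstring>

// Same mixing as the HashBytes added above: FNV-1 core, then a finalizer so that
// nearby inputs spread across the hash range.
static size_t HashBytesSketch(const uint8_t* data, size_t len) {
  size_t hash = 0x811c9dc5;
  for (size_t i = 0; i < len; ++i) {
    hash = (hash * 16777619) ^ data[i];
  }
  hash += hash << 13;
  hash ^= hash >> 7;
  hash += hash << 3;
  hash ^= hash >> 17;
  hash += hash << 5;
  return hash;
}

int main() {
  const char* descriptor = "Ljava/lang/Object;";
  printf("hash=%zx\n", HashBytesSketch(reinterpret_cast<const uint8_t*>(descriptor),
                                       strlen(descriptor)));
  return 0;
}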
diff --git a/runtime/vdex_file.cc b/runtime/vdex_file.cc
index a53556ffcc..7428e98dbb 100644
--- a/runtime/vdex_file.cc
+++ b/runtime/vdex_file.cc
@@ -19,15 +19,20 @@
#include <sys/mman.h> // For the PROT_* and MAP_* constants.
#include <memory>
+#include <unordered_set>
#include <android-base/logging.h>
#include "base/bit_utils.h"
#include "base/stl_util.h"
#include "base/unix_file/fd_file.h"
+#include "dex/art_dex_file_loader.h"
#include "dex/dex_file.h"
#include "dex/dex_file_loader.h"
#include "dex_to_dex_decompiler.h"
+#include "hidden_api_access_flags.h"
+#include "leb128.h"
+#include "quicken_info.h"
namespace art {
@@ -45,10 +50,12 @@ bool VdexFile::Header::IsVersionValid() const {
VdexFile::Header::Header(uint32_t number_of_dex_files,
uint32_t dex_size,
+ uint32_t dex_shared_data_size,
uint32_t verifier_deps_size,
uint32_t quickening_info_size)
: number_of_dex_files_(number_of_dex_files),
dex_size_(dex_size),
+ dex_shared_data_size_(dex_shared_data_size),
verifier_deps_size_(verifier_deps_size),
quickening_info_size_(quickening_info_size) {
memcpy(magic_, kVdexMagic, sizeof(kVdexMagic));
@@ -57,11 +64,14 @@ VdexFile::Header::Header(uint32_t number_of_dex_files,
DCHECK(IsVersionValid());
}
-std::unique_ptr<VdexFile> VdexFile::Open(const std::string& vdex_filename,
- bool writable,
- bool low_4gb,
- bool unquicken,
- std::string* error_msg) {
+std::unique_ptr<VdexFile> VdexFile::OpenAtAddress(uint8_t* mmap_addr,
+ size_t mmap_size,
+ bool mmap_reuse,
+ const std::string& vdex_filename,
+ bool writable,
+ bool low_4gb,
+ bool unquicken,
+ std::string* error_msg) {
if (!OS::FileExists(vdex_filename.c_str())) {
*error_msg = "File " + vdex_filename + " does not exist.";
return nullptr;
@@ -85,23 +95,43 @@ std::unique_ptr<VdexFile> VdexFile::Open(const std::string& vdex_filename,
return nullptr;
}
- return Open(vdex_file->Fd(), vdex_length, vdex_filename, writable, low_4gb, unquicken, error_msg);
+ return OpenAtAddress(mmap_addr,
+ mmap_size,
+ mmap_reuse,
+ vdex_file->Fd(),
+ vdex_length,
+ vdex_filename,
+ writable,
+ low_4gb,
+ unquicken,
+ error_msg);
}
-std::unique_ptr<VdexFile> VdexFile::Open(int file_fd,
- size_t vdex_length,
- const std::string& vdex_filename,
- bool writable,
- bool low_4gb,
- bool unquicken,
- std::string* error_msg) {
- std::unique_ptr<MemMap> mmap(MemMap::MapFile(
+std::unique_ptr<VdexFile> VdexFile::OpenAtAddress(uint8_t* mmap_addr,
+ size_t mmap_size,
+ bool mmap_reuse,
+ int file_fd,
+ size_t vdex_length,
+ const std::string& vdex_filename,
+ bool writable,
+ bool low_4gb,
+ bool unquicken,
+ std::string* error_msg) {
+ if (mmap_addr != nullptr && mmap_size < vdex_length) {
+ LOG(WARNING) << "Insufficient pre-allocated space to mmap vdex.";
+ mmap_addr = nullptr;
+ mmap_reuse = false;
+ }
+ CHECK(!mmap_reuse || mmap_addr != nullptr);
+ std::unique_ptr<MemMap> mmap(MemMap::MapFileAtAddress(
+ mmap_addr,
vdex_length,
(writable || unquicken) ? PROT_READ | PROT_WRITE : PROT_READ,
unquicken ? MAP_PRIVATE : MAP_SHARED,
file_fd,
0 /* start offset */,
low_4gb,
+ mmap_reuse,
vdex_filename.c_str(),
error_msg));
if (mmap == nullptr) {
@@ -120,9 +150,8 @@ std::unique_ptr<VdexFile> VdexFile::Open(int file_fd,
if (!vdex->OpenAllDexFiles(&unique_ptr_dex_files, error_msg)) {
return nullptr;
}
- Unquicken(MakeNonOwningPointerVector(unique_ptr_dex_files),
- vdex->GetQuickeningInfo(),
- /* decompile_return_instruction */ false);
+ vdex->Unquicken(MakeNonOwningPointerVector(unique_ptr_dex_files),
+ /* decompile_return_instruction */ false);
// Update the quickening info size to pretend there isn't any.
reinterpret_cast<Header*>(vdex->mmap_->Begin())->quickening_info_size_ = 0;
}
@@ -135,19 +164,21 @@ const uint8_t* VdexFile::GetNextDexFileData(const uint8_t* cursor) const {
DCHECK(cursor == nullptr || (cursor > Begin() && cursor <= End()));
if (cursor == nullptr) {
// Beginning of the iteration, return the first dex file if there is one.
- return HasDexSection() ? DexBegin() : nullptr;
+ return HasDexSection() ? DexBegin() + sizeof(QuickeningTableOffsetType) : nullptr;
} else {
// Fetch the next dex file. Return null if there is none.
const uint8_t* data = cursor + reinterpret_cast<const DexFile::Header*>(cursor)->file_size_;
// Dex files are required to be 4-byte aligned. The OatWriter makes sure they are; see
// OatWriter::SeekToDexFiles.
data = AlignUp(data, 4);
- return (data == DexEnd()) ? nullptr : data;
+
+ return (data == DexEnd()) ? nullptr : data + sizeof(QuickeningTableOffsetType);
}
}
bool VdexFile::OpenAllDexFiles(std::vector<std::unique_ptr<const DexFile>>* dex_files,
std::string* error_msg) {
+ const ArtDexFileLoader dex_file_loader;
size_t i = 0;
for (const uint8_t* dex_file_start = GetNextDexFileData(nullptr);
dex_file_start != nullptr;
@@ -156,14 +187,17 @@ bool VdexFile::OpenAllDexFiles(std::vector<std::unique_ptr<const DexFile>>* dex_
// TODO: Supply the location information for a vdex file.
static constexpr char kVdexLocation[] = "";
std::string location = DexFileLoader::GetMultiDexLocation(i, kVdexLocation);
- std::unique_ptr<const DexFile> dex(DexFileLoader::Open(dex_file_start,
- size,
- location,
- GetLocationChecksum(i),
- nullptr /*oat_dex_file*/,
- false /*verify*/,
- false /*verify_checksum*/,
- error_msg));
+ std::unique_ptr<const DexFile> dex(dex_file_loader.OpenWithDataSection(
+ dex_file_start,
+ size,
+ /*data_base*/ nullptr,
+ /*data_size*/ 0u,
+ location,
+ GetLocationChecksum(i),
+ nullptr /*oat_dex_file*/,
+ false /*verify*/,
+ false /*verify_checksum*/,
+ error_msg));
if (dex == nullptr) {
return false;
}
@@ -172,69 +206,87 @@ bool VdexFile::OpenAllDexFiles(std::vector<std::unique_ptr<const DexFile>>* dex_
return true;
}
-void VdexFile::Unquicken(const std::vector<const DexFile*>& dex_files,
- ArrayRef<const uint8_t> quickening_info,
- bool decompile_return_instruction) {
- if (quickening_info.size() == 0 && !decompile_return_instruction) {
- // Bail early if there is no quickening info and no need to decompile
- // RETURN_VOID_NO_BARRIER instructions to RETURN_VOID instructions.
- return;
+void VdexFile::Unquicken(const std::vector<const DexFile*>& target_dex_files,
+ bool decompile_return_instruction) const {
+ const uint8_t* source_dex = GetNextDexFileData(nullptr);
+ for (const DexFile* target_dex : target_dex_files) {
+ UnquickenDexFile(*target_dex, source_dex, decompile_return_instruction);
+ source_dex = GetNextDexFileData(source_dex);
}
+ DCHECK(source_dex == nullptr);
+}
- for (uint32_t i = 0; i < dex_files.size(); ++i) {
- UnquickenDexFile(*dex_files[i], quickening_info, decompile_return_instruction);
- }
+uint32_t VdexFile::GetQuickeningInfoTableOffset(const uint8_t* source_dex_begin) const {
+ DCHECK_GE(source_dex_begin, DexBegin());
+ DCHECK_LT(source_dex_begin, DexEnd());
+ return reinterpret_cast<const QuickeningTableOffsetType*>(source_dex_begin)[-1];
}
-typedef __attribute__((__aligned__(1))) uint32_t unaligned_uint32_t;
+QuickenInfoOffsetTableAccessor VdexFile::GetQuickenInfoOffsetTable(
+ const uint8_t* source_dex_begin,
+ uint32_t num_method_ids,
+ const ArrayRef<const uint8_t>& quickening_info) const {
+ // The offset is in the preheader right before the dex file.
+ const uint32_t offset = GetQuickeningInfoTableOffset(source_dex_begin);
+ const uint8_t* data_ptr = quickening_info.data() + offset;
+ return QuickenInfoOffsetTableAccessor(data_ptr, num_method_ids);
+}
-static uint32_t GetDebugInfoOffsetInternal(const DexFile& dex_file,
- uint32_t offset_in_code_item,
- const ArrayRef<const uint8_t>& quickening_info) {
- if (quickening_info.size() == 0) {
- // No quickening info: offset is the right one, return it.
- return offset_in_code_item;
- }
- uint32_t quickening_offset = offset_in_code_item - dex_file.Size();
- return *reinterpret_cast<const unaligned_uint32_t*>(quickening_info.data() + quickening_offset);
+QuickenInfoOffsetTableAccessor VdexFile::GetQuickenInfoOffsetTable(
+ const DexFile& dex_file,
+ const ArrayRef<const uint8_t>& quickening_info) const {
+ return GetQuickenInfoOffsetTable(dex_file.Begin(), dex_file.NumMethodIds(), quickening_info);
}
-static uint32_t GetQuickeningInfoOffsetFrom(const DexFile& dex_file,
- uint32_t offset_in_code_item,
- const ArrayRef<const uint8_t>& quickening_info) {
- if (offset_in_code_item < dex_file.Size()) {
- return VdexFile::kNoQuickeningInfoOffset;
- }
- if (quickening_info.size() == 0) {
- // No quickening info.
- return VdexFile::kNoQuickeningInfoOffset;
+static ArrayRef<const uint8_t> GetQuickeningInfoAt(const ArrayRef<const uint8_t>& quickening_info,
+ uint32_t quickening_offset) {
+ ArrayRef<const uint8_t> remaining = quickening_info.SubArray(quickening_offset);
+ return remaining.SubArray(0u, QuickenInfoTable::SizeInBytes(remaining));
+}
+
+static uint32_t GetQuickeningInfoOffset(const QuickenInfoOffsetTableAccessor& table,
+ uint32_t dex_method_index,
+ const ArrayRef<const uint8_t>& quickening_info) {
+ DCHECK(!quickening_info.empty());
+ uint32_t remainder;
+ uint32_t offset = table.ElementOffset(dex_method_index, &remainder);
+ // Decode the sizes for the remainder offsets (not covered by the table).
+ while (remainder != 0) {
+ offset += GetQuickeningInfoAt(quickening_info, offset).size();
+ --remainder;
}
- uint32_t quickening_offset = offset_in_code_item - dex_file.Size();
+ return offset;
+}
- // Add 2 * sizeof(uint32_t) for the debug info offset and the data offset.
- CHECK_LE(quickening_offset + 2 * sizeof(uint32_t), quickening_info.size());
- return *reinterpret_cast<const unaligned_uint32_t*>(
- quickening_info.data() + quickening_offset + sizeof(uint32_t));
+void VdexFile::UnquickenDexFile(const DexFile& target_dex_file,
+ const DexFile& source_dex_file,
+ bool decompile_return_instruction) const {
+ UnquickenDexFile(target_dex_file, source_dex_file.Begin(), decompile_return_instruction);
}
-static ArrayRef<const uint8_t> GetQuickeningInfoAt(const ArrayRef<const uint8_t>& quickening_info,
- uint32_t quickening_offset) {
- return (quickening_offset == VdexFile::kNoQuickeningInfoOffset)
- ? ArrayRef<const uint8_t>(nullptr, 0)
- : quickening_info.SubArray(
- quickening_offset + sizeof(uint32_t),
- *reinterpret_cast<const unaligned_uint32_t*>(
- quickening_info.data() + quickening_offset));
+static void UpdateAccessFlags(uint8_t* data, uint32_t new_flag, bool is_method) {
+ // Go back 1 uleb to start.
+ data = ReverseSearchUnsignedLeb128(data);
+ if (is_method) {
+ // Methods have another uleb field before the access flags.
+ data = ReverseSearchUnsignedLeb128(data);
+ }
+ DCHECK_EQ(HiddenApiAccessFlags::RemoveFromDex(DecodeUnsignedLeb128WithoutMovingCursor(data)),
+ new_flag);
+ UpdateUnsignedLeb128(data, new_flag);
}
void VdexFile::UnquickenDexFile(const DexFile& target_dex_file,
- ArrayRef<const uint8_t> quickening_info,
- bool decompile_return_instruction) {
+ const uint8_t* source_dex_begin,
+ bool decompile_return_instruction) const {
+ ArrayRef<const uint8_t> quickening_info = GetQuickeningInfo();
if (quickening_info.size() == 0 && !decompile_return_instruction) {
// Bail early if there is no quickening info and no need to decompile
// RETURN_VOID_NO_BARRIER instructions to RETURN_VOID instructions.
return;
}
+ // Make sure to not unquicken the same code item multiple times.
+ std::unordered_set<const DexFile::CodeItem*> unquickened_code_item;
for (uint32_t i = 0; i < target_dex_file.NumClassDefs(); ++i) {
const DexFile::ClassDef& class_def = target_dex_file.GetClassDef(i);
const uint8_t* class_data = target_dex_file.GetClassData(class_def);
@@ -242,47 +294,49 @@ void VdexFile::UnquickenDexFile(const DexFile& target_dex_file,
for (ClassDataItemIterator class_it(target_dex_file, class_data);
class_it.HasNext();
class_it.Next()) {
- if (class_it.IsAtMethod() && class_it.GetMethodCodeItem() != nullptr) {
+ if (class_it.IsAtMethod()) {
const DexFile::CodeItem* code_item = class_it.GetMethodCodeItem();
- uint32_t quickening_offset = GetQuickeningInfoOffsetFrom(
- target_dex_file, code_item->debug_info_off_, quickening_info);
- if (quickening_offset != VdexFile::kNoQuickeningInfoOffset) {
- // If we have quickening data, put back the original debug_info_off.
- const_cast<DexFile::CodeItem*>(code_item)->SetDebugInfoOffset(
- GetDebugInfoOffsetInternal(target_dex_file,
- code_item->debug_info_off_,
- quickening_info));
+ if (code_item != nullptr && unquickened_code_item.emplace(code_item).second) {
+ ArrayRef<const uint8_t> quicken_data;
+ if (!quickening_info.empty()) {
+ const uint32_t quickening_offset = GetQuickeningInfoOffset(
+ GetQuickenInfoOffsetTable(source_dex_begin,
+ target_dex_file.NumMethodIds(),
+ quickening_info),
+ class_it.GetMemberIndex(),
+ quickening_info);
+ quicken_data = GetQuickeningInfoAt(quickening_info, quickening_offset);
+ }
+ optimizer::ArtDecompileDEX(
+ target_dex_file,
+ *code_item,
+ quicken_data,
+ decompile_return_instruction);
}
- optimizer::ArtDecompileDEX(
- target_dex_file,
- *code_item,
- GetQuickeningInfoAt(quickening_info, quickening_offset),
- decompile_return_instruction);
+ UpdateAccessFlags(const_cast<uint8_t*>(class_it.DataPointer()),
+ class_it.GetMemberAccessFlags(),
+ /*is_method*/ true);
+ } else {
+ UpdateAccessFlags(const_cast<uint8_t*>(class_it.DataPointer()),
+ class_it.GetMemberAccessFlags(),
+ /*is_method*/ false);
}
}
}
}
}
-uint32_t VdexFile::GetDebugInfoOffset(const DexFile& dex_file, uint32_t offset_in_code_item) const {
- return GetDebugInfoOffsetInternal(dex_file, offset_in_code_item, GetQuickeningInfo());
-}
-
-const uint8_t* VdexFile::GetQuickenedInfoOf(const DexFile& dex_file,
- uint32_t code_item_offset) const {
+ArrayRef<const uint8_t> VdexFile::GetQuickenedInfoOf(const DexFile& dex_file,
+ uint32_t dex_method_idx) const {
ArrayRef<const uint8_t> quickening_info = GetQuickeningInfo();
- uint32_t quickening_offset = GetQuickeningInfoOffsetFrom(
- dex_file, dex_file.GetCodeItem(code_item_offset)->debug_info_off_, quickening_info);
-
- return GetQuickeningInfoAt(quickening_info, quickening_offset).data();
-}
-
-bool VdexFile::CanEncodeQuickenedData(const DexFile& dex_file) {
- // We are going to use the debug_info_off_ to signal there is
- // quickened data, by putting a value greater than dex_file.Size(). So
- // make sure we have some room in the offset by checking that we have at least
- // half of the range of a uint32_t.
- return dex_file.Size() <= (std::numeric_limits<uint32_t>::max() >> 1);
+ if (quickening_info.empty()) {
+ return ArrayRef<const uint8_t>();
+ }
+ const uint32_t quickening_offset = GetQuickeningInfoOffset(
+ GetQuickenInfoOffsetTable(dex_file, quickening_info),
+ dex_method_idx,
+ quickening_info);
+ return GetQuickeningInfoAt(quickening_info, quickening_offset);
}
} // namespace art
diff --git a/runtime/vdex_file.h b/runtime/vdex_file.h
index 2d9fcab59c..0f347952c9 100644
--- a/runtime/vdex_file.h
+++ b/runtime/vdex_file.h
@@ -24,6 +24,7 @@
#include "base/macros.h"
#include "mem_map.h"
#include "os.h"
+#include "quicken_info.h"
namespace art {
@@ -35,18 +36,17 @@ class DexFile;
// File format:
// VdexFile::Header fixed-length header
//
-// DEX[0] array of the input DEX files
-// DEX[1] the bytecode may have been quickened
+// quicken_table_off[0] offset into QuickeningInfo section for offset table for DEX[0].
+// DEX[0] array of the input DEX files, the bytecode may have been quickened.
+// quicken_table_off[1]
+// DEX[1]
// ...
// DEX[D]
// VerifierDeps
// uint8[D][] verification dependencies
// QuickeningInfo
// uint8[D][] quickening data
-// unaligned_uint32_t[D][2][] table of offsets pair:
-// uint32_t[0] contains original CodeItem::debug_info_off_
-// uint32_t[1] contains quickening data offset from the start
-// of QuickeningInfo
+// uint32[D][] quickening data offset tables
class VdexFile {
public:
@@ -54,6 +54,7 @@ class VdexFile {
public:
Header(uint32_t number_of_dex_files_,
uint32_t dex_size,
+ uint32_t dex_shared_data_size,
uint32_t verifier_deps_size,
uint32_t quickening_info_size);
@@ -64,6 +65,7 @@ class VdexFile {
bool IsValid() const { return IsMagicValid() && IsVersionValid(); }
uint32_t GetDexSize() const { return dex_size_; }
+ uint32_t GetDexSharedDataSize() const { return dex_shared_data_size_; }
uint32_t GetVerifierDepsSize() const { return verifier_deps_size_; }
uint32_t GetQuickeningInfoSize() const { return quickening_info_size_; }
uint32_t GetNumberOfDexFiles() const { return number_of_dex_files_; }
@@ -72,6 +74,7 @@ class VdexFile {
return sizeof(Header) +
GetSizeOfChecksumsSection() +
GetDexSize() +
+ GetDexSharedDataSize() +
GetVerifierDepsSize() +
GetQuickeningInfoSize();
}
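For orientation, a minimal sketch of how the section start offsets follow from these header sizes under the new layout; it is illustrative only, not part of the patch, and the member name is hypothetical (the accessors are the ones used elsewhere in this header):

    // Hypothetical VdexFile member, for illustration only: section start
    // pointers in file order, derived from the header sizes.
    const uint8_t* VdexFile::QuickeningInfoBeginSketch() const {
      // Dex files follow the fixed header and the per-dex checksums.
      const uint8_t* dex_begin =
          Begin() + sizeof(Header) + GetHeader().GetSizeOfChecksumsSection();
      // The new shared (compact dex) data section sits right after the dex files.
      const uint8_t* dex_shared_data_begin = dex_begin + GetHeader().GetDexSize();
      // VerifierDeps follows the shared data, QuickeningInfo comes last.
      const uint8_t* verifier_deps_begin =
          dex_shared_data_begin + GetHeader().GetDexSharedDataSize();
      const uint8_t* quickening_info_begin =
          verifier_deps_begin + GetHeader().GetVerifierDepsSize();
      return quickening_info_begin;
    }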
@@ -84,29 +87,67 @@ class VdexFile {
private:
static constexpr uint8_t kVdexMagic[] = { 'v', 'd', 'e', 'x' };
- // Last update: Lookup-friendly encoding for quickening info.
- static constexpr uint8_t kVdexVersion[] = { '0', '1', '1', '\0' };
+ // Last update: Fix separate section for compact dex data.
+ static constexpr uint8_t kVdexVersion[] = { '0', '1', '7', '\0' };
uint8_t magic_[4];
uint8_t version_[4];
uint32_t number_of_dex_files_;
uint32_t dex_size_;
+ uint32_t dex_shared_data_size_;
uint32_t verifier_deps_size_;
uint32_t quickening_info_size_;
friend class VdexFile;
};
+ // Note: The file is called "primary" to match the naming with profiles.
+ static const constexpr char* kVdexNameInDmFile = "primary.vdex";
+
typedef uint32_t VdexChecksum;
+ using QuickeningTableOffsetType = uint32_t;
explicit VdexFile(MemMap* mmap) : mmap_(mmap) {}
// Returns nullptr if the vdex file cannot be opened or is not valid.
+ // The mmap_* parameters can be left empty (nullptr/0/false) to allocate at a random address.
+ static std::unique_ptr<VdexFile> OpenAtAddress(uint8_t* mmap_addr,
+ size_t mmap_size,
+ bool mmap_reuse,
+ const std::string& vdex_filename,
+ bool writable,
+ bool low_4gb,
+ bool unquicken,
+ std::string* error_msg);
+
+ // Returns nullptr if the vdex file cannot be opened or is not valid.
+ // The mmap_* parameters can be left empty (nullptr/0/false) to allocate at a random address.
+ static std::unique_ptr<VdexFile> OpenAtAddress(uint8_t* mmap_addr,
+ size_t mmap_size,
+ bool mmap_reuse,
+ int file_fd,
+ size_t vdex_length,
+ const std::string& vdex_filename,
+ bool writable,
+ bool low_4gb,
+ bool unquicken,
+ std::string* error_msg);
+
+ // Returns nullptr if the vdex file cannot be opened or is not valid.
static std::unique_ptr<VdexFile> Open(const std::string& vdex_filename,
bool writable,
bool low_4gb,
bool unquicken,
- std::string* error_msg);
+ std::string* error_msg) {
+ return OpenAtAddress(nullptr,
+ 0,
+ false,
+ vdex_filename,
+ writable,
+ low_4gb,
+ unquicken,
+ error_msg);
+ }
// Returns nullptr if the vdex file cannot be opened or is not valid.
static std::unique_ptr<VdexFile> Open(int file_fd,
@@ -115,7 +156,18 @@ class VdexFile {
bool writable,
bool low_4gb,
bool unquicken,
- std::string* error_msg);
+ std::string* error_msg) {
+ return OpenAtAddress(nullptr,
+ 0,
+ false,
+ file_fd,
+ vdex_length,
+ vdex_filename,
+ writable,
+ low_4gb,
+ unquicken,
+ error_msg);
+ }
const uint8_t* Begin() const { return mmap_->Begin(); }
const uint8_t* End() const { return mmap_->End(); }
@@ -127,7 +179,8 @@ class VdexFile {
ArrayRef<const uint8_t> GetVerifierDepsData() const {
return ArrayRef<const uint8_t>(
- DexBegin() + GetHeader().GetDexSize(), GetHeader().GetVerifierDepsSize());
+ DexBegin() + GetHeader().GetDexSize() + GetHeader().GetDexSharedDataSize(),
+ GetHeader().GetVerifierDepsSize());
}
ArrayRef<const uint8_t> GetQuickeningInfo() const {
@@ -160,29 +213,42 @@ class VdexFile {
// `decompile_return_instruction` controls if RETURN_VOID_NO_BARRIER instructions are
// decompiled to RETURN_VOID instructions using the slower ClassDataItemIterator
// instead of the faster QuickeningInfoIterator.
- static void Unquicken(const std::vector<const DexFile*>& dex_files,
- ArrayRef<const uint8_t> quickening_info,
- bool decompile_return_instruction);
+ // Always unquickens using the vdex dex files as the source for quicken tables.
+ void Unquicken(const std::vector<const DexFile*>& target_dex_files,
+ bool decompile_return_instruction) const;
// Fully unquicken `target_dex_file` based on `quickening_info`.
- static void UnquickenDexFile(const DexFile& target_dex_file,
- ArrayRef<const uint8_t> quickening_info,
- bool decompile_return_instruction);
-
- // Return the quickening info of the given code item.
- const uint8_t* GetQuickenedInfoOf(const DexFile& dex_file, uint32_t code_item_offset) const;
-
- uint32_t GetDebugInfoOffset(const DexFile& dex_file, uint32_t offset_in_code_item) const;
+ void UnquickenDexFile(const DexFile& target_dex_file,
+ const DexFile& source_dex_file,
+ bool decompile_return_instruction) const;
- static bool CanEncodeQuickenedData(const DexFile& dex_file);
+ // Return the quickening info of a given method index (or an empty ArrayRef if there is none).
+ ArrayRef<const uint8_t> GetQuickenedInfoOf(const DexFile& dex_file,
+ uint32_t dex_method_idx) const;
- static constexpr uint32_t kNoQuickeningInfoOffset = -1;
-
- private:
bool HasDexSection() const {
return GetHeader().GetDexSize() != 0;
}
+ private:
+ uint32_t GetQuickeningInfoTableOffset(const uint8_t* source_dex_begin) const;
+
+ // Source dex must be in the vdex file.
+ void UnquickenDexFile(const DexFile& target_dex_file,
+ const uint8_t* source_dex_begin,
+ bool decompile_return_instruction) const;
+
+ QuickenInfoOffsetTableAccessor GetQuickenInfoOffsetTable(
+ const DexFile& dex_file,
+ const ArrayRef<const uint8_t>& quickening_info) const;
+
+ QuickenInfoOffsetTableAccessor GetQuickenInfoOffsetTable(
+ const uint8_t* source_dex_begin,
+ uint32_t num_method_ids,
+ const ArrayRef<const uint8_t>& quickening_info) const;
+
+ bool ContainsDexFile(const DexFile& dex_file) const;
+
const uint8_t* DexBegin() const {
return Begin() + sizeof(Header) + GetHeader().GetSizeOfChecksumsSection();
}
@@ -191,8 +257,6 @@ class VdexFile {
return DexBegin() + GetHeader().GetDexSize();
}
- uint32_t GetDexFileIndex(const DexFile& dex_file) const;
-
std::unique_ptr<MemMap> mmap_;
DISALLOW_COPY_AND_ASSIGN(VdexFile);
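A minimal caller-side sketch of the per-method quickening lookup introduced above; it is not part of the patch, the helper name is hypothetical, and the include paths are approximate:

    #include <android-base/logging.h>
    #include "base/array_ref.h"
    #include "dex/dex_file.h"
    #include "vdex_file.h"

    // Hypothetical helper: report which methods of `dex_file` carry quicken data.
    // An empty ArrayRef from GetQuickenedInfoOf means the method has none.
    void DumpQuickenInfo(const art::VdexFile& vdex, const art::DexFile& dex_file) {
      for (uint32_t method_idx = 0; method_idx < dex_file.NumMethodIds(); ++method_idx) {
        art::ArrayRef<const uint8_t> data = vdex.GetQuickenedInfoOf(dex_file, method_idx);
        if (!data.empty()) {
          LOG(INFO) << "method " << method_idx << ": " << data.size()
                    << " bytes of quicken data";
        }
      }
    }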
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 416ada84cd..afb3224944 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -1685,10 +1685,15 @@ void MethodVerifier::Dump(VariableIndentationOutputStream* vios) {
for (const DexInstructionPcPair& inst : code_item_accessor_) {
const size_t dex_pc = inst.DexPc();
- RegisterLine* reg_line = reg_table_.GetLine(dex_pc);
- if (reg_line != nullptr) {
- vios->Stream() << reg_line->Dump(this) << "\n";
+
+ // Might be asked to dump before the table is initialized.
+ if (reg_table_.IsInitialized()) {
+ RegisterLine* reg_line = reg_table_.GetLine(dex_pc);
+ if (reg_line != nullptr) {
+ vios->Stream() << reg_line->Dump(this) << "\n";
+ }
}
+
vios->Stream()
<< StringPrintf("0x%04zx", dex_pc) << ": " << GetInstructionFlags(dex_pc).ToString() << " ";
const bool kDumpHexOfInstruction = false;
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index cadf4eb0ba..26c598f224 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -77,6 +77,10 @@ class PcToRegisterLineTable {
void Init(RegisterTrackingMode mode, InstructionFlags* flags, uint32_t insns_size,
uint16_t registers_size, MethodVerifier* verifier);
+ bool IsInitialized() const {
+ return !register_lines_.empty();
+ }
+
RegisterLine* GetLine(size_t idx) const {
return register_lines_[idx].get();
}
diff --git a/runtime/verifier/reg_type-inl.h b/runtime/verifier/reg_type-inl.h
index f719782727..9e12d636d4 100644
--- a/runtime/verifier/reg_type-inl.h
+++ b/runtime/verifier/reg_type-inl.h
@@ -48,9 +48,6 @@ inline bool RegType::CanAccess(const RegType& other) const {
inline bool RegType::CanAccessMember(ObjPtr<mirror::Class> klass, uint32_t access_flags) const {
DCHECK(IsReferenceTypes());
- if ((access_flags & kAccPublic) != 0) {
- return true;
- }
if (IsNull()) {
return true;
}
diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc
index dc57f81a67..5fe10f5c12 100644
--- a/runtime/well_known_classes.cc
+++ b/runtime/well_known_classes.cc
@@ -110,6 +110,7 @@ jmethodID WellKnownClasses::java_lang_System_runFinalization = nullptr;
jmethodID WellKnownClasses::java_lang_Thread_dispatchUncaughtException;
jmethodID WellKnownClasses::java_lang_Thread_init;
jmethodID WellKnownClasses::java_lang_Thread_run;
+jmethodID WellKnownClasses::java_lang_ThreadGroup_add;
jmethodID WellKnownClasses::java_lang_ThreadGroup_removeThread;
jmethodID WellKnownClasses::java_nio_DirectByteBuffer_init;
jmethodID WellKnownClasses::libcore_reflect_AnnotationFactory_createAnnotation;
@@ -347,6 +348,7 @@ void WellKnownClasses::Init(JNIEnv* env) {
java_lang_Thread_dispatchUncaughtException = CacheMethod(env, java_lang_Thread, false, "dispatchUncaughtException", "(Ljava/lang/Throwable;)V");
java_lang_Thread_init = CacheMethod(env, java_lang_Thread, false, "<init>", "(Ljava/lang/ThreadGroup;Ljava/lang/String;IZ)V");
java_lang_Thread_run = CacheMethod(env, java_lang_Thread, false, "run", "()V");
+ java_lang_ThreadGroup_add = CacheMethod(env, java_lang_ThreadGroup, false, "add", "(Ljava/lang/Thread;)V");
java_lang_ThreadGroup_removeThread = CacheMethod(env, java_lang_ThreadGroup, false, "threadTerminated", "(Ljava/lang/Thread;)V");
java_nio_DirectByteBuffer_init = CacheMethod(env, java_nio_DirectByteBuffer, false, "<init>", "(JI)V");
libcore_reflect_AnnotationFactory_createAnnotation = CacheMethod(env, libcore_reflect_AnnotationFactory, true, "createAnnotation", "(Ljava/lang/Class;[Llibcore/reflect/AnnotationMember;)Ljava/lang/annotation/Annotation;");
@@ -496,6 +498,7 @@ void WellKnownClasses::Clear() {
java_lang_Thread_dispatchUncaughtException = nullptr;
java_lang_Thread_init = nullptr;
java_lang_Thread_run = nullptr;
+ java_lang_ThreadGroup_add = nullptr;
java_lang_ThreadGroup_removeThread = nullptr;
java_nio_DirectByteBuffer_init = nullptr;
libcore_reflect_AnnotationFactory_createAnnotation = nullptr;
diff --git a/runtime/well_known_classes.h b/runtime/well_known_classes.h
index 024971ae3d..9e0b079b7b 100644
--- a/runtime/well_known_classes.h
+++ b/runtime/well_known_classes.h
@@ -121,6 +121,7 @@ struct WellKnownClasses {
static jmethodID java_lang_Thread_dispatchUncaughtException;
static jmethodID java_lang_Thread_init;
static jmethodID java_lang_Thread_run;
+ static jmethodID java_lang_ThreadGroup_add;
static jmethodID java_lang_ThreadGroup_removeThread;
static jmethodID java_nio_DirectByteBuffer_init;
static jmethodID libcore_reflect_AnnotationFactory_createAnnotation;
diff --git a/runtime/zip_archive.cc b/runtime/zip_archive.cc
index f3d4d77214..2caed4b391 100644
--- a/runtime/zip_archive.cc
+++ b/runtime/zip_archive.cc
@@ -29,6 +29,7 @@
#include "base/bit_utils.h"
#include "base/unix_file/fd_file.h"
+#include "dex/dex_file.h"
namespace art {
@@ -49,11 +50,15 @@ bool ZipEntry::IsUncompressed() {
return zip_entry_->method == kCompressStored;
}
-bool ZipEntry::IsAlignedTo(size_t alignment) {
+bool ZipEntry::IsAlignedTo(size_t alignment) const {
DCHECK(IsPowerOfTwo(alignment)) << alignment;
return IsAlignedParam(zip_entry_->offset, static_cast<int>(alignment));
}
+bool ZipEntry::IsAlignedToDexHeader() const {
+ return IsAlignedTo(alignof(DexFile::Header));
+}
+
ZipEntry::~ZipEntry() {
delete zip_entry_;
}
diff --git a/runtime/zip_archive.h b/runtime/zip_archive.h
index 75f8757f6c..70518e1360 100644
--- a/runtime/zip_archive.h
+++ b/runtime/zip_archive.h
@@ -59,7 +59,8 @@ class ZipEntry {
uint32_t GetCrc32();
bool IsUncompressed();
- bool IsAlignedTo(size_t alignment);
+ bool IsAlignedTo(size_t alignment) const;
+ bool IsAlignedToDexHeader() const;
private:
ZipEntry(ZipArchiveHandle handle,
diff --git a/test/004-NativeAllocations/src-art/Main.java b/test/004-NativeAllocations/src-art/Main.java
index 8712755125..6b1c48d286 100644
--- a/test/004-NativeAllocations/src-art/Main.java
+++ b/test/004-NativeAllocations/src-art/Main.java
@@ -14,82 +14,109 @@
* limitations under the License.
*/
-import java.lang.reflect.*;
import java.lang.Runtime;
+import java.lang.ref.ReferenceQueue;
+import java.lang.ref.PhantomReference;
import dalvik.system.VMRuntime;
public class Main {
- static Object nativeLock = new Object();
static Object deadlockLock = new Object();
- static boolean aboutToDeadlockLock = false;
- static int nativeBytes = 0;
- static Object runtime;
- static Method register_native_allocation;
- static Method register_native_free;
- static long maxMem = 0;
-
- static class NativeAllocation {
- private int bytes;
-
- NativeAllocation(int bytes, boolean testingDeadlock) throws Exception {
- this.bytes = bytes;
- register_native_allocation.invoke(runtime, bytes);
-
- // Register native allocation can only provide guarantees bounding
- // the maximum outstanding allocations if finalizers don't time
- // out. In case finalizers have timed out, wait longer for them
- // now to complete so we can test the guarantees.
- if (!testingDeadlock) {
- VMRuntime.runFinalization(0);
- }
+ static VMRuntime runtime = VMRuntime.getRuntime();
+ static volatile boolean aboutToDeadlock = false;
- synchronized (nativeLock) {
- if (!testingDeadlock) {
- nativeBytes += bytes;
- if (nativeBytes > 2 * maxMem) {
- throw new OutOfMemoryError();
- }
- }
- }
- }
+ // Save ref as a static field to ensure it doesn't get GC'd before the
+ // referent is enqueued.
+ static PhantomReference ref = null;
+ static class DeadlockingFinalizer {
protected void finalize() throws Exception {
- synchronized (nativeLock) {
- nativeBytes -= bytes;
- }
- register_native_free.invoke(runtime, bytes);
- aboutToDeadlockLock = true;
- synchronized (deadlockLock) {
- }
+ aboutToDeadlock = true;
+ synchronized (deadlockLock) { }
+ }
+ }
+
+ private static void allocateDeadlockingFinalizer() {
+ new DeadlockingFinalizer();
+ }
+
+ public static PhantomReference allocPhantom(ReferenceQueue<Object> queue) {
+ return new PhantomReference(new Object(), queue);
+ }
+
+ // Test that calling registerNativeAllocation triggers a GC eventually
+ // after a substantial number of registered native bytes.
+ private static void checkRegisterNativeAllocation() throws Exception {
+ long maxMem = Runtime.getRuntime().maxMemory();
+ int size = (int)(maxMem / 32);
+ int allocationCount = 256;
+ int maxExpectedGcDurationMs = 2000;
+
+ ReferenceQueue<Object> queue = new ReferenceQueue<Object>();
+ ref = allocPhantom(queue);
+ long total = 0;
+ for (int i = 0; !ref.isEnqueued() && i < allocationCount; ++i) {
+ runtime.registerNativeAllocation(size);
+ total += size;
+
+ // Sleep a little bit to ensure not all of the calls to
+ // registerNativeAllocation complete while GC is in the process of
+ // running.
+ Thread.sleep(maxExpectedGcDurationMs / allocationCount);
+ }
+
+ // Wait up to maxExpectedGcDurationMs to give GC a chance to finish
+ // running. If the reference isn't enqueued after that, then it is
+ // pretty unlikely (though technically still possible) that GC was
+ // triggered as intended.
+ if (queue.remove(maxExpectedGcDurationMs) == null) {
+ throw new RuntimeException("GC failed to complete");
+ }
+
+ while (total > 0) {
+ runtime.registerNativeFree(size);
+ total -= size;
+ }
+ }
+
+ // Call registerNativeAllocation repeatedly at a high rate to trigger the
+ // case of blocking registerNativeAllocation.
+ private static void triggerBlockingRegisterNativeAllocation() throws Exception {
+ long maxMem = Runtime.getRuntime().maxMemory();
+ int size = (int)(maxMem / 5);
+ int allocationCount = 10;
+
+ long total = 0;
+ for (int i = 0; i < allocationCount; ++i) {
+ runtime.registerNativeAllocation(size);
+ total += size;
+ }
+
+ while (total > 0) {
+ runtime.registerNativeFree(size);
+ total -= size;
}
}
public static void main(String[] args) throws Exception {
- Class<?> vm_runtime = Class.forName("dalvik.system.VMRuntime");
- Method get_runtime = vm_runtime.getDeclaredMethod("getRuntime");
- runtime = get_runtime.invoke(null);
- register_native_allocation = vm_runtime.getDeclaredMethod("registerNativeAllocation", Integer.TYPE);
- register_native_free = vm_runtime.getDeclaredMethod("registerNativeFree", Integer.TYPE);
- maxMem = Runtime.getRuntime().maxMemory();
- int count = 16;
- int size = (int)(maxMem / 2 / count);
- int allocation_count = 256;
- NativeAllocation[] allocations = new NativeAllocation[count];
- for (int i = 0; i < allocation_count; ++i) {
- allocations[i % count] = new NativeAllocation(size, false);
+ // Test that registerNativeAllocation triggers GC.
+ // Run this a few times in a loop to reduce the chances that the test
+ // is flaky and make sure registerNativeAllocation continues to work
+ // after the first GC is triggered.
+ for (int i = 0; i < 20; ++i) {
+ checkRegisterNativeAllocation();
}
- // Test that we don't get a deadlock if we are holding nativeLock. If there is no timeout,
- // then we will get a finalizer timeout exception.
- aboutToDeadlockLock = false;
+
+ // Test that we don't get a deadlock if we call
+ // registerNativeAllocation with a blocked finalizer.
synchronized (deadlockLock) {
- for (int i = 0; aboutToDeadlockLock != true; ++i) {
- allocations[i % count] = new NativeAllocation(size, true);
+ allocateDeadlockingFinalizer();
+ while (!aboutToDeadlock) {
+ Runtime.getRuntime().gc();
}
+
// Do more allocations now that the finalizer thread is deadlocked so that we force
- // finalization and timeout.
- for (int i = 0; i < 10; ++i) {
- allocations[i % count] = new NativeAllocation(size, true);
- }
+ // finalization and timeout.
+ triggerBlockingRegisterNativeAllocation();
}
System.out.println("Test complete");
}
diff --git a/test/004-ThreadStress/src/Main.java b/test/004-ThreadStress/src-art/Main.java
index 6ad160c1a6..a142934638 100644
--- a/test/004-ThreadStress/src/Main.java
+++ b/test/004-ThreadStress/src-art/Main.java
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+import dalvik.system.VMRuntime;
+
import java.lang.reflect.*;
import java.util.ArrayList;
import java.util.Arrays;
@@ -32,23 +34,26 @@ import java.util.concurrent.Semaphore;
// (It is important to pass Main if you want to give parameters...)
//
// ThreadStress command line parameters:
-// -n X ............ number of threads
-// -d X ............ number of daemon threads
-// -o X ............ number of overall operations
-// -t X ............ number of operations per thread
-// -p X ............ number of permits granted by semaphore
-// --dumpmap ....... print the frequency map
-// -oom:X .......... frequency of OOM (double)
-// -sigquit:X ...... frequency of SigQuit (double)
-// -alloc:X ........ frequency of Alloc (double)
-// -largealloc:X ... frequency of LargeAlloc (double)
-// -stacktrace:X ... frequency of StackTrace (double)
-// -exit:X ......... frequency of Exit (double)
-// -sleep:X ........ frequency of Sleep (double)
-// -wait:X ......... frequency of Wait (double)
-// -timedwait:X .... frequency of TimedWait (double)
-// -syncandwork:X .. frequency of SyncAndWork (double)
-// -queuedwait:X ... frequency of QueuedWait (double)
+// -n X .............. number of threads
+// -d X .............. number of daemon threads
+// -o X .............. number of overall operations
+// -t X .............. number of operations per thread
+// -p X .............. number of permits granted by semaphore
+// --dumpmap ......... print the frequency map
+// --locks-only ...... select a pre-set frequency map with lock-related operations only
+// --allocs-only ..... select a pre-set frequency map with allocation-related operations only
+// -oom:X ............ frequency of OOM (double)
+// -sigquit:X ........ frequency of SigQuit (double)
+// -alloc:X .......... frequency of Alloc (double)
+// -largealloc:X ..... frequency of LargeAlloc (double)
+// -nonmovingalloc:X.. frequency of NonMovingAlloc (double)
+// -stacktrace:X ..... frequency of StackTrace (double)
+// -exit:X ........... frequency of Exit (double)
+// -sleep:X .......... frequency of Sleep (double)
+// -wait:X ........... frequency of Wait (double)
+// -timedwait:X ...... frequency of TimedWait (double)
+// -syncandwork:X .... frequency of SyncAndWork (double)
+// -queuedwait:X ..... frequency of QueuedWait (double)
public class Main implements Runnable {
@@ -156,6 +161,25 @@ public class Main implements Runnable {
}
}
+ private final static class NonMovingAlloc extends Operation {
+ private final static int ALLOC_SIZE = 1024; // Needs to be small enough to not be in LOS.
+ private final static int ALLOC_COUNT = 1024;
+ private final static VMRuntime runtime = VMRuntime.getRuntime();
+
+ @Override
+ public boolean perform() {
+ try {
+ List<byte[]> l = new ArrayList<byte[]>();
+ for (int i = 0; i < ALLOC_COUNT; i++) {
+ l.add((byte[]) runtime.newNonMovableArray(byte.class, ALLOC_SIZE));
+ }
+ } catch (OutOfMemoryError e) {
+ }
+ return true;
+ }
+ }
+
+
private final static class StackTrace extends Operation {
@Override
public boolean perform() {
@@ -289,26 +313,39 @@ public class Main implements Runnable {
private final static Map<Operation, Double> createDefaultFrequencyMap(Object lock,
Semaphore semaphore) {
Map<Operation, Double> frequencyMap = new HashMap<Operation, Double>();
- frequencyMap.put(new OOM(), 0.005); // 1/200
- frequencyMap.put(new SigQuit(), 0.095); // 19/200
- frequencyMap.put(new Alloc(), 0.225); // 45/200
- frequencyMap.put(new LargeAlloc(), 0.05); // 10/200
- frequencyMap.put(new StackTrace(), 0.1); // 20/200
- frequencyMap.put(new Exit(), 0.225); // 45/200
- frequencyMap.put(new Sleep(), 0.125); // 25/200
- frequencyMap.put(new TimedWait(lock), 0.05); // 10/200
- frequencyMap.put(new Wait(lock), 0.075); // 15/200
- frequencyMap.put(new QueuedWait(semaphore), 0.05); // 10/200
+ frequencyMap.put(new OOM(), 0.005); // 1/200
+ frequencyMap.put(new SigQuit(), 0.095); // 19/200
+ frequencyMap.put(new Alloc(), 0.225); // 45/200
+ frequencyMap.put(new LargeAlloc(), 0.05); // 10/200
+ // TODO: NonMovingAlloc operations fail an assertion with the
+ // GSS collector (see b/72738921); disable them for now.
+ frequencyMap.put(new NonMovingAlloc(), 0.0); // 0/200
+ frequencyMap.put(new StackTrace(), 0.1); // 20/200
+ frequencyMap.put(new Exit(), 0.225); // 45/200
+ frequencyMap.put(new Sleep(), 0.125); // 25/200
+ frequencyMap.put(new TimedWait(lock), 0.05); // 10/200
+ frequencyMap.put(new Wait(lock), 0.075); // 15/200
+ frequencyMap.put(new QueuedWait(semaphore), 0.05); // 10/200
+
+ return frequencyMap;
+ }
+
+ private final static Map<Operation, Double> createAllocFrequencyMap() {
+ Map<Operation, Double> frequencyMap = new HashMap<Operation, Double>();
+ frequencyMap.put(new Sleep(), 0.2); // 40/200
+ frequencyMap.put(new Alloc(), 0.575); // 115/200
+ frequencyMap.put(new LargeAlloc(), 0.15); // 30/200
+ frequencyMap.put(new NonMovingAlloc(), 0.075); // 15/200
return frequencyMap;
}
private final static Map<Operation, Double> createLockFrequencyMap(Object lock) {
Map<Operation, Double> frequencyMap = new HashMap<Operation, Double>();
- frequencyMap.put(new Sleep(), 0.2); // 40/200
- frequencyMap.put(new TimedWait(lock), 0.2); // 40/200
- frequencyMap.put(new Wait(lock), 0.2); // 40/200
- frequencyMap.put(new SyncAndWork(lock), 0.4); // 80/200
+ frequencyMap.put(new Sleep(), 0.2); // 40/200
+ frequencyMap.put(new TimedWait(lock), 0.2); // 40/200
+ frequencyMap.put(new Wait(lock), 0.2); // 40/200
+ frequencyMap.put(new SyncAndWork(lock), 0.4); // 80/200
return frequencyMap;
}
@@ -414,11 +451,14 @@ public class Main implements Runnable {
i++;
permits = Integer.parseInt(args[i]);
} else if (args[i].equals("--locks-only")) {
- lock = new Object();
frequencyMap = createLockFrequencyMap(lock);
+ } else if (args[i].equals("--allocs-only")) {
+ frequencyMap = createAllocFrequencyMap();
} else if (args[i].equals("--dumpmap")) {
dumpMap = true;
} else {
+ // Processing an argument of the form "-<operation>:X"
+ // (where X is a double value).
Semaphore semaphore = getSemaphore(permits);
frequencyMap = updateFrequencyMap(frequencyMap, lock, semaphore, args[i]);
}
diff --git a/test/044-proxy/src/Main.java b/test/044-proxy/src/Main.java
index e44c122e3d..7b70e65b8c 100644
--- a/test/044-proxy/src/Main.java
+++ b/test/044-proxy/src/Main.java
@@ -54,4 +54,8 @@ public class Main {
private static final HashMap<String, String> proxyClassNameMap = new HashMap<String, String>();
private static int uniqueTestProxyClassNum = 0;
+
+ static native void startJit();
+ static native void stopJit();
+ static native void waitForCompilation();
}
diff --git a/test/044-proxy/src/OOMEOnDispatch.java b/test/044-proxy/src/OOMEOnDispatch.java
index 94f267980d..2ee57926ae 100644
--- a/test/044-proxy/src/OOMEOnDispatch.java
+++ b/test/044-proxy/src/OOMEOnDispatch.java
@@ -32,6 +32,11 @@ public class OOMEOnDispatch implements InvocationHandler {
OOMEInterface.class.getClassLoader(), new Class[] { OOMEInterface.class },
handler);
+ // Stop the JIT to be sure nothing is running that could be resolving classes or causing
+ // verification.
+ Main.stopJit();
+ Main.waitForCompilation();
+
int l = 1024 * 1024;
while (l > 8) {
try {
@@ -40,17 +45,6 @@ public class OOMEOnDispatch implements InvocationHandler {
l = l/2;
}
}
- // Have an extra run with the exact size of Method objects. The above loop should have
- // filled with enough large objects for simplicity and speed, but ensure exact allocation
- // size.
- final int methodAsByteArrayLength = 40 - 12; // Method size - byte array overhead.
- for (;;) {
- try {
- storage.add(new byte[methodAsByteArrayLength]);
- } catch (OutOfMemoryError e) {
- break;
- }
- }
try {
inf.foo();
@@ -60,6 +54,8 @@ public class OOMEOnDispatch implements InvocationHandler {
storage.clear();
System.out.println("Received OOME");
}
+
+ Main.startJit();
}
public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
diff --git a/test/071-dexfile-get-static-size/build b/test/071-dexfile-get-static-size/build
index 0bba66d065..412ee6dd46 100755
--- a/test/071-dexfile-get-static-size/build
+++ b/test/071-dexfile-get-static-size/build
@@ -16,15 +16,13 @@
./default-build "$@"
-# Create and add as resources to the test jar file:
+# Bundle with the test the following resources:
# 1. test1.dex
# 2. test2.dex
# 3. test-jar.jar, containing test1.dex as classes.dex
# 4. multi-jar.jar, containing test1.dex as classes.dex and test2.dex as classes2.dex
mkdir test-jar
-cp test1.dex test-jar/classes.dex
-cp test2.dex test-jar/classes2.dex
-zip -j test-jar.jar test-jar/classes.dex
-zip -j multi-jar.jar test-jar/classes.dex test-jar/classes2.dex
-jar uf ${TEST_NAME}.jar test1.dex test2.dex test-jar.jar multi-jar.jar
-
+cp res/test1.dex test-jar/classes.dex
+cp res/test2.dex test-jar/classes2.dex
+zip -j res/test-jar.jar test-jar/classes.dex
+zip -j res/multi-jar.jar test-jar/classes.dex test-jar/classes2.dex
diff --git a/test/071-dexfile-get-static-size/test1.dex b/test/071-dexfile-get-static-size/res/test1.dex
index 84602d03c2..84602d03c2 100644
--- a/test/071-dexfile-get-static-size/test1.dex
+++ b/test/071-dexfile-get-static-size/res/test1.dex
Binary files differ
diff --git a/test/071-dexfile-get-static-size/test2.dex b/test/071-dexfile-get-static-size/res/test2.dex
index a07c46ef59..a07c46ef59 100644
--- a/test/071-dexfile-get-static-size/test2.dex
+++ b/test/071-dexfile-get-static-size/res/test2.dex
Binary files differ
diff --git a/test/071-dexfile-get-static-size/src/Main.java b/test/071-dexfile-get-static-size/src/Main.java
index 4bf453801e..8dbbba56c3 100644
--- a/test/071-dexfile-get-static-size/src/Main.java
+++ b/test/071-dexfile-get-static-size/src/Main.java
@@ -14,26 +14,9 @@
* limitations under the License.
*/
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.io.FileOutputStream;
-import java.lang.reflect.Constructor;
import java.lang.reflect.Method;
public class Main {
- private static void extractResource(String resource, String filename) throws Exception {
- ClassLoader loader = Main.class.getClassLoader();
- InputStream is = loader.getResourceAsStream(resource);
- OutputStream os = new FileOutputStream(filename);
- int read;
- byte[] buf = new byte[4096];
- while ((read = is.read(buf)) >= 0) {
- os.write(buf, 0, read);
- }
- is.close();
- os.close();
- }
-
private static long getDexFileSize(String filename) throws Exception {
ClassLoader loader = Main.class.getClassLoader();
Class<?> DexFile = loader.loadClass("dalvik.system.DexFile");
@@ -47,8 +30,7 @@ public class Main {
}
private static void test(String resource) throws Exception {
- String filename = System.getenv("DEX_LOCATION") + "/" + resource;
- extractResource(resource, filename);
+ String filename = System.getenv("DEX_LOCATION") + "/res/" + resource;
long size = getDexFileSize(filename);
System.out.println("Size for " + resource + ": " + size);
}
diff --git a/test/137-cfi/cfi.cc b/test/137-cfi/cfi.cc
index ef758e86e1..83234f0382 100644
--- a/test/137-cfi/cfi.cc
+++ b/test/137-cfi/cfi.cc
@@ -71,7 +71,7 @@ static bool CheckStack(Backtrace* bt, const std::vector<std::string>& seq) {
for (Backtrace::const_iterator it = bt->begin(); it != bt->end(); ++it) {
if (BacktraceMap::IsValid(it->map)) {
LOG(INFO) << "Got " << it->func_name << ", looking for " << seq[cur_search_index];
- if (it->func_name == seq[cur_search_index]) {
+ if (it->func_name.find(seq[cur_search_index]) != std::string::npos) {
cur_search_index++;
if (cur_search_index == seq.size()) {
return true;
@@ -83,7 +83,7 @@ static bool CheckStack(Backtrace* bt, const std::vector<std::string>& seq) {
printf("Cannot find %s in backtrace:\n", seq[cur_search_index].c_str());
for (Backtrace::const_iterator it = bt->begin(); it != bt->end(); ++it) {
if (BacktraceMap::IsValid(it->map)) {
- printf(" %s\n", it->func_name.c_str());
+ printf(" %s\n", Backtrace::FormatFrameData(&*it).c_str());
}
}
@@ -107,7 +107,7 @@ static void MoreErrorInfo(pid_t pid, bool sig_quit_on_fail) {
extern "C" JNIEXPORT jboolean JNICALL Java_Main_unwindInProcess(
JNIEnv*,
jobject,
- jboolean full_signatrues,
+ jboolean,
jint,
jboolean) {
#if __linux__
@@ -129,17 +129,12 @@ extern "C" JNIEXPORT jboolean JNICALL Java_Main_unwindInProcess(
std::vector<std::string> seq = {
"Java_Main_unwindInProcess", // This function.
"Main.unwindInProcess", // The corresponding Java native method frame.
- "int java.util.Arrays.binarySearch(java.lang.Object[], int, int, java.lang.Object, java.util.Comparator)", // Framework method.
+ "java.util.Arrays.binarySearch0", // Framework method.
+ "Base.runBase", // Method in other dex file.
"Main.main" // The Java entry method.
};
- std::vector<std::string> full_seq = {
- "Java_Main_unwindInProcess", // This function.
- "boolean Main.unwindInProcess(boolean, int, boolean)", // The corresponding Java native method frame.
- "int java.util.Arrays.binarySearch(java.lang.Object[], int, int, java.lang.Object, java.util.Comparator)", // Framework method.
- "void Main.main(java.lang.String[])" // The Java entry method.
- };
- bool result = CheckStack(bt.get(), full_signatrues ? full_seq : seq);
+ bool result = CheckStack(bt.get(), seq);
if (!kCauseSegfault) {
return result ? JNI_TRUE : JNI_FALSE;
} else {
@@ -191,7 +186,7 @@ int wait_for_sigstop(pid_t tid, int* total_sleep_time_usec, bool* detach_failed
extern "C" JNIEXPORT jboolean JNICALL Java_Main_unwindOtherProcess(
JNIEnv*,
jobject,
- jboolean full_signatrues,
+ jboolean,
jint pid_int) {
#if __linux__
// TODO: What to do on Valgrind?
@@ -235,20 +230,12 @@ extern "C" JNIEXPORT jboolean JNICALL Java_Main_unwindOtherProcess(
// Note: For some reason, the name isn't
// resolved, so don't look for it right now.
"Main.sleep", // The corresponding Java native method frame.
- "int java.util.Arrays.binarySearch(java.lang.Object[], int, int, java.lang.Object, java.util.Comparator)", // Framework method.
+ "java.util.Arrays.binarySearch0", // Framework method.
+ "Base.runBase", // Method in other dex file.
"Main.main" // The Java entry method.
};
- std::vector<std::string> full_seq = {
- // "Java_Main_sleep", // The sleep function being executed in the
- // other runtime.
- // Note: For some reason, the name isn't
- // resolved, so don't look for it right now.
- "boolean Main.sleep(int, boolean, double)", // The corresponding Java native method frame.
- "int java.util.Arrays.binarySearch(java.lang.Object[], int, int, java.lang.Object, java.util.Comparator)", // Framework method.
- "void Main.main(java.lang.String[])" // The Java entry method.
- };
- result = CheckStack(bt.get(), full_signatrues ? full_seq : seq);
+ result = CheckStack(bt.get(), seq);
}
constexpr bool kSigQuitOnFail = true;
diff --git a/test/137-cfi/src-multidex/Base.java b/test/137-cfi/src-multidex/Base.java
new file mode 100644
index 0000000000..d3f8a5681d
--- /dev/null
+++ b/test/137-cfi/src-multidex/Base.java
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public abstract class Base {
+ abstract public void runImpl();
+ public void runBase() {
+ runImpl();
+ }
+}
diff --git a/test/137-cfi/src/Main.java b/test/137-cfi/src/Main.java
index 1ec70722b5..9a2e352b8c 100644
--- a/test/137-cfi/src/Main.java
+++ b/test/137-cfi/src/Main.java
@@ -20,7 +20,7 @@ import java.io.InputStreamReader;
import java.util.Arrays;
import java.util.Comparator;
-public class Main implements Comparator<Main> {
+public class Main extends Base implements Comparator<Main> {
// Whether to test local unwinding.
private boolean testLocal;
@@ -57,10 +57,10 @@ public class Main implements Comparator<Main> {
}
public static void main(String[] args) throws Exception {
- new Main(args).run();
+ new Main(args).runBase();
}
- private void run() {
+ public void runImpl() {
if (secondary) {
if (!testRemote) {
throw new RuntimeException("Should not be running secondary!");
diff --git a/test/141-class-unload/jni_unload.cc b/test/141-class-unload/jni_unload.cc
index 355457d68d..894ae8b0d7 100644
--- a/test/141-class-unload/jni_unload.cc
+++ b/test/141-class-unload/jni_unload.cc
@@ -32,19 +32,5 @@ extern "C" JNIEXPORT void JNICALL Java_IntHolder_waitForCompilation(JNIEnv*, jcl
}
}
-extern "C" JNIEXPORT void JNICALL Java_Main_stopJit(JNIEnv*, jclass) {
- jit::Jit* jit = Runtime::Current()->GetJit();
- if (jit != nullptr) {
- jit->Stop();
- }
-}
-
-extern "C" JNIEXPORT void JNICALL Java_Main_startJit(JNIEnv*, jclass) {
- jit::Jit* jit = Runtime::Current()->GetJit();
- if (jit != nullptr) {
- jit->Start();
- }
-}
-
} // namespace
} // namespace art
diff --git a/test/168-vmstack-annotated/expected.txt b/test/168-vmstack-annotated/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/168-vmstack-annotated/expected.txt
diff --git a/test/168-vmstack-annotated/info.txt b/test/168-vmstack-annotated/info.txt
new file mode 100644
index 0000000000..d849bc31ed
--- /dev/null
+++ b/test/168-vmstack-annotated/info.txt
@@ -0,0 +1 @@
+Regression test for b/68703210
diff --git a/test/168-vmstack-annotated/run b/test/168-vmstack-annotated/run
new file mode 100644
index 0000000000..93654113e6
--- /dev/null
+++ b/test/168-vmstack-annotated/run
@@ -0,0 +1,18 @@
+#!/bin/bash
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Use a smaller heap so it's easier to potentially fill up.
+exec ${RUN} $@ --runtime-option -Xmx2m
diff --git a/test/168-vmstack-annotated/src/Main.java b/test/168-vmstack-annotated/src/Main.java
new file mode 100644
index 0000000000..8234f945c0
--- /dev/null
+++ b/test/168-vmstack-annotated/src/Main.java
@@ -0,0 +1,225 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.Thread.State;
+import java.lang.reflect.Method;
+import java.util.Arrays;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.BrokenBarrierException;
+import java.util.concurrent.CyclicBarrier;
+
+public class Main {
+
+ static class Runner implements Runnable {
+ List<Object> locks;
+ List<CyclicBarrier> barriers;
+
+ public Runner(List<Object> locks, List<CyclicBarrier> barriers) {
+ this.locks = locks;
+ this.barriers = barriers;
+ }
+
+ @Override
+ public void run() {
+ step(locks, barriers);
+ }
+
+ private void step(List<Object> l, List<CyclicBarrier> b) {
+ if (l.isEmpty()) {
+ // Nothing to do, sleep indefinitely.
+ try {
+ Thread.sleep(100000000);
+ } catch (InterruptedException e) {
+ throw new RuntimeException(e);
+ }
+ } else {
+ Object lockObject = l.remove(0);
+ CyclicBarrier barrierObject = b.remove(0);
+
+ if (lockObject == null) {
+ // No lock object: only take barrier, recurse.
+ try {
+ barrierObject.await();
+ } catch (InterruptedException | BrokenBarrierException e) {
+ throw new RuntimeException(e);
+ }
+ step(l, b);
+ } else if (barrierObject != null) {
+ // Have barrier: sync, wait and recurse.
+ synchronized(lockObject) {
+ try {
+ barrierObject.await();
+ } catch (InterruptedException | BrokenBarrierException e) {
+ throw new RuntimeException(e);
+ }
+ step(l, b);
+ }
+ } else {
+ // Sync, and get next step (which is assumed to have object and barrier).
+ synchronized (lockObject) {
+ Object lockObject2 = l.remove(0);
+ CyclicBarrier barrierObject2 = b.remove(0);
+ synchronized(lockObject2) {
+ try {
+ barrierObject2.await();
+ } catch (InterruptedException | BrokenBarrierException e) {
+ throw new RuntimeException(e);
+ }
+ step(l, b);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ public static void main(String[] args) throws Exception {
+ try {
+ testCluster1();
+ } catch (Exception e) {
+ Map<Thread,StackTraceElement[]> stacks = Thread.getAllStackTraces();
+ for (Map.Entry<Thread,StackTraceElement[]> entry : stacks.entrySet()) {
+ System.out.println(entry.getKey());
+ System.out.println(Arrays.toString(entry.getValue()));
+ }
+ throw e;
+ }
+ }
+
+ private static void testCluster1() throws Exception {
+ // Test setup (at deadlock):
+ //
+ // Thread 1:
+ // #0 step: synchronized(o3) { synchronized(o2) }
+ // #1 step: synchronized(o1)
+ //
+ // Thread 2:
+ // #0 step: synchronized(o1)
+ // #1 step: synchronized(o4) { synchronized(o2) }
+ //
+ LinkedList<Object> l1 = new LinkedList<>();
+ LinkedList<CyclicBarrier> b1 = new LinkedList<>();
+ LinkedList<Object> l2 = new LinkedList<>();
+ LinkedList<CyclicBarrier> b2 = new LinkedList<>();
+
+ Object o1 = new Object();
+ Object o2 = new Object();
+ Object o3 = new Object();
+ Object o4 = new Object();
+
+ l1.add(o1);
+ l1.add(o3);
+ l1.add(o2);
+ l2.add(o4);
+ l2.add(o2);
+ l2.add(o1);
+
+ CyclicBarrier c1 = new CyclicBarrier(3);
+ CyclicBarrier c2 = new CyclicBarrier(2);
+ b1.add(c1);
+ b1.add(null);
+ b1.add(c2);
+ b2.add(null);
+ b2.add(c1);
+ b2.add(c2);
+
+ Thread t1 = new Thread(new Runner(l1, b1));
+ t1.setDaemon(true);
+ t1.start();
+ Thread t2 = new Thread(new Runner(l2, b2));
+ t2.setDaemon(true);
+ t2.start();
+
+ c1.await();
+
+ waitNotRunnable(t1);
+ waitNotRunnable(t2);
+ Thread.sleep(250); // Unfortunately this seems necessary. :-(
+
+ // Thread 1.
+ {
+ Object[] stack1 = getAnnotatedStack(t1);
+ assertBlockedOn(stack1[0], o2); // Blocked on o2.
+ assertLocks(stack1[0], o3); // Locked o3.
+ assertStackTraceElementStep(stack1[0]);
+
+ assertBlockedOn(stack1[1], null); // Frame can't be blocked.
+ assertLocks(stack1[1], o1); // Locked o1.
+ assertStackTraceElementStep(stack1[1]);
+ }
+
+ // Thread 2.
+ {
+ Object[] stack2 = getAnnotatedStack(t2);
+ assertBlockedOn(stack2[0], o1); // Blocked on o1.
+ assertLocks(stack2[0]); // Nothing locked.
+ assertStackTraceElementStep(stack2[0]);
+
+ assertBlockedOn(stack2[1], null); // Frame can't be blocked.
+ assertLocks(stack2[1], o4, o2); // Locked o4, o2.
+ assertStackTraceElementStep(stack2[1]);
+ }
+ }
+
+ private static void waitNotRunnable(Thread t) throws InterruptedException {
+ while (t.getState() == State.RUNNABLE) {
+ Thread.sleep(100);
+ }
+ }
+
+ private static Object[] getAnnotatedStack(Thread t) throws Exception {
+ Class<?> vmStack = Class.forName("dalvik.system.VMStack");
+ Method m = vmStack.getDeclaredMethod("getAnnotatedThreadStackTrace", Thread.class);
+ return (Object[]) m.invoke(null, t);
+ }
+
+ private static void assertEquals(Object o1, Object o2) {
+ if (o1 != o2) {
+ throw new RuntimeException("Expected " + o1 + " == " + o2);
+ }
+ }
+ private static void assertLocks(Object fromTrace, Object... locks) throws Exception {
+ Object fieldValue = fromTrace.getClass().getDeclaredMethod("getHeldLocks").
+ invoke(fromTrace);
+ assertEquals((Object[]) fieldValue,
+ (locks == null) ? null : (locks.length == 0 ? null : locks));
+ }
+ private static void assertBlockedOn(Object fromTrace, Object block) throws Exception {
+ Object fieldValue = fromTrace.getClass().getDeclaredMethod("getBlockedOn").
+ invoke(fromTrace);
+ assertEquals(fieldValue, block);
+ }
+ private static void assertEquals(Object[] o1, Object[] o2) {
+ if (!Arrays.equals(o1, o2)) {
+ throw new RuntimeException(
+ "Expected " + Arrays.toString(o1) + " == " + Arrays.toString(o2));
+ }
+ }
+ private static void assertStackTraceElementStep(Object o) throws Exception {
+ Object fieldValue = o.getClass().getDeclaredMethod("getStackTraceElement").invoke(o);
+ if (fieldValue instanceof StackTraceElement) {
+ StackTraceElement elem = (StackTraceElement) fieldValue;
+ if (!elem.getMethodName().equals("step")) {
+ throw new RuntimeException("Expected step method");
+ }
+ return;
+ }
+ throw new RuntimeException("Expected StackTraceElement " + fieldValue + " / " + o);
+ }
+}
+
diff --git a/test/169-threadgroup-jni/expected.txt b/test/169-threadgroup-jni/expected.txt
new file mode 100644
index 0000000000..6a5618ebc6
--- /dev/null
+++ b/test/169-threadgroup-jni/expected.txt
@@ -0,0 +1 @@
+JNI_OnLoad called
diff --git a/test/169-threadgroup-jni/info.txt b/test/169-threadgroup-jni/info.txt
new file mode 100644
index 0000000000..b4c77e232b
--- /dev/null
+++ b/test/169-threadgroup-jni/info.txt
@@ -0,0 +1 @@
+Ensure that attached threads are correctly handled in ThreadGroups.
diff --git a/test/169-threadgroup-jni/jni_daemon_thread.cc b/test/169-threadgroup-jni/jni_daemon_thread.cc
new file mode 100644
index 0000000000..94902dcf2c
--- /dev/null
+++ b/test/169-threadgroup-jni/jni_daemon_thread.cc
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <jni.h>
+#include <nativehelper/scoped_local_ref.h>
+#include <pthread.h>
+
+#include <android-base/logging.h>
+
+namespace art {
+
+static JavaVM* vm = nullptr;
+
+static void* Runner(void* arg) {
+ CHECK(vm != nullptr);
+
+ jobject thread_group = reinterpret_cast<jobject>(arg);
+ JNIEnv* env = nullptr;
+ JavaVMAttachArgs args = { JNI_VERSION_1_6, __FUNCTION__, thread_group };
+ int attach_result = vm->AttachCurrentThread(&env, &args);
+ CHECK_EQ(attach_result, 0);
+
+ {
+ ScopedLocalRef<jclass> klass(env, env->FindClass("Main"));
+ CHECK(klass != nullptr);
+
+ jmethodID id = env->GetStaticMethodID(klass.get(), "runFromNative", "()V");
+ CHECK(id != nullptr);
+
+ env->CallStaticVoidMethod(klass.get(), id);
+ }
+
+ int detach_result = vm->DetachCurrentThread();
+ CHECK_EQ(detach_result, 0);
+ return nullptr;
+}
+
+extern "C" JNIEXPORT void JNICALL Java_Main_testNativeThread(
+ JNIEnv* env, jclass, jobject thread_group) {
+ CHECK_EQ(env->GetJavaVM(&vm), 0);
+ jobject global_thread_group = env->NewGlobalRef(thread_group);
+
+ pthread_t pthread;
+ int pthread_create_result = pthread_create(&pthread, nullptr, Runner, global_thread_group);
+ CHECK_EQ(pthread_create_result, 0);
+ int pthread_join_result = pthread_join(pthread, nullptr);
+ CHECK_EQ(pthread_join_result, 0);
+
+ env->DeleteGlobalRef(global_thread_group);
+}
+
+} // namespace art
diff --git a/test/169-threadgroup-jni/src/Main.java b/test/169-threadgroup-jni/src/Main.java
new file mode 100644
index 0000000000..2cd1fcfa24
--- /dev/null
+++ b/test/169-threadgroup-jni/src/Main.java
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) throws Exception {
+ System.loadLibrary(args[0]);
+
+ ThreadGroup group = new ThreadGroup("Test group");
+ group.setDaemon(true);
+
+ testNativeThread(group);
+
+ if (!executed) {
+ throw new IllegalStateException("Expected runFromNative to be done.");
+ }
+ if (!group.isDestroyed()) {
+ throw new IllegalStateException("Threadgroup should be destroyed.");
+ }
+ }
+
+ private static boolean executed = false;
+ private static void runFromNative() {
+ executed = true;
+ }
+ private static native void testNativeThread(ThreadGroup group);
+}
diff --git a/test/305-other-fault-handler/expected.txt b/test/305-other-fault-handler/expected.txt
new file mode 100644
index 0000000000..6221e8e853
--- /dev/null
+++ b/test/305-other-fault-handler/expected.txt
@@ -0,0 +1,2 @@
+JNI_OnLoad called
+Passed!
diff --git a/test/305-other-fault-handler/fault_handler.cc b/test/305-other-fault-handler/fault_handler.cc
new file mode 100644
index 0000000000..f04832613b
--- /dev/null
+++ b/test/305-other-fault-handler/fault_handler.cc
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include <atomic>
+#include <memory>
+
+#include <jni.h>
+#include <signal.h>
+#include <stdint.h>
+#include <sys/mman.h>
+
+#include "fault_handler.h"
+#include "globals.h"
+#include "mem_map.h"
+
+namespace art {
+
+class TestFaultHandler FINAL : public FaultHandler {
+ public:
+ explicit TestFaultHandler(FaultManager* manager)
+ : FaultHandler(manager),
+ map_error_(""),
+ target_map_(MemMap::MapAnonymous("test-305-mmap",
+ /* addr */ nullptr,
+ /* byte_count */ kPageSize,
+ /* prot */ PROT_NONE,
+ /* low_4gb */ false,
+ /* reuse */ false,
+ /* error_msg */ &map_error_,
+ /* use_ashmem */ false)),
+ was_hit_(false) {
+ CHECK(target_map_ != nullptr) << "Unable to create segfault target address " << map_error_;
+ manager_->AddHandler(this, /*in_generated_code*/false);
+ }
+
+ virtual ~TestFaultHandler() {
+ manager_->RemoveHandler(this);
+ }
+
+ bool Action(int sig, siginfo_t* siginfo, void* context ATTRIBUTE_UNUSED) OVERRIDE {
+ CHECK_EQ(sig, SIGSEGV);
+ CHECK_EQ(reinterpret_cast<uint32_t*>(siginfo->si_addr),
+ GetTargetPointer()) << "Segfault on unexpected address!";
+ CHECK(!was_hit_) << "Recursive signal!";
+ was_hit_ = true;
+
+ LOG(INFO) << "SEGV Caught. mprotecting map.";
+ CHECK(target_map_->Protect(PROT_READ | PROT_WRITE)) << "Failed to mprotect R/W";
+ LOG(INFO) << "Setting value to be read.";
+ *GetTargetPointer() = kDataValue;
+ LOG(INFO) << "Changing prot to be read-only.";
+ CHECK(target_map_->Protect(PROT_READ)) << "Failed to mprotect R-only";
+ return true;
+ }
+
+ void CauseSegfault() {
+ CHECK_EQ(target_map_->GetProtect(), PROT_NONE);
+
+ // This will segfault. The handler should deal with it though and we will get a value out of it.
+ uint32_t data = *GetTargetPointer();
+
+ // Prevent the compiler from re-ordering around the *GetTargetPointer() read.
+ std::atomic_signal_fence(std::memory_order_seq_cst);
+
+ CHECK(was_hit_);
+ CHECK_EQ(data, kDataValue) << "Unexpected read value from mmap";
+ CHECK_EQ(target_map_->GetProtect(), PROT_READ);
+ LOG(INFO) << "Success!";
+ }
+
+ private:
+ uint32_t* GetTargetPointer() {
+ return reinterpret_cast<uint32_t*>(target_map_->Begin() + 8);
+ }
+
+ static constexpr uint32_t kDataValue = 0xDEADBEEF;
+
+ std::string map_error_;
+ std::unique_ptr<MemMap> target_map_;
+ bool was_hit_;
+};
+
+extern "C" JNIEXPORT void JNICALL Java_Main_runFaultHandlerTest(JNIEnv*, jclass) {
+ std::unique_ptr<TestFaultHandler> handler(new TestFaultHandler(&fault_manager));
+ handler->CauseSegfault();
+}
+
+} // namespace art
diff --git a/test/305-other-fault-handler/info.txt b/test/305-other-fault-handler/info.txt
new file mode 100644
index 0000000000..656c8bd406
--- /dev/null
+++ b/test/305-other-fault-handler/info.txt
@@ -0,0 +1,3 @@
+Test that we correctly handle basic non-generated-code fault handlers
+
+Tests that we can use and remove these handlers and they can change mappings.
diff --git a/test/305-other-fault-handler/src/Main.java b/test/305-other-fault-handler/src/Main.java
new file mode 100644
index 0000000000..13a6fef730
--- /dev/null
+++ b/test/305-other-fault-handler/src/Main.java
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) throws Exception {
+ System.loadLibrary(args[0]);
+ runFaultHandlerTest();
+ System.out.println("Passed!");
+ }
+
+ public static native void runFaultHandlerTest();
+}
diff --git a/test/449-checker-bce/src/Main.java b/test/449-checker-bce/src/Main.java
index 60e653c72f..4868355b90 100644
--- a/test/449-checker-bce/src/Main.java
+++ b/test/449-checker-bce/src/Main.java
@@ -1057,6 +1057,88 @@ public class Main {
}
}
+ /// CHECK-START: void Main.lengthAlias1(int[], int) BCE (before)
+ /// CHECK-DAG: <<Arr:l\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Par:i\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Nul:l\d+>> NullCheck [<<Arr>>] loop:none
+ /// CHECK-DAG: <<Len:i\d+>> ArrayLength [<<Nul>>] loop:none
+ /// CHECK-DAG: NotEqual [<<Par>>,<<Len>>] loop:none
+ /// CHECK-DAG: <<Idx:i\d+>> Phi loop:<<Loop:B\d+>>
+ /// CHECK-DAG: BoundsCheck [<<Idx>>,<<Len>>] loop:<<Loop>>
+ //
+ /// CHECK-START: void Main.lengthAlias1(int[], int) BCE (after)
+ /// CHECK-NOT: BoundsCheck
+ /// CHECK-NOT: Deoptimize
+ public static void lengthAlias1(int[] a, int len) {
+ if (len == a.length) {
+ for (int i = 0; i < len; i++) {
+ a[i] = 1;
+ }
+ }
+ }
+
+ /// CHECK-START: void Main.lengthAlias2(int[], int) BCE (before)
+ /// CHECK-DAG: <<Arr:l\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Par:i\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Nul:l\d+>> NullCheck [<<Arr>>] loop:none
+ /// CHECK-DAG: <<Len:i\d+>> ArrayLength [<<Nul>>] loop:none
+ /// CHECK-DAG: Equal [<<Par>>,<<Len>>] loop:none
+ /// CHECK-DAG: <<Idx:i\d+>> Phi loop:<<Loop:B\d+>>
+ /// CHECK-DAG: BoundsCheck [<<Idx>>,<<Len>>] loop:<<Loop>>
+ //
+ /// CHECK-START: void Main.lengthAlias2(int[], int) BCE (after)
+ /// CHECK-NOT: BoundsCheck
+ /// CHECK-NOT: Deoptimize
+ public static void lengthAlias2(int[] a, int len) {
+ if (len != a.length) {
+ return;
+ }
+ for (int i = 0; i < len; i++) {
+ a[i] = 2;
+ }
+ }
+
+ /// CHECK-START: void Main.lengthAlias3(int[], int) BCE (before)
+ /// CHECK-DAG: <<Arr:l\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Par:i\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Nul:l\d+>> NullCheck [<<Arr>>] loop:none
+ /// CHECK-DAG: <<Len:i\d+>> ArrayLength [<<Nul>>] loop:none
+ /// CHECK-DAG: NotEqual [<<Par>>,<<Len>>] loop:none
+ /// CHECK-DAG: <<Idx:i\d+>> Phi loop:<<Loop:B\d+>>
+ /// CHECK-DAG: BoundsCheck [<<Idx>>,<<Len>>] loop:<<Loop>>
+ //
+ /// CHECK-START: void Main.lengthAlias3(int[], int) BCE (after)
+ /// CHECK-NOT: BoundsCheck
+ /// CHECK-NOT: Deoptimize
+ public static void lengthAlias3(int[] a, int len) {
+ if (a.length == len) {
+ for (int i = 0; i < len; i++) {
+ a[i] = 3;
+ }
+ }
+ }
+
+ /// CHECK-START: void Main.lengthAlias4(int[]) BCE (before)
+ /// CHECK-DAG: <<Arr:l\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Val:i\d+>> IntConstant 8 loop:none
+ /// CHECK-DAG: <<Nul:l\d+>> NullCheck [<<Arr>>] loop:none
+ /// CHECK-DAG: <<Len:i\d+>> ArrayLength [<<Nul>>] loop:none
+ /// CHECK-DAG: Equal [<<Len>>,<<Val>>] loop:none
+ /// CHECK-DAG: <<Idx:i\d+>> Phi loop:<<Loop:B\d+>>
+ /// CHECK-DAG: BoundsCheck [<<Idx>>,<<Len>>] loop:<<Loop>>
+ //
+ /// CHECK-START: void Main.lengthAlias4(int[]) BCE (after)
+ /// CHECK-NOT: BoundsCheck
+ /// CHECK-NOT: Deoptimize
+ public static void lengthAlias4(int[] a) {
+ if (8 != a.length) {
+ return;
+ }
+ for (int i = 0; i < 8; i++) {
+ a[i] = 4;
+ }
+ }
+
static int[][] mA;
/// CHECK-START: void Main.dynamicBCEAndIntrinsic(int) BCE (before)
@@ -1747,10 +1829,50 @@ public class Main {
System.out.println("nonzero length failed!");
}
+ array = new int[8];
+ lengthAlias1(array, 8);
+ for (int i = 0; i < 8; i++) {
+ if (array[i] != 1) {
+ System.out.println("alias1 failed!");
+ }
+ }
+ lengthAlias2(array, 8);
+ for (int i = 0; i < 8; i++) {
+ if (array[i] != 2) {
+ System.out.println("alias2 failed!");
+ }
+ }
+ lengthAlias3(array, 8);
+ for (int i = 0; i < 8; i++) {
+ if (array[i] != 3) {
+ System.out.println("alias3 failed!");
+ }
+ }
+ lengthAlias4(array);
+ for (int i = 0; i < 8; i++) {
+ if (array[i] != 4) {
+ System.out.println("alias4 failed!");
+ }
+ }
+
+ array = new int[10];
+ lengthAlias1(array, /*mismatched value*/ 8);
+ lengthAlias2(array, /*mismatched value*/ 8);
+ lengthAlias3(array, /*mismatched value*/ 8);
+ lengthAlias4(array); // implicit mismatch
+ for (int i = 0; i < 10; i++) {
+ if (array[i] != 0) {
+ System.out.println("mismatch failed!");
+ }
+ }
+
// Zero length array does not break.
array = new int[0];
nonzeroLength(array);
knownLength(array);
+ lengthAlias1(array, 0);
+ lengthAlias2(array, 0);
+ lengthAlias3(array, 0);
mA = new int[4][4];
for (int i = 0; i < 4; i++) {
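The lengthAlias tests above exercise BCE's ability to treat a loop bound as an alias of the array length once an equality guard has been established. A minimal standalone sketch of the pattern, using hypothetical names, is:

public class LengthAliasSketch {
  // After the guard, len is known to equal a.length, so the BCE pass can
  // prove i < a.length on every iteration and drop the per-element checks.
  static void fill(int[] a, int len, int value) {
    if (len == a.length) {
      for (int i = 0; i < len; i++) {
        a[i] = value;
      }
    }
  }

  public static void main(String[] args) {
    int[] matched = new int[8];
    fill(matched, 8, 1);       // guard holds: all eight elements are written
    int[] mismatched = new int[10];
    fill(mismatched, 8, 1);    // guard fails: the loop is skipped entirely
    System.out.println(matched[7] + " " + mismatched[0]);  // prints "1 0"
  }
}

The mismatched-value calls in the test's main() rely on the same observation: when the alias guard fails, the loop never runs, so no out-of-bounds access is possible either way.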
diff --git a/test/466-get-live-vreg/get_live_vreg_jni.cc b/test/466-get-live-vreg/get_live_vreg_jni.cc
index aeb9e44541..44ea0c9877 100644
--- a/test/466-get-live-vreg/get_live_vreg_jni.cc
+++ b/test/466-get-live-vreg/get_live_vreg_jni.cc
@@ -42,7 +42,8 @@ class TestVisitor : public StackVisitor {
CHECK(GetVReg(m, 0, kIntVReg, &value));
CHECK_EQ(value, 42u);
} else if (m_name.compare("$opt$noinline$testIntervalHole") == 0) {
- uint32_t number_of_dex_registers = CodeItemDataAccessor(m).RegistersSize();
+ uint32_t number_of_dex_registers =
+ CodeItemDataAccessor(m->DexInstructionData()).RegistersSize();
uint32_t dex_register_of_first_parameter = number_of_dex_registers - 2;
found_method_ = true;
uint32_t value = 0;
diff --git a/test/530-checker-lse/src/Main.java b/test/530-checker-lse/src/Main.java
index f6332b5503..98838c5089 100644
--- a/test/530-checker-lse/src/Main.java
+++ b/test/530-checker-lse/src/Main.java
@@ -398,7 +398,6 @@ public class Main {
/// CHECK-START: int Main.test15() load_store_elimination (after)
/// CHECK: <<Const2:i\d+>> IntConstant 2
/// CHECK: StaticFieldSet
- /// CHECK: StaticFieldSet
/// CHECK-NOT: StaticFieldGet
/// CHECK: Return [<<Const2>>]
@@ -773,6 +772,127 @@ public class Main {
return obj;
}
+ /// CHECK-START: void Main.testStoreStore2(TestClass2) load_store_elimination (before)
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldSet
+
+ /// CHECK-START: void Main.testStoreStore2(TestClass2) load_store_elimination (after)
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldSet
+ /// CHECK-NOT: InstanceFieldSet
+
+ private static void testStoreStore2(TestClass2 obj) {
+ obj.i = 41;
+ obj.j = 42;
+ obj.i = 43;
+ obj.j = 44;
+ }
+
+ /// CHECK-START: void Main.testStoreStore3(TestClass2, boolean) load_store_elimination (before)
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldSet
+
+ /// CHECK-START: void Main.testStoreStore3(TestClass2, boolean) load_store_elimination (after)
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldSet
+ /// CHECK-NOT: InstanceFieldSet
+
+ private static void testStoreStore3(TestClass2 obj, boolean flag) {
+ obj.i = 41;
+ obj.j = 42; // redundant since it's overwritten in both branches below.
+ if (flag) {
+ obj.j = 43;
+ } else {
+ obj.j = 44;
+ }
+ }
+
+ /// CHECK-START: void Main.testStoreStore4() load_store_elimination (before)
+ /// CHECK: StaticFieldSet
+ /// CHECK: StaticFieldSet
+
+ /// CHECK-START: void Main.testStoreStore4() load_store_elimination (after)
+ /// CHECK: StaticFieldSet
+ /// CHECK-NOT: StaticFieldSet
+
+ private static void testStoreStore4() {
+ TestClass.si = 61;
+ TestClass.si = 62;
+ }
+
+ /// CHECK-START: int Main.testStoreStore5(TestClass2, TestClass2) load_store_elimination (before)
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldGet
+ /// CHECK: InstanceFieldSet
+
+ /// CHECK-START: int Main.testStoreStore5(TestClass2, TestClass2) load_store_elimination (after)
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldGet
+ /// CHECK: InstanceFieldSet
+
+ private static int testStoreStore5(TestClass2 obj1, TestClass2 obj2) {
+ obj1.i = 71; // This store is needed since obj2.i may load from it.
+ int i = obj2.i;
+ obj1.i = 72;
+ return i;
+ }
+
+ /// CHECK-START: int Main.testStoreStore6(TestClass2, TestClass2) load_store_elimination (before)
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldGet
+ /// CHECK: InstanceFieldSet
+
+ /// CHECK-START: int Main.testStoreStore6(TestClass2, TestClass2) load_store_elimination (after)
+ /// CHECK-NOT: InstanceFieldSet
+ /// CHECK: InstanceFieldGet
+ /// CHECK: InstanceFieldSet
+
+ private static int testStoreStore6(TestClass2 obj1, TestClass2 obj2) {
+ obj1.i = 81; // This store is not needed since obj2.j cannot load from it.
+ int j = obj2.j;
+ obj1.i = 82;
+ return j;
+ }
+
+ /// CHECK-START: int Main.testNoSideEffects(int[]) load_store_elimination (before)
+ /// CHECK: ArraySet
+ /// CHECK: ArraySet
+ /// CHECK: ArraySet
+ /// CHECK: ArrayGet
+
+ /// CHECK-START: int Main.testNoSideEffects(int[]) load_store_elimination (after)
+ /// CHECK: ArraySet
+ /// CHECK: ArraySet
+ /// CHECK-NOT: ArraySet
+ /// CHECK-NOT: ArrayGet
+
+ private static int testNoSideEffects(int[] array) {
+ array[0] = 101;
+ array[1] = 102;
+ int bitCount = Integer.bitCount(0x3456);
+ array[1] = 103;
+ return array[0] + bitCount;
+ }
+
+ /// CHECK-START: void Main.testThrow(TestClass2, java.lang.Exception) load_store_elimination (before)
+ /// CHECK: InstanceFieldSet
+ /// CHECK: Throw
+
+ /// CHECK-START: void Main.testThrow(TestClass2, java.lang.Exception) load_store_elimination (after)
+ /// CHECK: InstanceFieldSet
+ /// CHECK: Throw
+
+ // Make sure throw keeps the store.
+ private static void testThrow(TestClass2 obj, Exception e) throws Exception {
+ obj.i = 55;
+ throw e;
+ }
+
/// CHECK-START: int Main.testStoreStoreWithDeoptimize(int[]) load_store_elimination (before)
/// CHECK: NewInstance
/// CHECK: InstanceFieldSet
@@ -814,23 +934,6 @@ public class Main {
return arr[0] + arr[1] + arr[2] + arr[3];
}
- /// CHECK-START: int Main.testNoSideEffects(int[]) load_store_elimination (before)
- /// CHECK: ArraySet
- /// CHECK: ArraySet
- /// CHECK: ArrayGet
-
- /// CHECK-START: int Main.testNoSideEffects(int[]) load_store_elimination (after)
- /// CHECK: ArraySet
- /// CHECK: ArraySet
- /// CHECK-NOT: ArrayGet
-
- private static int testNoSideEffects(int[] array) {
- array[0] = 101;
- int bitCount = Integer.bitCount(0x3456);
- array[1] = array[0] + 1;
- return array[0] + bitCount;
- }
-
/// CHECK-START: double Main.getCircleArea(double, boolean) load_store_elimination (before)
/// CHECK: NewInstance
@@ -1105,16 +1208,46 @@ public class Main {
assertIntEquals(testStoreStore().i, 41);
assertIntEquals(testStoreStore().j, 43);
- assertIntEquals(testStoreStoreWithDeoptimize(new int[4]), 4);
assertIntEquals(testExitMerge(true), 2);
assertIntEquals(testExitMerge2(true), 2);
assertIntEquals(testExitMerge2(false), 2);
- int ret = testNoSideEffects(iarray);
+ TestClass2 testclass2 = new TestClass2();
+ testStoreStore2(testclass2);
+ assertIntEquals(testclass2.i, 43);
+ assertIntEquals(testclass2.j, 44);
+
+ testStoreStore3(testclass2, true);
+ assertIntEquals(testclass2.i, 41);
+ assertIntEquals(testclass2.j, 43);
+ testStoreStore3(testclass2, false);
+ assertIntEquals(testclass2.i, 41);
+ assertIntEquals(testclass2.j, 44);
+
+ testStoreStore4();
+ assertIntEquals(TestClass.si, 62);
+
+ int ret = testStoreStore5(testclass2, testclass2);
+ assertIntEquals(testclass2.i, 72);
+ assertIntEquals(ret, 71);
+
+ testclass2.j = 88;
+ ret = testStoreStore6(testclass2, testclass2);
+ assertIntEquals(testclass2.i, 82);
+ assertIntEquals(ret, 88);
+
+ ret = testNoSideEffects(iarray);
assertIntEquals(iarray[0], 101);
- assertIntEquals(iarray[1], 102);
+ assertIntEquals(iarray[1], 103);
assertIntEquals(ret, 108);
+
+ try {
+ testThrow(testclass2, new Exception());
+ } catch (Exception e) {}
+ assertIntEquals(testclass2.i, 55);
+
+ assertIntEquals(testStoreStoreWithDeoptimize(new int[4]), 4);
}
static boolean sFlag;
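testStoreStore5 and testStoreStore6 hinge on whether the intervening load can observe the first store: a load of the same field through a possibly aliasing reference keeps the store alive, while a load of a different field does not. A small standalone illustration of the aliasing case, with hypothetical names:

class AliasSketch {
  int i;

  // The first store must survive LSE: the load through b can observe it
  // whenever a and b refer to the same object.
  static int storeLoadStore(AliasSketch a, AliasSketch b) {
    a.i = 71;
    int v = b.i;   // sees 71 when a == b
    a.i = 72;
    return v;
  }

  public static void main(String[] args) {
    AliasSketch s = new AliasSketch();
    System.out.println(storeLoadStore(s, s));  // prints 71; s.i ends up as 72
  }
}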
diff --git a/test/530-regression-lse/expected.txt b/test/530-regression-lse/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/530-regression-lse/expected.txt
diff --git a/test/530-regression-lse/info.txt b/test/530-regression-lse/info.txt
new file mode 100644
index 0000000000..688d0c8d1b
--- /dev/null
+++ b/test/530-regression-lse/info.txt
@@ -0,0 +1,2 @@
+Regression test (b/72440777) for load store elimination across invocation
+that has only write side effects.
diff --git a/test/530-regression-lse/src/Main.java b/test/530-regression-lse/src/Main.java
new file mode 100644
index 0000000000..7aec21c86a
--- /dev/null
+++ b/test/530-regression-lse/src/Main.java
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.File;
+import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
+import java.nio.MappedByteBuffer;
+import java.nio.channels.FileChannel;
+
+public class Main {
+ public static void assertEquals(int expected, int actual) {
+ if (expected != actual) {
+ throw new Error("Assertion failed: " + expected + " != " + actual);
+ }
+ }
+
+ private static void testRelativePositions(ByteBuffer b) throws Exception {
+ // This goes into Memory.pokeByte(), which is an intrinsic that has
+ // kWriteSideEffects. Stores before this call need to be kept.
+ b.put((byte) 0);
+ assertEquals(1, b.position());
+ }
+
+ private static ByteBuffer allocateMapped(int size) throws Exception {
+ File f = File.createTempFile("mapped", "tmp");
+ f.deleteOnExit();
+ RandomAccessFile raf = new RandomAccessFile(f, "rw");
+ raf.setLength(size);
+ FileChannel ch = raf.getChannel();
+ MappedByteBuffer result = ch.map(FileChannel.MapMode.READ_WRITE, 0, size);
+ ch.close();
+ return result;
+ }
+
+ public static void testRelativePositionsMapped() throws Exception {
+ testRelativePositions(allocateMapped(10));
+ }
+
+ public static void main(String[] args) throws Exception {
+ testRelativePositionsMapped();
+ }
+}
diff --git a/test/603-checker-instanceof/src/Main.java b/test/603-checker-instanceof/src/Main.java
index 1487969c03..2c97bedbaa 100644
--- a/test/603-checker-instanceof/src/Main.java
+++ b/test/603-checker-instanceof/src/Main.java
@@ -59,7 +59,7 @@ public class Main {
/// CHECK: InstanceOf check_kind:exact_check
/// CHECK-NOT: {{.*gs:.*}}
- /// CHECK-START-{ARM,ARM64}: boolean Main.$noinline$instanceOfString(java.lang.Object) disassembly (after)
+ /// CHECK-START-{ARM,ARM64,MIPS,MIPS64}: boolean Main.$noinline$instanceOfString(java.lang.Object) disassembly (after)
/// CHECK: InstanceOf check_kind:exact_check
// For ARM and ARM64, the marking register (r8 and x20, respectively) can be used in
// non-CC configs for any other purpose, so we'd need a config-specific checker test.
diff --git a/test/608-checker-unresolved-lse/src/Main.java b/test/608-checker-unresolved-lse/src/Main.java
index c6f8854b49..a39dd51bdf 100644
--- a/test/608-checker-unresolved-lse/src/Main.java
+++ b/test/608-checker-unresolved-lse/src/Main.java
@@ -88,7 +88,6 @@ public class Main extends MissingSuperClass {
/// CHECK-START: void Main.staticFieldTest() load_store_elimination (after)
/// CHECK: StaticFieldSet
- /// CHECK: StaticFieldSet
/// CHECK: UnresolvedStaticFieldGet
public static void staticFieldTest() {
// Ensure Foo is initialized.
diff --git a/test/639-checker-code-sinking/expected.txt b/test/639-checker-code-sinking/expected.txt
index 52e756c231..5d4833aca8 100644
--- a/test/639-checker-code-sinking/expected.txt
+++ b/test/639-checker-code-sinking/expected.txt
@@ -1,3 +1,3 @@
0
class java.lang.Object
-43
+42
diff --git a/test/639-checker-code-sinking/src/Main.java b/test/639-checker-code-sinking/src/Main.java
index 7496925adc..a1c30f7b4e 100644
--- a/test/639-checker-code-sinking/src/Main.java
+++ b/test/639-checker-code-sinking/src/Main.java
@@ -337,7 +337,7 @@ public class Main {
public static void testStoreStore(boolean doThrow) {
Main m = new Main();
m.intField = 42;
- m.intField = 43;
+ m.intField2 = 43;
if (doThrow) {
throw new Error(m.$opt$noinline$toString());
}
@@ -349,6 +349,7 @@ public class Main {
volatile int volatileField;
int intField;
+ int intField2;
Object objectField;
static boolean doThrow;
static boolean doLoop;
diff --git a/test/651-checker-int-simd-minmax/src/Main.java b/test/651-checker-int-simd-minmax/src/Main.java
index 66343adaa8..cfa0ae7dca 100644
--- a/test/651-checker-int-simd-minmax/src/Main.java
+++ b/test/651-checker-int-simd-minmax/src/Main.java
@@ -27,10 +27,10 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Min>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-{ARM,ARM64,MIPS64}: void Main.doitMin(int[], int[], int[]) loop_optimization (after)
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] unsigned:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Min>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Min:d\d+>> VecMin [<<Get1>>,<<Get2>>] packed_type:Int32 loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Min>>] loop:<<Loop>> outer_loop:none
private static void doitMin(int[] x, int[] y, int[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
@@ -46,10 +46,10 @@ public class Main {
/// CHECK-DAG: ArraySet [{{l\d+}},<<Phi>>,<<Max>>] loop:<<Loop>> outer_loop:none
//
/// CHECK-START-{ARM,ARM64,MIPS64}: void Main.doitMax(int[], int[], int[]) loop_optimization (after)
- /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] unsigned:false loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Max>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:d\d+>> VecLoad loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Get2:d\d+>> VecLoad loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Max:d\d+>> VecMax [<<Get1>>,<<Get2>>] packed_type:Int32 loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: VecStore [{{l\d+}},{{i\d+}},<<Max>>] loop:<<Loop>> outer_loop:none
private static void doitMax(int[] x, int[] y, int[] z) {
int min = Math.min(x.length, Math.min(y.length, z.length));
for (int i = 0; i < min; i++) {
diff --git a/test/672-checker-throw-method/expected.txt b/test/672-checker-throw-method/expected.txt
new file mode 100644
index 0000000000..b0aad4deb5
--- /dev/null
+++ b/test/672-checker-throw-method/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/672-checker-throw-method/info.txt b/test/672-checker-throw-method/info.txt
new file mode 100644
index 0000000000..250810be15
--- /dev/null
+++ b/test/672-checker-throw-method/info.txt
@@ -0,0 +1 @@
+Test detecting throwing methods for code sinking.
diff --git a/test/672-checker-throw-method/src/Main.java b/test/672-checker-throw-method/src/Main.java
new file mode 100644
index 0000000000..a507133b91
--- /dev/null
+++ b/test/672-checker-throw-method/src/Main.java
@@ -0,0 +1,316 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Tests for detecting throwing methods for code sinking.
+ */
+public class Main {
+
+ //
+ // Some "runtime library" methods.
+ //
+
+ static private void doThrow(String par) {
+ throw new Error("you are null: " + par);
+ }
+
+ static private void checkNotNullDirect(Object obj, String par) {
+ if (obj == null)
+ throw new Error("you are null: " + par);
+ }
+
+ static private void checkNotNullSplit(Object obj, String par) {
+ if (obj == null)
+ doThrow(par);
+ }
+
+ static private void checkNotNullSplitAlt(Object obj, String par) {
+ if (obj != null)
+ return;
+ doThrow(par);
+ }
+
+ //
+ // Various ways of enforcing non-null parameter.
+ // In all cases, par should be subject to code sinking.
+ //
+
+ /// CHECK-START: void Main.doit1(int[]) code_sinking (before)
+ /// CHECK: begin_block
+ /// CHECK: <<Str:l\d+>> LoadString
+ /// CHECK: <<Tst:z\d+>> NotEqual
+ /// CHECK: If [<<Tst>>]
+ /// CHECK: end_block
+ /// CHECK: begin_block
+ /// CHECK: InvokeVirtual [{{l\d+}},<<Str>>]
+ /// CHECK: Throw
+ /// CHECK: end_block
+ //
+ /// CHECK-START: void Main.doit1(int[]) code_sinking (after)
+ /// CHECK: begin_block
+ /// CHECK: <<Tst:z\d+>> NotEqual
+ /// CHECK: If [<<Tst>>]
+ /// CHECK: end_block
+ /// CHECK: begin_block
+ /// CHECK: <<Str:l\d+>> LoadString
+ /// CHECK: InvokeVirtual [{{l\d+}},<<Str>>]
+ /// CHECK: Throw
+ /// CHECK: end_block
+ static public void doit1(int[] a) {
+ String par = "a";
+ if (a == null)
+ throw new Error("you are null: " + par);
+ for (int i = 0; i < a.length; i++) {
+ a[i] = 1;
+ }
+ }
+
+ /// CHECK-START: void Main.doit2(int[]) code_sinking (before)
+ /// CHECK: begin_block
+ /// CHECK: <<Str:l\d+>> LoadString
+ /// CHECK: <<Tst:z\d+>> NotEqual
+ /// CHECK: If [<<Tst>>]
+ /// CHECK: end_block
+ /// CHECK: begin_block
+ /// CHECK: InvokeStaticOrDirect [<<Str>>] method_name:Main.doThrow
+ /// CHECK: end_block
+ //
+ /// CHECK-START: void Main.doit2(int[]) code_sinking (after)
+ /// CHECK: begin_block
+ /// CHECK: <<Tst:z\d+>> NotEqual
+ /// CHECK: If [<<Tst>>]
+ /// CHECK: end_block
+ /// CHECK: begin_block
+ /// CHECK: <<Str:l\d+>> LoadString
+ /// CHECK: InvokeStaticOrDirect [<<Str>>] method_name:Main.doThrow
+ /// CHECK: end_block
+ static public void doit2(int[] a) {
+ String par = "a";
+ if (a == null)
+ doThrow(par);
+ for (int i = 0; i < a.length; i++) {
+ a[i] = 2;
+ }
+ }
+
+ /// CHECK-START: void Main.doit3(int[]) code_sinking (before)
+ /// CHECK: begin_block
+ /// CHECK: <<Str:l\d+>> LoadString
+ /// CHECK: <<Tst:z\d+>> NotEqual
+ /// CHECK: If [<<Tst>>]
+ /// CHECK: end_block
+ /// CHECK: begin_block
+ /// CHECK: InvokeVirtual [{{l\d+}},<<Str>>]
+ /// CHECK: Throw
+ /// CHECK: end_block
+ //
+ /// CHECK-START: void Main.doit3(int[]) code_sinking (after)
+ /// CHECK: begin_block
+ /// CHECK: <<Tst:z\d+>> NotEqual
+ /// CHECK: If [<<Tst>>]
+ /// CHECK: end_block
+ /// CHECK: begin_block
+ /// CHECK: <<Str:l\d+>> LoadString
+ /// CHECK: InvokeVirtual [{{l\d+}},<<Str>>]
+ /// CHECK: Throw
+ /// CHECK: end_block
+ static public void doit3(int[] a) {
+ String par = "a";
+ checkNotNullDirect(a, par);
+ for (int i = 0; i < a.length; i++) {
+ a[i] = 3;
+ }
+ }
+
+ /// CHECK-START: void Main.doit4(int[]) code_sinking (before)
+ /// CHECK: begin_block
+ /// CHECK: <<Str:l\d+>> LoadString
+ /// CHECK: <<Tst:z\d+>> NotEqual
+ /// CHECK: If [<<Tst>>]
+ /// CHECK: end_block
+ /// CHECK: begin_block
+ /// CHECK: InvokeStaticOrDirect [<<Str>>] method_name:Main.doThrow
+ /// CHECK: end_block
+ //
+ /// CHECK-START: void Main.doit4(int[]) code_sinking (after)
+ /// CHECK: begin_block
+ /// CHECK: <<Tst:z\d+>> NotEqual
+ /// CHECK: If [<<Tst>>]
+ /// CHECK: end_block
+ /// CHECK: begin_block
+ /// CHECK: <<Str:l\d+>> LoadString
+ /// CHECK: InvokeStaticOrDirect [<<Str>>] method_name:Main.doThrow
+ /// CHECK: end_block
+ static public void doit4(int[] a) {
+ String par = "a";
+ checkNotNullSplit(a, par); // resembles Kotlin runtime lib
+ // (test is inlined, doThrow is not)
+ for (int i = 0; i < a.length; i++) {
+ a[i] = 4;
+ }
+ }
+
+ // Ensures Phi values are merged properly.
+ static public int doit5(int[] a) {
+ int t = 100;
+ String par = "a";
+ if (a == null) {
+ doThrow(par);
+ } else {
+ t = 1000;
+ }
+ for (int i = 0; i < a.length; i++) {
+ a[i] = 5;
+ }
+ // Phi on t, even though doThrow never reaches.
+ return t;
+ }
+
+ //
+ // Various ways of exploiting non-null parameter.
+ // In all cases, implicit null checks are redundant.
+ //
+
+ /// CHECK-START: int Main.deleteNullCheck(int[]) dead_code_elimination$after_inlining (before)
+ /// CHECK: <<Par:l\d+>> ParameterValue
+ /// CHECK: <<Zero:i\d+>> IntConstant 0
+ /// CHECK: <<Null:l\d+>> NullCheck [<<Par>>]
+ /// CHECK: <<Len:i\d+>> ArrayLength [<<Null>>]
+ /// CHECK: <<Check:i\d+>> BoundsCheck [<<Zero>>,<<Len>>]
+ /// CHECK: <<Get:i\d+>> ArrayGet [<<Null>>,<<Check>>]
+ /// CHECK: Return [<<Get>>]
+ //
+ /// CHECK-START: int Main.deleteNullCheck(int[]) dead_code_elimination$after_inlining (after)
+ /// CHECK: <<Par:l\d+>> ParameterValue
+ /// CHECK: <<Zero:i\d+>> IntConstant 0
+ /// CHECK: <<BT:l\d+>> BoundType [<<Par>>]
+ /// CHECK: <<Len:i\d+>> ArrayLength [<<BT>>]
+ /// CHECK: <<Check:i\d+>> BoundsCheck [<<Zero>>,<<Len>>]
+ /// CHECK: <<Get:i\d+>> ArrayGet [<<BT>>,<<Check>>]
+ /// CHECK: Return [<<Get>>]
+ //
+ /// CHECK-START: int Main.deleteNullCheck(int[]) dead_code_elimination$after_inlining (after)
+ /// CHECK-NOT: NullCheck
+ static public int deleteNullCheck(int[] a) {
+ checkNotNullSplit(a, "a");
+ return a[0];
+ }
+
+ /// CHECK-START: int Main.deleteNullCheckAlt(int[]) dead_code_elimination$after_inlining (before)
+ /// CHECK: NullCheck
+ //
+ /// CHECK-START: int Main.deleteNullCheckAlt(int[]) dead_code_elimination$after_inlining (after)
+ /// CHECK-NOT: NullCheck
+ static public int deleteNullCheckAlt(int[] a) {
+ checkNotNullSplitAlt(a, "a");
+ return a[0];
+ }
+
+ /// CHECK-START: int Main.deleteNullChecks3(int[], int[], int[]) dead_code_elimination$after_inlining (before)
+ /// CHECK: NullCheck
+ /// CHECK: NullCheck
+ /// CHECK: NullCheck
+ //
+ /// CHECK-START: int Main.deleteNullChecks3(int[], int[], int[]) dead_code_elimination$after_inlining (after)
+ /// CHECK-NOT: NullCheck
+ static public int deleteNullChecks3(int[] a, int[] b, int[] c) {
+ checkNotNullSplit(a, "a");
+ checkNotNullSplit(b, "b");
+ checkNotNullSplit(c, "c");
+ return a[0] + b[0] + c[0];
+ }
+
+ //
+ // Test driver.
+ //
+
+ static public void main(String[] args) {
+ int[] a = new int[100];
+ for (int i = 0; i < 100; i++) {
+ a[i] = 0;
+ }
+
+ try {
+ doit1(null);
+ System.out.println("should not reach this!");
+ } catch (Error e) {
+ doit1(a);
+ }
+ for (int i = 0; i < 100; i++) {
+ expectEquals(1, a[i]);
+ }
+
+ try {
+ doit2(null);
+ System.out.println("should not reach this!");
+ } catch (Error e) {
+ doit2(a);
+ }
+ for (int i = 0; i < 100; i++) {
+ expectEquals(2, a[i]);
+ }
+
+ try {
+ doit3(null);
+ System.out.println("should not reach this!");
+ } catch (Error e) {
+ doit3(a);
+ }
+ for (int i = 0; i < 100; i++) {
+ expectEquals(3, a[i]);
+ }
+
+ try {
+ doit4(null);
+ System.out.println("should not reach this!");
+ } catch (Error e) {
+ doit4(a);
+ }
+ for (int i = 0; i < 100; i++) {
+ expectEquals(4, a[i]);
+ }
+
+ try {
+ doit5(null);
+ System.out.println("should not reach this!");
+ } catch (Error e) {
+ expectEquals(1000, doit5(a));
+ }
+ for (int i = 0; i < 100; i++) {
+ expectEquals(5, a[i]);
+ }
+
+ int[] x = { 11 } ;
+ expectEquals(11, deleteNullCheck(x));
+ int[] y = { 55 } ;
+ int[] z = { 22 } ;
+ expectEquals(88, deleteNullChecks3(x, y, z));
+
+ try {
+ deleteNullCheck(null);
+ System.out.println("should not reach this!");
+ } catch (Error e) {
+ }
+
+ System.out.println("passed");
+ }
+
+ private static void expectEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+}
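The pattern these tests detect is the split null check common in runtime libraries: only the reference comparison has to run on the hot path, while the string concatenation and the Error allocation are needed solely on the throwing path, so code sinking can move them there. A compact standalone version of that shape, with hypothetical names:

class SinkSketch {
  static void doThrow(String par) {
    throw new Error("you are null: " + par);
  }

  static void checkNotNull(Object obj, String par) {
    if (obj == null) doThrow(par);  // only the compare stays on the hot path
  }

  static int sum(int[] a) {
    checkNotNull(a, "a");
    int s = 0;
    for (int v : a) s += v;
    return s;
  }

  public static void main(String[] args) {
    System.out.println(sum(new int[] {1, 2, 3}));  // prints 6
    try {
      sum(null);
    } catch (Error expected) {
      System.out.println("threw");                 // the sunk path still throws
    }
  }
}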
diff --git a/test/673-checker-throw-vmethod/expected.txt b/test/673-checker-throw-vmethod/expected.txt
new file mode 100644
index 0000000000..b0aad4deb5
--- /dev/null
+++ b/test/673-checker-throw-vmethod/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/673-checker-throw-vmethod/info.txt b/test/673-checker-throw-vmethod/info.txt
new file mode 100644
index 0000000000..250810be15
--- /dev/null
+++ b/test/673-checker-throw-vmethod/info.txt
@@ -0,0 +1 @@
+Test detecting throwing methods for code sinking.
diff --git a/test/673-checker-throw-vmethod/src/Main.java b/test/673-checker-throw-vmethod/src/Main.java
new file mode 100644
index 0000000000..d0e1591bdb
--- /dev/null
+++ b/test/673-checker-throw-vmethod/src/Main.java
@@ -0,0 +1,219 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Tests for detecting throwing methods for code sinking.
+ */
+public class Main {
+
+ //
+ // Some "runtime library" methods.
+ //
+
+ public final void doThrow(String par) {
+ throw new Error("you are null: " + par);
+ }
+
+ public final void checkNotNullDirect(Object obj, String par) {
+ if (obj == null)
+ throw new Error("you are null: " + par);
+ }
+
+ public final void checkNotNullSplit(Object obj, String par) {
+ if (obj == null)
+ doThrow(par);
+ }
+
+ //
+ // Various ways of enforcing non-null parameter.
+ // In all cases, par should be subject to code sinking.
+ //
+
+ /// CHECK-START: void Main.doit1(int[]) code_sinking (before)
+ /// CHECK: begin_block
+ /// CHECK: <<Str:l\d+>> LoadString
+ /// CHECK: <<Tst:z\d+>> NotEqual
+ /// CHECK: If [<<Tst>>]
+ /// CHECK: end_block
+ /// CHECK: begin_block
+ /// CHECK: InvokeVirtual [{{l\d+}},<<Str>>]
+ /// CHECK: Throw
+ /// CHECK: end_block
+ //
+ /// CHECK-START: void Main.doit1(int[]) code_sinking (after)
+ /// CHECK: begin_block
+ /// CHECK: <<Tst:z\d+>> NotEqual
+ /// CHECK: If [<<Tst>>]
+ /// CHECK: end_block
+ /// CHECK: begin_block
+ /// CHECK: <<Str:l\d+>> LoadString
+ /// CHECK: InvokeVirtual [{{l\d+}},<<Str>>]
+ /// CHECK: Throw
+ /// CHECK: end_block
+ public void doit1(int[] a) {
+ String par = "a";
+ if (a == null)
+ throw new Error("you are null: " + par);
+ for (int i = 0; i < a.length; i++) {
+ a[i] = 1;
+ }
+ }
+
+ /// CHECK-START: void Main.doit2(int[]) code_sinking (before)
+ /// CHECK: begin_block
+ /// CHECK: <<Str:l\d+>> LoadString
+ /// CHECK: <<Tst:z\d+>> NotEqual
+ /// CHECK: If [<<Tst>>]
+ /// CHECK: end_block
+ /// CHECK: begin_block
+ /// CHECK: InvokeVirtual [{{l\d+}},<<Str>>] method_name:Main.doThrow
+ /// CHECK: end_block
+ //
+ /// CHECK-START: void Main.doit2(int[]) code_sinking (after)
+ /// CHECK: begin_block
+ /// CHECK: <<Tst:z\d+>> NotEqual
+ /// CHECK: If [<<Tst>>]
+ /// CHECK: end_block
+ /// CHECK: begin_block
+ /// CHECK: <<Str:l\d+>> LoadString
+ /// CHECK: InvokeVirtual [{{l\d+}},<<Str>>] method_name:Main.doThrow
+ /// CHECK: end_block
+ public void doit2(int[] a) {
+ String par = "a";
+ if (a == null)
+ doThrow(par);
+ for (int i = 0; i < a.length; i++) {
+ a[i] = 2;
+ }
+ }
+
+ /// CHECK-START: void Main.doit3(int[]) code_sinking (before)
+ /// CHECK: begin_block
+ /// CHECK: <<Str:l\d+>> LoadString
+ /// CHECK: <<Tst:z\d+>> NotEqual
+ /// CHECK: If [<<Tst>>]
+ /// CHECK: end_block
+ /// CHECK: begin_block
+ /// CHECK: InvokeVirtual [{{l\d+}},<<Str>>]
+ /// CHECK: Throw
+ /// CHECK: end_block
+ //
+ /// CHECK-START: void Main.doit3(int[]) code_sinking (after)
+ /// CHECK: begin_block
+ /// CHECK: <<Tst:z\d+>> NotEqual
+ /// CHECK: If [<<Tst>>]
+ /// CHECK: end_block
+ /// CHECK: begin_block
+ /// CHECK: <<Str:l\d+>> LoadString
+ /// CHECK: InvokeVirtual [{{l\d+}},<<Str>>]
+ /// CHECK: Throw
+ /// CHECK: end_block
+ public void doit3(int[] a) {
+ String par = "a";
+ checkNotNullDirect(a, par);
+ for (int i = 0; i < a.length; i++) {
+ a[i] = 3;
+ }
+ }
+
+ /// CHECK-START: void Main.doit4(int[]) code_sinking (before)
+ /// CHECK: begin_block
+ /// CHECK: <<Str:l\d+>> LoadString
+ /// CHECK: <<Tst:z\d+>> NotEqual
+ /// CHECK: If [<<Tst>>]
+ /// CHECK: end_block
+ /// CHECK: begin_block
+ /// CHECK: InvokeVirtual [{{l\d+}},<<Str>>] method_name:Main.doThrow
+ /// CHECK: end_block
+ //
+ /// CHECK-START: void Main.doit4(int[]) code_sinking (after)
+ /// CHECK: begin_block
+ /// CHECK: <<Tst:z\d+>> NotEqual
+ /// CHECK: If [<<Tst>>]
+ /// CHECK: end_block
+ /// CHECK: begin_block
+ /// CHECK: <<Str:l\d+>> LoadString
+ /// CHECK: InvokeVirtual [{{l\d+}},<<Str>>] method_name:Main.doThrow
+ /// CHECK: end_block
+ public void doit4(int[] a) {
+ String par = "a";
+ checkNotNullSplit(a, par);
+ for (int i = 0; i < a.length; i++) {
+ a[i] = 4;
+ }
+ }
+
+ //
+ // Test driver.
+ //
+
+ static public void main(String[] args) {
+ int[] a = new int[100];
+ for (int i = 0; i < 100; i++) {
+ a[i] = 0;
+ }
+
+ Main m = new Main();
+
+ try {
+ m.doit1(null);
+ System.out.println("should not reach this!");
+ } catch (Error e) {
+ m.doit1(a);
+ }
+ for (int i = 0; i < 100; i++) {
+ expectEquals(1, a[i]);
+ }
+
+ try {
+ m.doit2(null);
+ System.out.println("should not reach this!");
+ } catch (Error e) {
+ m.doit2(a);
+ }
+ for (int i = 0; i < 100; i++) {
+ expectEquals(2, a[i]);
+ }
+
+ try {
+ m.doit3(null);
+ System.out.println("should not reach this!");
+ } catch (Error e) {
+ m.doit3(a);
+ }
+ for (int i = 0; i < 100; i++) {
+ expectEquals(3, a[i]);
+ }
+
+ try {
+ m.doit4(null);
+ System.out.println("should not reach this!");
+ } catch (Error e) {
+ m.doit4(a);
+ }
+ for (int i = 0; i < 100; i++) {
+ expectEquals(4, a[i]);
+ }
+
+ System.out.println("passed");
+ }
+
+ private static void expectEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+}
diff --git a/test/674-HelloWorld-Dm/expected.txt b/test/674-HelloWorld-Dm/expected.txt
new file mode 100644
index 0000000000..af5626b4a1
--- /dev/null
+++ b/test/674-HelloWorld-Dm/expected.txt
@@ -0,0 +1 @@
+Hello, world!
diff --git a/test/674-HelloWorld-Dm/info.txt b/test/674-HelloWorld-Dm/info.txt
new file mode 100644
index 0000000000..3a769c48a6
--- /dev/null
+++ b/test/674-HelloWorld-Dm/info.txt
@@ -0,0 +1 @@
+Hello World test with --dm-file passed to dex2oat.
diff --git a/test/674-HelloWorld-Dm/run b/test/674-HelloWorld-Dm/run
new file mode 100644
index 0000000000..199ffc31e1
--- /dev/null
+++ b/test/674-HelloWorld-Dm/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+exec ${RUN} --dm "${@}"
diff --git a/test/674-HelloWorld-Dm/src/Main.java b/test/674-HelloWorld-Dm/src/Main.java
new file mode 100644
index 0000000000..1ef6289559
--- /dev/null
+++ b/test/674-HelloWorld-Dm/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) {
+ System.out.println("Hello, world!");
+ }
+}
diff --git a/test/674-hiddenapi/api-blacklist.txt b/test/674-hiddenapi/api-blacklist.txt
new file mode 100644
index 0000000000..d43360c62f
--- /dev/null
+++ b/test/674-hiddenapi/api-blacklist.txt
@@ -0,0 +1,25 @@
+LNullaryConstructorBlacklist;-><init>()V
+LParentClass;->fieldPublicBlacklist:I
+LParentClass;->fieldPackageBlacklist:I
+LParentClass;->fieldProtectedBlacklist:I
+LParentClass;->fieldPrivateBlacklist:I
+LParentClass;->fieldPublicStaticBlacklist:I
+LParentClass;->fieldPackageStaticBlacklist:I
+LParentClass;->fieldProtectedStaticBlacklist:I
+LParentClass;->fieldPrivateStaticBlacklist:I
+LParentClass;->methodPublicBlacklist()I
+LParentClass;->methodPackageBlacklist()I
+LParentClass;->methodProtectedBlacklist()I
+LParentClass;->methodPrivateBlacklist()I
+LParentClass;->methodPublicStaticBlacklist()I
+LParentClass;->methodPackageStaticBlacklist()I
+LParentClass;->methodProtectedStaticBlacklist()I
+LParentClass;->methodPrivateStaticBlacklist()I
+LParentClass;-><init>(IC)V
+LParentClass;-><init>(FC)V
+LParentClass;-><init>(JC)V
+LParentClass;-><init>(DC)V
+LParentInterface;->fieldPublicStaticBlacklist:I
+LParentInterface;->methodPublicBlacklist()I
+LParentInterface;->methodPublicStaticBlacklist()I
+LParentInterface;->methodPublicDefaultBlacklist()I
\ No newline at end of file
diff --git a/test/674-hiddenapi/api-dark-greylist.txt b/test/674-hiddenapi/api-dark-greylist.txt
new file mode 100644
index 0000000000..d0f35f64bc
--- /dev/null
+++ b/test/674-hiddenapi/api-dark-greylist.txt
@@ -0,0 +1,25 @@
+LNullaryConstructorDarkGreylist;-><init>()V
+LParentClass;->fieldPublicDarkGreylist:I
+LParentClass;->fieldPackageDarkGreylist:I
+LParentClass;->fieldProtectedDarkGreylist:I
+LParentClass;->fieldPrivateDarkGreylist:I
+LParentClass;->fieldPublicStaticDarkGreylist:I
+LParentClass;->fieldPackageStaticDarkGreylist:I
+LParentClass;->fieldProtectedStaticDarkGreylist:I
+LParentClass;->fieldPrivateStaticDarkGreylist:I
+LParentClass;->methodPublicDarkGreylist()I
+LParentClass;->methodPackageDarkGreylist()I
+LParentClass;->methodProtectedDarkGreylist()I
+LParentClass;->methodPrivateDarkGreylist()I
+LParentClass;->methodPublicStaticDarkGreylist()I
+LParentClass;->methodPackageStaticDarkGreylist()I
+LParentClass;->methodProtectedStaticDarkGreylist()I
+LParentClass;->methodPrivateStaticDarkGreylist()I
+LParentClass;-><init>(IB)V
+LParentClass;-><init>(FB)V
+LParentClass;-><init>(JB)V
+LParentClass;-><init>(DB)V
+LParentInterface;->fieldPublicStaticDarkGreylist:I
+LParentInterface;->methodPublicDarkGreylist()I
+LParentInterface;->methodPublicStaticDarkGreylist()I
+LParentInterface;->methodPublicDefaultDarkGreylist()I
\ No newline at end of file
diff --git a/test/674-hiddenapi/api-light-greylist.txt b/test/674-hiddenapi/api-light-greylist.txt
new file mode 100644
index 0000000000..2809025cfd
--- /dev/null
+++ b/test/674-hiddenapi/api-light-greylist.txt
@@ -0,0 +1,25 @@
+LNullaryConstructorLightGreylist;-><init>()V
+LParentClass;->fieldPublicLightGreylist:I
+LParentClass;->fieldPackageLightGreylist:I
+LParentClass;->fieldProtectedLightGreylist:I
+LParentClass;->fieldPrivateLightGreylist:I
+LParentClass;->fieldPublicStaticLightGreylist:I
+LParentClass;->fieldPackageStaticLightGreylist:I
+LParentClass;->fieldProtectedStaticLightGreylist:I
+LParentClass;->fieldPrivateStaticLightGreylist:I
+LParentClass;->methodPublicLightGreylist()I
+LParentClass;->methodPackageLightGreylist()I
+LParentClass;->methodProtectedLightGreylist()I
+LParentClass;->methodPrivateLightGreylist()I
+LParentClass;->methodPublicStaticLightGreylist()I
+LParentClass;->methodPackageStaticLightGreylist()I
+LParentClass;->methodProtectedStaticLightGreylist()I
+LParentClass;->methodPrivateStaticLightGreylist()I
+LParentClass;-><init>(IZ)V
+LParentClass;-><init>(FZ)V
+LParentClass;-><init>(JZ)V
+LParentClass;-><init>(DZ)V
+LParentInterface;->fieldPublicStaticLightGreylist:I
+LParentInterface;->methodPublicLightGreylist()I
+LParentInterface;->methodPublicStaticLightGreylist()I
+LParentInterface;->methodPublicDefaultLightGreylist()I
\ No newline at end of file
diff --git a/test/674-hiddenapi/build b/test/674-hiddenapi/build
new file mode 100644
index 0000000000..330a6def29
--- /dev/null
+++ b/test/674-hiddenapi/build
@@ -0,0 +1,38 @@
+#!/bin/bash
+#
+# Copyright 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+# Build the jars twice. First with applying hiddenapi, creating a boot jar, then
+# a second time without to create a normal jar. We need to do this because we
+# want to load the jar once as an app module and once as a member of the boot
+# class path. The DexFileVerifier would fail on the former as it does not allow
+# hidden API access flags in dex files. DexFileVerifier is not invoked on boot
+# class path dex files, so the boot jar loads fine in the latter case.
+
+export USE_HIDDENAPI=true
+./default-build "$@"
+
+# Move the jar file into the resource folder to be bundled with the test.
+mkdir res
+mv ${TEST_NAME}.jar res/boot.jar
+
+# Clear all intermediate files otherwise default-build would either skip
+# compilation or fail rebuilding.
+rm -rf classes*
+
+export USE_HIDDENAPI=false
+./default-build "$@"
diff --git a/test/674-hiddenapi/check b/test/674-hiddenapi/check
new file mode 100644
index 0000000000..c319a0ae97
--- /dev/null
+++ b/test/674-hiddenapi/check
@@ -0,0 +1,23 @@
+#!/bin/bash
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Remove pid and date from the log messages.
+grep -vE '^dalvikvm(32|64) E [^]]+]' "$2" \
+ | grep -v JNI_OnLoad \
+ | grep -v JNI_OnUnload \
+ > "$2.tmp"
+
+./default-check "$1" "$2.tmp"
diff --git a/test/674-hiddenapi/expected.txt b/test/674-hiddenapi/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/674-hiddenapi/expected.txt
diff --git a/test/674-hiddenapi/hiddenapi.cc b/test/674-hiddenapi/hiddenapi.cc
new file mode 100644
index 0000000000..effa37ade4
--- /dev/null
+++ b/test/674-hiddenapi/hiddenapi.cc
@@ -0,0 +1,298 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "class_linker.h"
+#include "dex/art_dex_file_loader.h"
+#include "jni.h"
+#include "runtime.h"
+#include "scoped_thread_state_change-inl.h"
+#include "thread.h"
+#include "ti-agent/scoped_utf_chars.h"
+
+namespace art {
+namespace Test674HiddenApi {
+
+extern "C" JNIEXPORT void JNICALL Java_Main_init(JNIEnv*, jclass) {
+ Runtime* runtime = Runtime::Current();
+ runtime->SetHiddenApiChecksEnabled(true);
+ runtime->SetDedupeHiddenApiWarnings(false);
+ runtime->AlwaysSetHiddenApiWarningFlag();
+}
+
+extern "C" JNIEXPORT void JNICALL Java_Main_appendToBootClassLoader(
+ JNIEnv* env, jclass, jstring jpath) {
+ ScopedUtfChars utf(env, jpath);
+ const char* path = utf.c_str();
+ if (path == nullptr) {
+ return;
+ }
+
+ ArtDexFileLoader dex_loader;
+ std::string error_msg;
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
+ if (!dex_loader.Open(path,
+ path,
+ /* verify */ false,
+ /* verify_checksum */ true,
+ &error_msg,
+ &dex_files)) {
+ LOG(FATAL) << "Could not open " << path << " for boot classpath extension: " << error_msg;
+ UNREACHABLE();
+ }
+
+ ScopedObjectAccess soa(Thread::Current());
+ for (std::unique_ptr<const DexFile>& dex_file : dex_files) {
+ Runtime::Current()->GetClassLinker()->AppendToBootClassPath(
+ Thread::Current(), *dex_file.release());
+ }
+}
+
+static jobject NewInstance(JNIEnv* env, jclass klass) {
+ jmethodID constructor = env->GetMethodID(klass, "<init>", "()V");
+ if (constructor == NULL) {
+ return NULL;
+ }
+ return env->NewObject(klass, constructor);
+}
+
+extern "C" JNIEXPORT jboolean JNICALL Java_JNI_canDiscoverField(
+ JNIEnv* env, jclass, jclass klass, jstring name, jboolean is_static) {
+ ScopedUtfChars utf_name(env, name);
+ jfieldID field = is_static ? env->GetStaticFieldID(klass, utf_name.c_str(), "I")
+ : env->GetFieldID(klass, utf_name.c_str(), "I");
+ if (field == NULL) {
+ env->ExceptionClear();
+ return JNI_FALSE;
+ }
+
+ return JNI_TRUE;
+}
+
+extern "C" JNIEXPORT jboolean JNICALL Java_JNI_canGetField(
+ JNIEnv* env, jclass, jclass klass, jstring name, jboolean is_static) {
+ ScopedUtfChars utf_name(env, name);
+ jfieldID field = is_static ? env->GetStaticFieldID(klass, utf_name.c_str(), "I")
+ : env->GetFieldID(klass, utf_name.c_str(), "I");
+ if (field == NULL) {
+ env->ExceptionClear();
+ return JNI_FALSE;
+ }
+ if (is_static) {
+ env->GetStaticIntField(klass, field);
+ } else {
+ jobject obj = NewInstance(env, klass);
+ if (obj == NULL) {
+ env->ExceptionDescribe();
+ env->ExceptionClear();
+ return JNI_FALSE;
+ }
+ env->GetIntField(obj, field);
+ }
+
+ if (env->ExceptionOccurred()) {
+ env->ExceptionDescribe();
+ env->ExceptionClear();
+ return JNI_FALSE;
+ }
+
+ return JNI_TRUE;
+}
+
+extern "C" JNIEXPORT jboolean JNICALL Java_JNI_canSetField(
+ JNIEnv* env, jclass, jclass klass, jstring name, jboolean is_static) {
+ ScopedUtfChars utf_name(env, name);
+ jfieldID field = is_static ? env->GetStaticFieldID(klass, utf_name.c_str(), "I")
+ : env->GetFieldID(klass, utf_name.c_str(), "I");
+ if (field == NULL) {
+ env->ExceptionClear();
+ return JNI_FALSE;
+ }
+ if (is_static) {
+ env->SetStaticIntField(klass, field, 42);
+ } else {
+ jobject obj = NewInstance(env, klass);
+ if (obj == NULL) {
+ env->ExceptionDescribe();
+ env->ExceptionClear();
+ return JNI_FALSE;
+ }
+ env->SetIntField(obj, field, 42);
+ }
+
+ if (env->ExceptionOccurred()) {
+ env->ExceptionDescribe();
+ env->ExceptionClear();
+ return JNI_FALSE;
+ }
+
+ return JNI_TRUE;
+}
+
+extern "C" JNIEXPORT jboolean JNICALL Java_JNI_canDiscoverMethod(
+ JNIEnv* env, jclass, jclass klass, jstring name, jboolean is_static) {
+ ScopedUtfChars utf_name(env, name);
+ jmethodID method = is_static ? env->GetStaticMethodID(klass, utf_name.c_str(), "()I")
+ : env->GetMethodID(klass, utf_name.c_str(), "()I");
+ if (method == NULL) {
+ env->ExceptionClear();
+ return JNI_FALSE;
+ }
+
+ return JNI_TRUE;
+}
+
+extern "C" JNIEXPORT jboolean JNICALL Java_JNI_canInvokeMethodA(
+ JNIEnv* env, jclass, jclass klass, jstring name, jboolean is_static) {
+ ScopedUtfChars utf_name(env, name);
+ jmethodID method = is_static ? env->GetStaticMethodID(klass, utf_name.c_str(), "()I")
+ : env->GetMethodID(klass, utf_name.c_str(), "()I");
+ if (method == NULL) {
+ env->ExceptionClear();
+ return JNI_FALSE;
+ }
+
+ if (is_static) {
+ env->CallStaticIntMethodA(klass, method, nullptr);
+ } else {
+ jobject obj = NewInstance(env, klass);
+ if (obj == NULL) {
+ env->ExceptionDescribe();
+ env->ExceptionClear();
+ return JNI_FALSE;
+ }
+ env->CallIntMethodA(obj, method, nullptr);
+ }
+
+ if (env->ExceptionOccurred()) {
+ env->ExceptionDescribe();
+ env->ExceptionClear();
+ return JNI_FALSE;
+ }
+
+ return JNI_TRUE;
+}
+
+extern "C" JNIEXPORT jboolean JNICALL Java_JNI_canInvokeMethodV(
+ JNIEnv* env, jclass, jclass klass, jstring name, jboolean is_static) {
+ ScopedUtfChars utf_name(env, name);
+ jmethodID method = is_static ? env->GetStaticMethodID(klass, utf_name.c_str(), "()I")
+ : env->GetMethodID(klass, utf_name.c_str(), "()I");
+ if (method == NULL) {
+ env->ExceptionClear();
+ return JNI_FALSE;
+ }
+
+ if (is_static) {
+ env->CallStaticIntMethod(klass, method);
+ } else {
+ jobject obj = NewInstance(env, klass);
+ if (obj == NULL) {
+ env->ExceptionDescribe();
+ env->ExceptionClear();
+ return JNI_FALSE;
+ }
+ env->CallIntMethod(obj, method);
+ }
+
+ if (env->ExceptionOccurred()) {
+ env->ExceptionDescribe();
+ env->ExceptionClear();
+ return JNI_FALSE;
+ }
+
+ return JNI_TRUE;
+}
+
+static constexpr size_t kConstructorSignatureLength = 5; // e.g. (IZ)V
+static constexpr size_t kNumConstructorArgs = kConstructorSignatureLength - 3;
+
+extern "C" JNIEXPORT jboolean JNICALL Java_JNI_canDiscoverConstructor(
+ JNIEnv* env, jclass, jclass klass, jstring args) {
+ ScopedUtfChars utf_args(env, args);
+ jmethodID constructor = env->GetMethodID(klass, "<init>", utf_args.c_str());
+ if (constructor == NULL) {
+ env->ExceptionClear();
+ return JNI_FALSE;
+ }
+
+ return JNI_TRUE;
+}
+
+extern "C" JNIEXPORT jboolean JNICALL Java_JNI_canInvokeConstructorA(
+ JNIEnv* env, jclass, jclass klass, jstring args) {
+ ScopedUtfChars utf_args(env, args);
+ jmethodID constructor = env->GetMethodID(klass, "<init>", utf_args.c_str());
+ if (constructor == NULL) {
+ env->ExceptionClear();
+ return JNI_FALSE;
+ }
+
+ // CheckJNI won't allow out-of-range values, so just zero everything.
+ CHECK_EQ(strlen(utf_args.c_str()), kConstructorSignatureLength);
+ size_t initargs_size = sizeof(jvalue) * kNumConstructorArgs;
+ jvalue *initargs = reinterpret_cast<jvalue*>(alloca(initargs_size));
+ memset(initargs, 0, initargs_size);
+
+ env->NewObjectA(klass, constructor, initargs);
+ if (env->ExceptionOccurred()) {
+ env->ExceptionDescribe();
+ env->ExceptionClear();
+ return JNI_FALSE;
+ }
+
+ return JNI_TRUE;
+}
+
+extern "C" JNIEXPORT jboolean JNICALL Java_JNI_canInvokeConstructorV(
+ JNIEnv* env, jclass, jclass klass, jstring args) {
+ ScopedUtfChars utf_args(env, args);
+ jmethodID constructor = env->GetMethodID(klass, "<init>", utf_args.c_str());
+ if (constructor == NULL) {
+ env->ExceptionClear();
+ return JNI_FALSE;
+ }
+
+ // CheckJNI won't allow out-of-range values, so just zero everything.
+ CHECK_EQ(strlen(utf_args.c_str()), kConstructorSignatureLength);
+ size_t initargs_size = sizeof(jvalue) * kNumConstructorArgs;
+ jvalue *initargs = reinterpret_cast<jvalue*>(alloca(initargs_size));
+ memset(initargs, 0, initargs_size);
+
+ static_assert(kNumConstructorArgs == 2, "Change the varargs below if you change the constant");
+ env->NewObject(klass, constructor, initargs[0], initargs[1]);
+ if (env->ExceptionOccurred()) {
+ env->ExceptionDescribe();
+ env->ExceptionClear();
+ return JNI_FALSE;
+ }
+
+ return JNI_TRUE;
+}
+
+extern "C" JNIEXPORT jint JNICALL Java_Reflection_getHiddenApiAccessFlags(JNIEnv*, jclass) {
+ return static_cast<jint>(kAccHiddenApiBits);
+}
+
+extern "C" JNIEXPORT jboolean JNICALL Java_ChildClass_hasPendingWarning(JNIEnv*, jclass) {
+ return Runtime::Current()->HasPendingHiddenApiWarning();
+}
+
+extern "C" JNIEXPORT void JNICALL Java_ChildClass_clearWarning(JNIEnv*, jclass) {
+ Runtime::Current()->SetPendingHiddenApiWarning(false);
+}
+
+} // namespace Test674HiddenApi
+} // namespace art
diff --git a/test/674-hiddenapi/info.txt b/test/674-hiddenapi/info.txt
new file mode 100644
index 0000000000..25ac6ae78f
--- /dev/null
+++ b/test/674-hiddenapi/info.txt
@@ -0,0 +1,15 @@
+Test whether hidden API access flags are being enforced. The test is composed of
+two JARs. The first (parent) defines methods and fields and the second (child)
+tries to access them with reflection/JNI or link against them. Note that the
+first is compiled twice - once with and once without hidden access flags.
+
+The test then proceeds to exercise the following combinations of class loading:
+(a) Both parent and child dex loaded with PathClassLoader, parent's class loader
+ is the child's class loader's parent. Access flags should not be enforced as
+ the parent does not belong to boot class path.
+(b) Parent is appended to boot class path, child is loaded with PathClassLoader.
+ In this situation child should not be able to access hidden methods/fields
+ of the parent.
+(c) Both parent and child are appended to boot class path. Restrictions should
+ not apply as hidden APIs are accessible within the boundaries of the boot
+ class path.
diff --git a/test/674-hiddenapi/src-art/Main.java b/test/674-hiddenapi/src-art/Main.java
new file mode 100644
index 0000000000..a808e946a9
--- /dev/null
+++ b/test/674-hiddenapi/src-art/Main.java
@@ -0,0 +1,157 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import dalvik.system.InMemoryDexClassLoader;
+import dalvik.system.PathClassLoader;
+import java.io.File;
+import java.io.InputStream;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Method;
+import java.nio.ByteBuffer;
+import java.nio.file.Files;
+import java.util.Arrays;
+import java.util.zip.ZipEntry;
+import java.util.zip.ZipFile;
+
+public class Main {
+ public static void main(String[] args) throws Exception {
+ System.loadLibrary(args[0]);
+ prepareNativeLibFileName(args[0]);
+
+ // Enable hidden API checks in case they are disabled by default.
+ init();
+
+ // Run test with both parent and child dex files loaded with class loaders.
+ // The expectation is that hidden members in parent should be visible to
+ // the child.
+ doTest(false, false);
+ doUnloading();
+
+ // Now append parent dex file to boot class path and run again. This time
+ // the child dex file should not be able to access private APIs of the parent.
+ appendToBootClassLoader(DEX_PARENT_BOOT);
+ doTest(true, false);
+ doUnloading();
+
+ // And finally append the child to the boot class path as well. With both in the
+ // boot class path, access should be granted.
+ appendToBootClassLoader(DEX_CHILD);
+ doTest(true, true);
+ doUnloading();
+ }
+
+ private static void doTest(boolean parentInBoot, boolean childInBoot) throws Exception {
+ // Load parent dex if it is not in boot class path.
+ ClassLoader parentLoader = null;
+ if (parentInBoot) {
+ parentLoader = BOOT_CLASS_LOADER;
+ } else {
+ parentLoader = new PathClassLoader(DEX_PARENT, ClassLoader.getSystemClassLoader());
+ }
+
+ // Load child dex if it is not in boot class path.
+ ClassLoader childLoader = null;
+ if (childInBoot) {
+ if (parentLoader != BOOT_CLASS_LOADER) {
+ throw new IllegalStateException(
+ "DeclaringClass must be in parent class loader of CallingClass");
+ }
+ childLoader = BOOT_CLASS_LOADER;
+ } else {
+ childLoader = new InMemoryDexClassLoader(readDexFile(DEX_CHILD), parentLoader);
+ }
+
+ // Create a unique copy of the native library. Each shared library can only
+ // be loaded once, but for some reason even classes from a class loader
+ // cannot register their native methods against symbols in a shared library
+ // loaded by their parent class loader.
+ String nativeLibCopy = createNativeLibCopy(parentInBoot, childInBoot);
+
+ // Invoke ChildClass.runTest
+ Class.forName("ChildClass", true, childLoader)
+ .getDeclaredMethod("runTest", String.class, Boolean.TYPE, Boolean.TYPE)
+ .invoke(null, nativeLibCopy, parentInBoot, childInBoot);
+ }
+
+ // Routine which tries to figure out the absolute path of our native library.
+ private static void prepareNativeLibFileName(String arg) throws Exception {
+ String libName = System.mapLibraryName(arg);
+ Method libPathsMethod = Runtime.class.getDeclaredMethod("getLibPaths");
+ libPathsMethod.setAccessible(true);
+ String[] libPaths = (String[]) libPathsMethod.invoke(Runtime.getRuntime());
+ nativeLibFileName = null;
+ for (String p : libPaths) {
+ String candidate = p + libName;
+ if (new File(candidate).exists()) {
+ nativeLibFileName = candidate;
+ break;
+ }
+ }
+ if (nativeLibFileName == null) {
+ throw new IllegalStateException("Didn't find " + libName + " in " +
+ Arrays.toString(libPaths));
+ }
+ }
+
+ // Helper to read dex file into memory.
+ private static ByteBuffer readDexFile(String jarFileName) throws Exception {
+ ZipFile zip = new ZipFile(new File(jarFileName));
+ ZipEntry entry = zip.getEntry("classes.dex");
+ InputStream is = zip.getInputStream(entry);
+ int offset = 0;
+ int size = (int) entry.getSize();
+ ByteBuffer buffer = ByteBuffer.allocate(size);
+ while (is.available() > 0) {
+ is.read(buffer.array(), offset, size - offset);
+ }
+ is.close();
+ zip.close();
+ return buffer;
+ }
+
+ // Copy native library to a new file with a unique name so it does not conflict
+ // with other loaded instance of the same binary file.
+ private static String createNativeLibCopy(boolean parentInBoot, boolean childInBoot)
+ throws Exception {
+ String tempFileName = System.mapLibraryName(
+ "hiddenapitest_" + (parentInBoot ? "1" : "0") + (childInBoot ? "1" : "0"));
+ File tempFile = new File(System.getenv("DEX_LOCATION"), tempFileName);
+ Files.copy(new File(nativeLibFileName).toPath(), tempFile.toPath());
+ return tempFile.getAbsolutePath();
+ }
+
+ private static void doUnloading() {
+ // Do multiple GCs to prevent rare flakiness if some other thread is keeping the
+ // classloader live.
+ for (int i = 0; i < 5; ++i) {
+ Runtime.getRuntime().gc();
+ }
+ }
+
+ private static String nativeLibFileName;
+
+ private static final String DEX_PARENT =
+ new File(System.getenv("DEX_LOCATION"), "674-hiddenapi.jar").getAbsolutePath();
+ private static final String DEX_PARENT_BOOT =
+ new File(new File(System.getenv("DEX_LOCATION"), "res"), "boot.jar").getAbsolutePath();
+ private static final String DEX_CHILD =
+ new File(System.getenv("DEX_LOCATION"), "674-hiddenapi-ex.jar").getAbsolutePath();
+
+ private static ClassLoader BOOT_CLASS_LOADER = Object.class.getClassLoader();
+
+ private static native void appendToBootClassLoader(String dexPath);
+ private static native void init();
+}
diff --git a/test/674-hiddenapi/src-ex/ChildClass.java b/test/674-hiddenapi/src-ex/ChildClass.java
new file mode 100644
index 0000000000..babd88359b
--- /dev/null
+++ b/test/674-hiddenapi/src-ex/ChildClass.java
@@ -0,0 +1,438 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.annotation.Annotation;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Field;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.util.Arrays;
+import java.util.List;
+
+import javax.lang.model.element.AnnotationMirror;
+import javax.lang.model.type.PrimitiveType;
+import javax.lang.model.type.TypeKind;
+import javax.lang.model.type.TypeVisitor;
+
+public class ChildClass {
+ enum PrimitiveType {
+ TInteger('I', Integer.TYPE, Integer.valueOf(0)),
+ TLong('J', Long.TYPE, Long.valueOf(0)),
+ TFloat('F', Float.TYPE, Float.valueOf(0)),
+ TDouble('D', Double.TYPE, Double.valueOf(0)),
+ TBoolean('Z', Boolean.TYPE, Boolean.valueOf(false)),
+ TByte('B', Byte.TYPE, Byte.valueOf((byte) 0)),
+ TShort('S', Short.TYPE, Short.valueOf((short) 0)),
+ TCharacter('C', Character.TYPE, Character.valueOf('0'));
+
+ PrimitiveType(char shorty, Class klass, Object value) {
+ mShorty = shorty;
+ mClass = klass;
+ mDefaultValue = value;
+ }
+
+ public char mShorty;
+ public Class mClass;
+ public Object mDefaultValue;
+ }
+
+ enum Hiddenness {
+ Whitelist(PrimitiveType.TShort),
+ LightGreylist(PrimitiveType.TBoolean),
+ DarkGreylist(PrimitiveType.TByte),
+ Blacklist(PrimitiveType.TCharacter);
+
+ Hiddenness(PrimitiveType type) { mAssociatedType = type; }
+ public PrimitiveType mAssociatedType;
+ }
+
+ enum Visibility {
+ Public(PrimitiveType.TInteger),
+ Package(PrimitiveType.TFloat),
+ Protected(PrimitiveType.TLong),
+ Private(PrimitiveType.TDouble);
+
+ Visibility(PrimitiveType type) { mAssociatedType = type; }
+ public PrimitiveType mAssociatedType;
+ }
+
+ enum Behaviour {
+ Granted,
+ Warning,
+ Denied,
+ }
+
+ private static final boolean booleanValues[] = new boolean[] { false, true };
+
+ public static void runTest(String libFileName, boolean expectedParentInBoot,
+ boolean expectedChildInBoot) throws Exception {
+ System.load(libFileName);
+
+ // Check expectations about loading into boot class path.
+ isParentInBoot = (ParentClass.class.getClassLoader().getParent() == null);
+ if (isParentInBoot != expectedParentInBoot) {
+ throw new RuntimeException("Expected ParentClass " +
+ (expectedParentInBoot ? "" : "not ") + "in boot class path");
+ }
+ isChildInBoot = (ChildClass.class.getClassLoader().getParent() == null);
+ if (isChildInBoot != expectedChildInBoot) {
+ throw new RuntimeException("Expected ChildClass " + (expectedChildInBoot ? "" : "not ") +
+ "in boot class path");
+ }
+
+ boolean isSameBoot = (isParentInBoot == isChildInBoot);
+
+ // Run meaningful combinations of access flags.
+ for (Hiddenness hiddenness : Hiddenness.values()) {
+ final Behaviour expected;
+ if (isSameBoot || hiddenness == Hiddenness.Whitelist) {
+ expected = Behaviour.Granted;
+ } else if (hiddenness == Hiddenness.Blacklist) {
+ expected = Behaviour.Denied;
+ } else {
+ expected = Behaviour.Warning;
+ }
+
+ for (boolean isStatic : booleanValues) {
+ String suffix = (isStatic ? "Static" : "") + hiddenness.name();
+
+ for (Visibility visibility : Visibility.values()) {
+ // Test reflection and JNI on methods and fields
+ for (Class klass : new Class<?>[] { ParentClass.class, ParentInterface.class }) {
+ String baseName = visibility.name() + suffix;
+ checkField(klass, "field" + baseName, isStatic, visibility, expected);
+ checkMethod(klass, "method" + baseName, isStatic, visibility, expected);
+ }
+
+ // Check whether one can use a class constructor.
+ checkConstructor(ParentClass.class, visibility, hiddenness, expected);
+
+ // Check whether you can use an interface default method.
+ String name = "method" + visibility.name() + "Default" + hiddenness.name();
+ checkMethod(ParentInterface.class, name, /*isStatic*/ false, visibility, expected);
+ }
+
+ // Test whether static linking succeeds.
+ checkLinking("LinkFieldGet" + suffix, /*takesParameter*/ false, expected);
+ checkLinking("LinkFieldSet" + suffix, /*takesParameter*/ true, expected);
+ checkLinking("LinkMethod" + suffix, /*takesParameter*/ false, expected);
+ }
+
+ // Check whether Class.newInstance succeeds.
+ checkNullaryConstructor(Class.forName("NullaryConstructor" + hiddenness.name()), expected);
+ }
+ }
+
+ private static void checkField(Class<?> klass, String name, boolean isStatic,
+ Visibility visibility, Behaviour behaviour) throws Exception {
+
+ boolean isPublic = (visibility == Visibility.Public);
+ boolean canDiscover = (behaviour != Behaviour.Denied);
+ boolean setsWarning = (behaviour == Behaviour.Warning);
+
+ if (klass.isInterface() && (!isStatic || !isPublic)) {
+ // Interfaces only have public static fields.
+ return;
+ }
+
+ // Test discovery with reflection.
+
+ if (Reflection.canDiscoverWithGetDeclaredField(klass, name) != canDiscover) {
+ throwDiscoveryException(klass, name, true, "getDeclaredField()", canDiscover);
+ }
+
+ if (Reflection.canDiscoverWithGetDeclaredFields(klass, name) != canDiscover) {
+ throwDiscoveryException(klass, name, true, "getDeclaredFields()", canDiscover);
+ }
+
+ if (Reflection.canDiscoverWithGetField(klass, name) != (canDiscover && isPublic)) {
+ throwDiscoveryException(klass, name, true, "getField()", (canDiscover && isPublic));
+ }
+
+ if (Reflection.canDiscoverWithGetFields(klass, name) != (canDiscover && isPublic)) {
+ throwDiscoveryException(klass, name, true, "getFields()", (canDiscover && isPublic));
+ }
+
+ // Test discovery with JNI.
+
+ if (JNI.canDiscoverField(klass, name, isStatic) != canDiscover) {
+ throwDiscoveryException(klass, name, true, "JNI", canDiscover);
+ }
+
+ // Finish here if we could not discover the field.
+
+ if (!canDiscover) {
+ return;
+ }
+
+ // Test that modifiers are unaffected.
+
+ if (Reflection.canObserveFieldHiddenAccessFlags(klass, name)) {
+ throwModifiersException(klass, name, true);
+ }
+
+ // Test getters and setters when meaningful.
+
+ clearWarning();
+ if (!Reflection.canGetField(klass, name)) {
+ throwAccessException(klass, name, true, "Field.getInt()");
+ }
+ if (hasPendingWarning() != setsWarning) {
+ throwWarningException(klass, name, true, "Field.getInt()", setsWarning);
+ }
+
+ clearWarning();
+ if (!Reflection.canSetField(klass, name)) {
+ throwAccessException(klass, name, true, "Field.setInt()");
+ }
+ if (hasPendingWarning() != setsWarning) {
+ throwWarningException(klass, name, true, "Field.setInt()", setsWarning);
+ }
+
+ clearWarning();
+ if (!JNI.canGetField(klass, name, isStatic)) {
+ throwAccessException(klass, name, true, "getIntField");
+ }
+ if (hasPendingWarning() != setsWarning) {
+ throwWarningException(klass, name, true, "getIntField", setsWarning);
+ }
+
+ clearWarning();
+ if (!JNI.canSetField(klass, name, isStatic)) {
+ throwAccessException(klass, name, true, "setIntField");
+ }
+ if (hasPendingWarning() != setsWarning) {
+ throwWarningException(klass, name, true, "setIntField", setsWarning);
+ }
+ }
+
+ private static void checkMethod(Class<?> klass, String name, boolean isStatic,
+ Visibility visibility, Behaviour behaviour) throws Exception {
+
+ boolean isPublic = (visibility == Visibility.Public);
+ if (klass.isInterface() && !isPublic) {
+ // All interface members are public.
+ return;
+ }
+
+ boolean canDiscover = (behaviour != Behaviour.Denied);
+ boolean setsWarning = (behaviour == Behaviour.Warning);
+
+ // Test discovery with reflection.
+
+ if (Reflection.canDiscoverWithGetDeclaredMethod(klass, name) != canDiscover) {
+ throwDiscoveryException(klass, name, false, "getDeclaredMethod()", canDiscover);
+ }
+
+ if (Reflection.canDiscoverWithGetDeclaredMethods(klass, name) != canDiscover) {
+ throwDiscoveryException(klass, name, false, "getDeclaredMethods()", canDiscover);
+ }
+
+ if (Reflection.canDiscoverWithGetMethod(klass, name) != (canDiscover && isPublic)) {
+ throwDiscoveryException(klass, name, false, "getMethod()", (canDiscover && isPublic));
+ }
+
+ if (Reflection.canDiscoverWithGetMethods(klass, name) != (canDiscover && isPublic)) {
+ throwDiscoveryException(klass, name, false, "getMethods()", (canDiscover && isPublic));
+ }
+
+ // Test discovery with JNI.
+
+ if (JNI.canDiscoverMethod(klass, name, isStatic) != canDiscover) {
+ throwDiscoveryException(klass, name, false, "JNI", canDiscover);
+ }
+
+    // Finish here if we could not discover the method.
+
+ if (!canDiscover) {
+ return;
+ }
+
+ // Test that modifiers are unaffected.
+
+ if (Reflection.canObserveMethodHiddenAccessFlags(klass, name)) {
+ throwModifiersException(klass, name, false);
+ }
+
+ // Test whether we can invoke the method. This skips non-static interface methods.
+
+ if (!klass.isInterface() || isStatic) {
+ clearWarning();
+ if (!Reflection.canInvokeMethod(klass, name)) {
+ throwAccessException(klass, name, false, "invoke()");
+ }
+ if (hasPendingWarning() != setsWarning) {
+ throwWarningException(klass, name, false, "invoke()", setsWarning);
+ }
+
+ clearWarning();
+ if (!JNI.canInvokeMethodA(klass, name, isStatic)) {
+ throwAccessException(klass, name, false, "CallMethodA");
+ }
+ if (hasPendingWarning() != setsWarning) {
+ throwWarningException(klass, name, false, "CallMethodA()", setsWarning);
+ }
+
+ clearWarning();
+ if (!JNI.canInvokeMethodV(klass, name, isStatic)) {
+ throwAccessException(klass, name, false, "CallMethodV");
+ }
+ if (hasPendingWarning() != setsWarning) {
+ throwWarningException(klass, name, false, "CallMethodV()", setsWarning);
+ }
+ }
+ }
+
+ private static void checkConstructor(Class<?> klass, Visibility visibility, Hiddenness hiddenness,
+ Behaviour behaviour) throws Exception {
+
+ boolean isPublic = (visibility == Visibility.Public);
+ String signature = "(" + visibility.mAssociatedType.mShorty +
+ hiddenness.mAssociatedType.mShorty + ")V";
+ String fullName = "<init>" + signature;
+ Class<?> args[] = new Class[] { visibility.mAssociatedType.mClass,
+ hiddenness.mAssociatedType.mClass };
+ Object initargs[] = new Object[] { visibility.mAssociatedType.mDefaultValue,
+ hiddenness.mAssociatedType.mDefaultValue };
+
+ boolean canDiscover = (behaviour != Behaviour.Denied);
+ boolean setsWarning = (behaviour == Behaviour.Warning);
+
+ // Test discovery with reflection.
+
+ if (Reflection.canDiscoverWithGetDeclaredConstructor(klass, args) != canDiscover) {
+ throwDiscoveryException(klass, fullName, false, "getDeclaredConstructor()", canDiscover);
+ }
+
+ if (Reflection.canDiscoverWithGetDeclaredConstructors(klass, args) != canDiscover) {
+ throwDiscoveryException(klass, fullName, false, "getDeclaredConstructors()", canDiscover);
+ }
+
+ if (Reflection.canDiscoverWithGetConstructor(klass, args) != (canDiscover && isPublic)) {
+ throwDiscoveryException(
+ klass, fullName, false, "getConstructor()", (canDiscover && isPublic));
+ }
+
+ if (Reflection.canDiscoverWithGetConstructors(klass, args) != (canDiscover && isPublic)) {
+ throwDiscoveryException(
+ klass, fullName, false, "getConstructors()", (canDiscover && isPublic));
+ }
+
+ // Test discovery with JNI.
+
+ if (JNI.canDiscoverConstructor(klass, signature) != canDiscover) {
+ throwDiscoveryException(klass, fullName, false, "JNI", canDiscover);
+ }
+
+    // Finish here if we could not discover the constructor.
+
+ if (!canDiscover) {
+ return;
+ }
+
+ // Test whether we can invoke the constructor.
+
+ clearWarning();
+ if (!Reflection.canInvokeConstructor(klass, args, initargs)) {
+ throwAccessException(klass, fullName, false, "invoke()");
+ }
+ if (hasPendingWarning() != setsWarning) {
+ throwWarningException(klass, fullName, false, "invoke()", setsWarning);
+ }
+
+ clearWarning();
+ if (!JNI.canInvokeConstructorA(klass, signature)) {
+ throwAccessException(klass, fullName, false, "NewObjectA");
+ }
+ if (hasPendingWarning() != setsWarning) {
+ throwWarningException(klass, fullName, false, "NewObjectA", setsWarning);
+ }
+
+ clearWarning();
+ if (!JNI.canInvokeConstructorV(klass, signature)) {
+ throwAccessException(klass, fullName, false, "NewObjectV");
+ }
+ if (hasPendingWarning() != setsWarning) {
+ throwWarningException(klass, fullName, false, "NewObjectV", setsWarning);
+ }
+ }
+
+ private static void checkNullaryConstructor(Class<?> klass, Behaviour behaviour)
+ throws Exception {
+ boolean canAccess = (behaviour != Behaviour.Denied);
+ boolean setsWarning = (behaviour == Behaviour.Warning);
+
+ clearWarning();
+ if (Reflection.canUseNewInstance(klass) != canAccess) {
+ throw new RuntimeException("Expected to " + (canAccess ? "" : "not ") +
+ "be able to construct " + klass.getName() + ". " +
+ "isParentInBoot = " + isParentInBoot + ", " + "isChildInBoot = " + isChildInBoot);
+ }
+ if (canAccess && hasPendingWarning() != setsWarning) {
+ throwWarningException(klass, "nullary constructor", false, "newInstance", setsWarning);
+ }
+ }
+
+ private static void checkLinking(String className, boolean takesParameter, Behaviour behaviour)
+ throws Exception {
+ boolean canAccess = (behaviour != Behaviour.Denied);
+ boolean setsWarning = false; // we do not set the flag in verifier or at runtime
+
+ clearWarning();
+ if (Linking.canAccess(className, takesParameter) != canAccess) {
+ throw new RuntimeException("Expected to " + (canAccess ? "" : "not ") +
+ "be able to verify " + className + "." +
+ "isParentInBoot = " + isParentInBoot + ", " + "isChildInBoot = " + isChildInBoot);
+ }
+ if (canAccess && hasPendingWarning() != setsWarning) {
+ throwWarningException(
+ Class.forName(className), "access", false, "static linking", setsWarning);
+ }
+ }
+
+ private static void throwDiscoveryException(Class<?> klass, String name, boolean isField,
+ String fn, boolean canAccess) {
+ throw new RuntimeException("Expected " + (isField ? "field " : "method ") + klass.getName() +
+ "." + name + " to " + (canAccess ? "" : "not ") + "be discoverable with " + fn + ". " +
+ "isParentInBoot = " + isParentInBoot + ", " + "isChildInBoot = " + isChildInBoot);
+ }
+
+ private static void throwAccessException(Class<?> klass, String name, boolean isField,
+ String fn) {
+ throw new RuntimeException("Expected to be able to access " + (isField ? "field " : "method ") +
+ klass.getName() + "." + name + " using " + fn + ". " +
+ "isParentInBoot = " + isParentInBoot + ", " + "isChildInBoot = " + isChildInBoot);
+ }
+
+ private static void throwWarningException(Class<?> klass, String name, boolean isField,
+ String fn, boolean setsWarning) {
+ throw new RuntimeException("Expected access to " + (isField ? "field " : "method ") +
+ klass.getName() + "." + name + " using " + fn + " to " + (setsWarning ? "" : "not ") +
+ "set the warning flag. " +
+ "isParentInBoot = " + isParentInBoot + ", " + "isChildInBoot = " + isChildInBoot);
+ }
+
+ private static void throwModifiersException(Class<?> klass, String name, boolean isField) {
+ throw new RuntimeException("Expected " + (isField ? "field " : "method ") + klass.getName() +
+ "." + name + " to not expose hidden modifiers");
+ }
+
+ private static boolean isParentInBoot;
+ private static boolean isChildInBoot;
+
+ private static native boolean hasPendingWarning();
+ private static native void clearWarning();
+}
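
The expectation computed at the top of runTest() reduces to a small pure function of whether caller and callee are on the same side of the boot class path and of the member's hiddenness. A sketch of that decision, reusing the enums declared above (not itself part of the test):

    // Mirrors the expected-behaviour selection in runTest().
    static Behaviour expectedBehaviour(boolean sameDomain, Hiddenness hiddenness) {
      if (sameDomain || hiddenness == Hiddenness.Whitelist) {
        return Behaviour.Granted;   // same domain or whitelisted: always allowed
      }
      if (hiddenness == Hiddenness.Blacklist) {
        return Behaviour.Denied;    // blacklisted across domains: access refused
      }
      return Behaviour.Warning;     // light/dark greylist: allowed, but flagged
    }
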
diff --git a/test/674-hiddenapi/src-ex/JNI.java b/test/674-hiddenapi/src-ex/JNI.java
new file mode 100644
index 0000000000..5dfb2963fa
--- /dev/null
+++ b/test/674-hiddenapi/src-ex/JNI.java
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class JNI {
+ public static native boolean canDiscoverField(Class<?> klass, String name, boolean isStatic);
+ public static native boolean canGetField(Class<?> klass, String name, boolean isStatic);
+ public static native boolean canSetField(Class<?> klass, String name, boolean isStatic);
+
+ public static native boolean canDiscoverMethod(Class<?> klass, String name, boolean isStatic);
+ public static native boolean canInvokeMethodA(Class<?> klass, String name, boolean isStatic);
+ public static native boolean canInvokeMethodV(Class<?> klass, String name, boolean isStatic);
+
+ public static native boolean canDiscoverConstructor(Class<?> klass, String signature);
+ public static native boolean canInvokeConstructorA(Class<?> klass, String signature);
+ public static native boolean canInvokeConstructorV(Class<?> klass, String signature);
+}
diff --git a/test/674-hiddenapi/src-ex/Linking.java b/test/674-hiddenapi/src-ex/Linking.java
new file mode 100644
index 0000000000..c6735d85fe
--- /dev/null
+++ b/test/674-hiddenapi/src-ex/Linking.java
@@ -0,0 +1,193 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.InvocationTargetException;
+
+public class Linking {
+ public static boolean canAccess(String className, boolean takesParameter) throws Exception {
+ try {
+ Class<?> c = Class.forName(className);
+ if (takesParameter) {
+ c.getDeclaredMethod("access", Integer.TYPE).invoke(null, 42);
+ } else {
+ c.getDeclaredMethod("access").invoke(null);
+ }
+ return true;
+ } catch (InvocationTargetException ex) {
+ if (ex.getCause() instanceof IllegalAccessError) {
+ return false;
+ } else {
+ throw ex;
+ }
+ }
+ }
+}
+
+// INSTANCE FIELD GET
+
+class LinkFieldGetWhitelist {
+ public static int access() {
+ return new ParentClass().fieldPublicWhitelist;
+ }
+}
+
+class LinkFieldGetLightGreylist {
+ public static int access() {
+ return new ParentClass().fieldPublicLightGreylist;
+ }
+}
+
+class LinkFieldGetDarkGreylist {
+ public static int access() {
+ return new ParentClass().fieldPublicDarkGreylist;
+ }
+}
+
+class LinkFieldGetBlacklist {
+ public static int access() {
+ return new ParentClass().fieldPublicBlacklist;
+ }
+}
+
+// INSTANCE FIELD SET
+
+class LinkFieldSetWhitelist {
+ public static void access(int x) {
+ new ParentClass().fieldPublicWhitelist = x;
+ }
+}
+
+class LinkFieldSetLightGreylist {
+ public static void access(int x) {
+ new ParentClass().fieldPublicLightGreylist = x;
+ }
+}
+
+class LinkFieldSetDarkGreylist {
+ public static void access(int x) {
+ new ParentClass().fieldPublicDarkGreylist = x;
+ }
+}
+
+class LinkFieldSetBlacklist {
+ public static void access(int x) {
+ new ParentClass().fieldPublicBlacklist = x;
+ }
+}
+
+// STATIC FIELD GET
+
+class LinkFieldGetStaticWhitelist {
+ public static int access() {
+ return ParentClass.fieldPublicStaticWhitelist;
+ }
+}
+
+class LinkFieldGetStaticLightGreylist {
+ public static int access() {
+ return ParentClass.fieldPublicStaticLightGreylist;
+ }
+}
+
+class LinkFieldGetStaticDarkGreylist {
+ public static int access() {
+ return ParentClass.fieldPublicStaticDarkGreylist;
+ }
+}
+
+class LinkFieldGetStaticBlacklist {
+ public static int access() {
+ return ParentClass.fieldPublicStaticBlacklist;
+ }
+}
+
+// STATIC FIELD SET
+
+class LinkFieldSetStaticWhitelist {
+ public static void access(int x) {
+ ParentClass.fieldPublicStaticWhitelist = x;
+ }
+}
+
+class LinkFieldSetStaticLightGreylist {
+ public static void access(int x) {
+ ParentClass.fieldPublicStaticLightGreylist = x;
+ }
+}
+
+class LinkFieldSetStaticDarkGreylist {
+ public static void access(int x) {
+ ParentClass.fieldPublicStaticDarkGreylist = x;
+ }
+}
+
+class LinkFieldSetStaticBlacklist {
+ public static void access(int x) {
+ ParentClass.fieldPublicStaticBlacklist = x;
+ }
+}
+
+// INVOKE INSTANCE METHOD
+
+class LinkMethodWhitelist {
+ public static int access() {
+ return new ParentClass().methodPublicWhitelist();
+ }
+}
+
+class LinkMethodLightGreylist {
+ public static int access() {
+ return new ParentClass().methodPublicLightGreylist();
+ }
+}
+
+class LinkMethodDarkGreylist {
+ public static int access() {
+ return new ParentClass().methodPublicDarkGreylist();
+ }
+}
+
+class LinkMethodBlacklist {
+ public static int access() {
+ return new ParentClass().methodPublicBlacklist();
+ }
+}
+
+// INVOKE STATIC METHOD
+
+class LinkMethodStaticWhitelist {
+ public static int access() {
+ return ParentClass.methodPublicStaticWhitelist();
+ }
+}
+
+class LinkMethodStaticLightGreylist {
+ public static int access() {
+ return ParentClass.methodPublicStaticLightGreylist();
+ }
+}
+
+class LinkMethodStaticDarkGreylist {
+ public static int access() {
+ return ParentClass.methodPublicStaticDarkGreylist();
+ }
+}
+
+class LinkMethodStaticBlacklist {
+ public static int access() {
+ return ParentClass.methodPublicStaticBlacklist();
+ }
+}
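
Each Link* class above references exactly one ParentClass member, so loading it and invoking its static access() method forces that single reference to be resolved; a denied member then surfaces as an IllegalAccessError, which canAccess() unwraps from the InvocationTargetException. Illustrative use, as done from ChildClass.checkLinking():

    // false when resolving the blacklisted member throws IllegalAccessError, true otherwise.
    boolean getOk = Linking.canAccess("LinkFieldGetBlacklist", /*takesParameter=*/ false);
    boolean setOk = Linking.canAccess("LinkFieldSetBlacklist", /*takesParameter=*/ true);
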
diff --git a/test/674-hiddenapi/src-ex/Reflection.java b/test/674-hiddenapi/src-ex/Reflection.java
new file mode 100644
index 0000000000..3667e91611
--- /dev/null
+++ b/test/674-hiddenapi/src-ex/Reflection.java
@@ -0,0 +1,205 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
+import java.lang.reflect.Modifier;
+import java.util.Arrays;
+
+public class Reflection {
+ public static boolean canDiscoverWithGetDeclaredField(Class<?> klass, String name) {
+ try {
+ klass.getDeclaredField(name);
+ return true;
+ } catch (NoSuchFieldException ex) {
+ return false;
+ }
+ }
+
+ public static boolean canDiscoverWithGetDeclaredFields(Class<?> klass, String name) {
+ for (Field f : klass.getDeclaredFields()) {
+ if (f.getName().equals(name)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ public static boolean canDiscoverWithGetField(Class<?> klass, String name) {
+ try {
+ klass.getField(name);
+ return true;
+ } catch (NoSuchFieldException ex) {
+ return false;
+ }
+ }
+
+ public static boolean canDiscoverWithGetFields(Class<?> klass, String name) {
+ for (Field f : klass.getFields()) {
+ if (f.getName().equals(name)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ public static boolean canGetField(Class<?> klass, String name) {
+ try {
+ Field f = klass.getDeclaredField(name);
+ f.setAccessible(true);
+ f.getInt(Modifier.isStatic(f.getModifiers()) ? null : klass.newInstance());
+ return true;
+ } catch (Exception ex) {
+ ex.printStackTrace();
+ return false;
+ }
+ }
+
+ public static boolean canSetField(Class<?> klass, String name) {
+ try {
+ Field f = klass.getDeclaredField(name);
+ f.setAccessible(true);
+ f.setInt(Modifier.isStatic(f.getModifiers()) ? null : klass.newInstance(), 42);
+ return true;
+ } catch (Exception ex) {
+ ex.printStackTrace();
+ return false;
+ }
+ }
+
+ public static boolean canDiscoverWithGetDeclaredMethod(Class<?> klass, String name) {
+ try {
+ klass.getDeclaredMethod(name);
+ return true;
+ } catch (NoSuchMethodException ex) {
+ return false;
+ }
+ }
+
+ public static boolean canDiscoverWithGetDeclaredMethods(Class<?> klass, String name) {
+ for (Method m : klass.getDeclaredMethods()) {
+ if (m.getName().equals(name)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ public static boolean canDiscoverWithGetMethod(Class<?> klass, String name) {
+ try {
+ klass.getMethod(name);
+ return true;
+ } catch (NoSuchMethodException ex) {
+ return false;
+ }
+ }
+
+ public static boolean canDiscoverWithGetMethods(Class<?> klass, String name) {
+ for (Method m : klass.getMethods()) {
+ if (m.getName().equals(name)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ public static boolean canInvokeMethod(Class<?> klass, String name) {
+ try {
+ Method m = klass.getDeclaredMethod(name);
+ m.setAccessible(true);
+ m.invoke(klass.isInterface() ? null : klass.newInstance());
+ return true;
+ } catch (Exception ex) {
+ ex.printStackTrace();
+ return false;
+ }
+ }
+
+ public static boolean canDiscoverWithGetDeclaredConstructor(Class<?> klass, Class<?> args[]) {
+ try {
+ klass.getDeclaredConstructor(args);
+ return true;
+ } catch (NoSuchMethodException ex) {
+ return false;
+ }
+ }
+
+ public static boolean canDiscoverWithGetDeclaredConstructors(Class<?> klass, Class<?> args[]) {
+ for (Constructor c : klass.getDeclaredConstructors()) {
+ if (Arrays.equals(c.getParameterTypes(), args)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ public static boolean canDiscoverWithGetConstructor(Class<?> klass, Class<?> args[]) {
+ try {
+ klass.getConstructor(args);
+ return true;
+ } catch (NoSuchMethodException ex) {
+ return false;
+ }
+ }
+
+ public static boolean canDiscoverWithGetConstructors(Class<?> klass, Class<?> args[]) {
+ for (Constructor c : klass.getConstructors()) {
+ if (Arrays.equals(c.getParameterTypes(), args)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ public static boolean canInvokeConstructor(Class<?> klass, Class<?> args[], Object[] initargs) {
+ try {
+ Constructor c = klass.getDeclaredConstructor(args);
+ c.setAccessible(true);
+ c.newInstance(initargs);
+ return true;
+ } catch (Exception ex) {
+ ex.printStackTrace();
+ return false;
+ }
+ }
+
+ public static boolean canUseNewInstance(Class<?> klass) throws IllegalAccessException {
+ try {
+ klass.newInstance();
+ return true;
+ } catch (InstantiationException ex) {
+ return false;
+ }
+ }
+
+ private static native int getHiddenApiAccessFlags();
+
+ public static boolean canObserveFieldHiddenAccessFlags(Class<?> klass, String name)
+ throws Exception {
+ return (klass.getDeclaredField(name).getModifiers() & getHiddenApiAccessFlags()) != 0;
+ }
+
+ public static boolean canObserveMethodHiddenAccessFlags(Class<?> klass, String name)
+ throws Exception {
+ return (klass.getDeclaredMethod(name).getModifiers() & getHiddenApiAccessFlags()) != 0;
+ }
+
+ public static boolean canObserveConstructorHiddenAccessFlags(Class<?> klass, Class<?> args[])
+ throws Exception {
+ return (klass.getConstructor(args).getModifiers() & getHiddenApiAccessFlags()) != 0;
+ }
+}
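
getHiddenApiAccessFlags() is a native helper that returns the runtime's hidden-API bit mask, so the canObserve*HiddenAccessFlags() checks verify that none of those internal bits leak into the modifiers reported by reflection. A hypothetical caller (the field name is just one example from ParentClass):

    if (Reflection.canObserveFieldHiddenAccessFlags(ParentClass.class, "fieldPublicBlacklist")) {
      throw new AssertionError("hidden-API bits leaked into Field.getModifiers()");
    }
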
diff --git a/test/674-hiddenapi/src/NullaryConstructorBlacklist.java b/test/674-hiddenapi/src/NullaryConstructorBlacklist.java
new file mode 100644
index 0000000000..5bf6278a77
--- /dev/null
+++ b/test/674-hiddenapi/src/NullaryConstructorBlacklist.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class NullaryConstructorBlacklist {
+ public NullaryConstructorBlacklist() { x = 22; }
+ public NullaryConstructorBlacklist(int y) { x = y; }
+ protected int x;
+}
diff --git a/test/674-hiddenapi/src/NullaryConstructorDarkGreylist.java b/test/674-hiddenapi/src/NullaryConstructorDarkGreylist.java
new file mode 100644
index 0000000000..c25a767d1d
--- /dev/null
+++ b/test/674-hiddenapi/src/NullaryConstructorDarkGreylist.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class NullaryConstructorDarkGreylist {
+ public NullaryConstructorDarkGreylist() { x = 22; }
+ public NullaryConstructorDarkGreylist(int y) { x = y; }
+ protected int x;
+}
diff --git a/test/674-hiddenapi/src/NullaryConstructorLightGreylist.java b/test/674-hiddenapi/src/NullaryConstructorLightGreylist.java
new file mode 100644
index 0000000000..d5dac8b7c0
--- /dev/null
+++ b/test/674-hiddenapi/src/NullaryConstructorLightGreylist.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class NullaryConstructorLightGreylist {
+ public NullaryConstructorLightGreylist() { x = 22; }
+ public NullaryConstructorLightGreylist(int y) { x = y; }
+ protected int x;
+}
diff --git a/test/674-hiddenapi/src/NullaryConstructorWhitelist.java b/test/674-hiddenapi/src/NullaryConstructorWhitelist.java
new file mode 100644
index 0000000000..d1019077cf
--- /dev/null
+++ b/test/674-hiddenapi/src/NullaryConstructorWhitelist.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class NullaryConstructorWhitelist {
+ public NullaryConstructorWhitelist() { x = 22; }
+ public NullaryConstructorWhitelist(int y) { x = y; }
+ protected int x;
+}
diff --git a/test/674-hiddenapi/src/ParentClass.java b/test/674-hiddenapi/src/ParentClass.java
new file mode 100644
index 0000000000..edad02dc2c
--- /dev/null
+++ b/test/674-hiddenapi/src/ParentClass.java
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class ParentClass {
+ public ParentClass() {}
+
+ // INSTANCE FIELD
+
+ public int fieldPublicWhitelist = 211;
+ int fieldPackageWhitelist = 212;
+ protected int fieldProtectedWhitelist = 213;
+ private int fieldPrivateWhitelist = 214;
+
+ public int fieldPublicLightGreylist = 221;
+ int fieldPackageLightGreylist = 222;
+ protected int fieldProtectedLightGreylist = 223;
+ private int fieldPrivateLightGreylist = 224;
+
+ public int fieldPublicDarkGreylist = 231;
+ int fieldPackageDarkGreylist = 232;
+ protected int fieldProtectedDarkGreylist = 233;
+ private int fieldPrivateDarkGreylist = 234;
+
+ public int fieldPublicBlacklist = 241;
+ int fieldPackageBlacklist = 242;
+ protected int fieldProtectedBlacklist = 243;
+ private int fieldPrivateBlacklist = 244;
+
+ // STATIC FIELD
+
+ public static int fieldPublicStaticWhitelist = 111;
+ static int fieldPackageStaticWhitelist = 112;
+ protected static int fieldProtectedStaticWhitelist = 113;
+ private static int fieldPrivateStaticWhitelist = 114;
+
+ public static int fieldPublicStaticLightGreylist = 121;
+ static int fieldPackageStaticLightGreylist = 122;
+ protected static int fieldProtectedStaticLightGreylist = 123;
+ private static int fieldPrivateStaticLightGreylist = 124;
+
+ public static int fieldPublicStaticDarkGreylist = 131;
+ static int fieldPackageStaticDarkGreylist = 132;
+ protected static int fieldProtectedStaticDarkGreylist = 133;
+ private static int fieldPrivateStaticDarkGreylist = 134;
+
+ public static int fieldPublicStaticBlacklist = 141;
+ static int fieldPackageStaticBlacklist = 142;
+ protected static int fieldProtectedStaticBlacklist = 143;
+ private static int fieldPrivateStaticBlacklist = 144;
+
+ // INSTANCE METHOD
+
+ public int methodPublicWhitelist() { return 411; }
+ int methodPackageWhitelist() { return 412; }
+ protected int methodProtectedWhitelist() { return 413; }
+ private int methodPrivateWhitelist() { return 414; }
+
+ public int methodPublicLightGreylist() { return 421; }
+ int methodPackageLightGreylist() { return 422; }
+ protected int methodProtectedLightGreylist() { return 423; }
+ private int methodPrivateLightGreylist() { return 424; }
+
+ public int methodPublicDarkGreylist() { return 431; }
+ int methodPackageDarkGreylist() { return 432; }
+ protected int methodProtectedDarkGreylist() { return 433; }
+ private int methodPrivateDarkGreylist() { return 434; }
+
+ public int methodPublicBlacklist() { return 441; }
+ int methodPackageBlacklist() { return 442; }
+ protected int methodProtectedBlacklist() { return 443; }
+ private int methodPrivateBlacklist() { return 444; }
+
+ // STATIC METHOD
+
+ public static int methodPublicStaticWhitelist() { return 311; }
+ static int methodPackageStaticWhitelist() { return 312; }
+ protected static int methodProtectedStaticWhitelist() { return 313; }
+ private static int methodPrivateStaticWhitelist() { return 314; }
+
+ public static int methodPublicStaticLightGreylist() { return 321; }
+ static int methodPackageStaticLightGreylist() { return 322; }
+ protected static int methodProtectedStaticLightGreylist() { return 323; }
+ private static int methodPrivateStaticLightGreylist() { return 324; }
+
+ public static int methodPublicStaticDarkGreylist() { return 331; }
+ static int methodPackageStaticDarkGreylist() { return 332; }
+ protected static int methodProtectedStaticDarkGreylist() { return 333; }
+ private static int methodPrivateStaticDarkGreylist() { return 334; }
+
+ public static int methodPublicStaticBlacklist() { return 341; }
+ static int methodPackageStaticBlacklist() { return 342; }
+ protected static int methodProtectedStaticBlacklist() { return 343; }
+ private static int methodPrivateStaticBlacklist() { return 344; }
+
+ // CONSTRUCTOR
+
+ // Whitelist
+ public ParentClass(int x, short y) {}
+ ParentClass(float x, short y) {}
+ protected ParentClass(long x, short y) {}
+ private ParentClass(double x, short y) {}
+
+ // Light greylist
+ public ParentClass(int x, boolean y) {}
+ ParentClass(float x, boolean y) {}
+ protected ParentClass(long x, boolean y) {}
+ private ParentClass(double x, boolean y) {}
+
+ // Dark greylist
+ public ParentClass(int x, byte y) {}
+ ParentClass(float x, byte y) {}
+ protected ParentClass(long x, byte y) {}
+ private ParentClass(double x, byte y) {}
+
+ // Blacklist
+ public ParentClass(int x, char y) {}
+ ParentClass(float x, char y) {}
+ protected ParentClass(long x, char y) {}
+ private ParentClass(double x, char y) {}
+}
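
Every member name above encodes its visibility and hiddenness, and every constructor encodes the same pair in its parameter types via the PrimitiveType mapping in ChildClass: the first parameter stands for visibility, the second for hiddenness. Worked example of how checkConstructor() derives the lookup for the blacklisted public constructor (Public -> int, shorty 'I'; Blacklist -> char, shorty 'C'):

    Class<?>[] args = new Class<?>[] { int.class, char.class };  // Visibility.Public, Hiddenness.Blacklist
    String signature = "(IC)V";                                   // same pair in JNI shorty form
    // Resolves the declaration "public ParentClass(int x, char y) {}" above.
    Constructor<?> c = ParentClass.class.getDeclaredConstructor(args);
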
diff --git a/test/674-hiddenapi/src/ParentInterface.java b/test/674-hiddenapi/src/ParentInterface.java
new file mode 100644
index 0000000000..e36fe0e6b2
--- /dev/null
+++ b/test/674-hiddenapi/src/ParentInterface.java
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public interface ParentInterface {
+ // STATIC FIELD
+ static int fieldPublicStaticWhitelist = 11;
+ static int fieldPublicStaticLightGreylist = 12;
+ static int fieldPublicStaticDarkGreylist = 13;
+ static int fieldPublicStaticBlacklist = 14;
+
+ // INSTANCE METHOD
+ int methodPublicWhitelist();
+ int methodPublicBlacklist();
+ int methodPublicLightGreylist();
+ int methodPublicDarkGreylist();
+
+ // STATIC METHOD
+ static int methodPublicStaticWhitelist() { return 21; }
+ static int methodPublicStaticLightGreylist() { return 22; }
+ static int methodPublicStaticDarkGreylist() { return 23; }
+ static int methodPublicStaticBlacklist() { return 24; }
+
+ // DEFAULT METHOD
+ default int methodPublicDefaultWhitelist() { return 31; }
+ default int methodPublicDefaultLightGreylist() { return 32; }
+ default int methodPublicDefaultDarkGreylist() { return 33; }
+ default int methodPublicDefaultBlacklist() { return 34; }
+}
diff --git a/test/674-hotness-compiled/expected.txt b/test/674-hotness-compiled/expected.txt
new file mode 100644
index 0000000000..6a5618ebc6
--- /dev/null
+++ b/test/674-hotness-compiled/expected.txt
@@ -0,0 +1 @@
+JNI_OnLoad called
diff --git a/test/674-hotness-compiled/info.txt b/test/674-hotness-compiled/info.txt
new file mode 100644
index 0000000000..e2cf59a093
--- /dev/null
+++ b/test/674-hotness-compiled/info.txt
@@ -0,0 +1 @@
+Test for the --count-hotness-in-compiled-code compiler option.
diff --git a/test/674-hotness-compiled/run b/test/674-hotness-compiled/run
new file mode 100755
index 0000000000..85e8e3b13f
--- /dev/null
+++ b/test/674-hotness-compiled/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+${RUN} "$@" -Xcompiler-option --count-hotness-in-compiled-code
diff --git a/test/674-hotness-compiled/src/Main.java b/test/674-hotness-compiled/src/Main.java
new file mode 100644
index 0000000000..76ec92777f
--- /dev/null
+++ b/test/674-hotness-compiled/src/Main.java
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void $noinline$hotnessCount() {
+ }
+
+ public static void $noinline$hotnessCountWithLoop() {
+ for (int i = 0; i < 100; i++) {
+ $noinline$hotnessCount();
+ }
+ }
+
+ public static void main(String[] args) {
+ System.loadLibrary(args[0]);
+ if (!isAotCompiled(Main.class, "main")) {
+ return;
+ }
+ $noinline$hotnessCount();
+ int counter = getHotnessCounter(Main.class, "$noinline$hotnessCount");
+ if (counter == 0) {
+ throw new Error("Expected hotness counter to be updated");
+ }
+
+ $noinline$hotnessCountWithLoop();
+ if (getHotnessCounter(Main.class, "$noinline$hotnessCountWithLoop") <= counter) {
+ throw new Error("Expected hotness counter of a loop to be greater than without loop");
+ }
+ }
+
+ public static native int getHotnessCounter(Class<?> cls, String methodName);
+ public static native boolean isAotCompiled(Class<?> cls, String methodName);
+}
diff --git a/test/674-vdex-uncompress/build b/test/674-vdex-uncompress/build
new file mode 100755
index 0000000000..7b1804d3e0
--- /dev/null
+++ b/test/674-vdex-uncompress/build
@@ -0,0 +1,19 @@
+#!/bin/bash
+#
+# Copyright 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Uncompress and align the dex files so that dex2oat will not copy the dex
+# code to the .vdex file.
+./default-build "$@" --zip-compression-method store --zip-align 4
diff --git a/test/674-vdex-uncompress/expected.txt b/test/674-vdex-uncompress/expected.txt
new file mode 100644
index 0000000000..d0f61f692c
--- /dev/null
+++ b/test/674-vdex-uncompress/expected.txt
@@ -0,0 +1,2 @@
+In foo
+In foo
diff --git a/test/674-vdex-uncompress/info.txt b/test/674-vdex-uncompress/info.txt
new file mode 100644
index 0000000000..6aa6f7b0d5
--- /dev/null
+++ b/test/674-vdex-uncompress/info.txt
@@ -0,0 +1,2 @@
+Test that dex2oat can compile an APK that has uncompressed dex files
+when --input-vdex is passed.
diff --git a/test/674-vdex-uncompress/run b/test/674-vdex-uncompress/run
new file mode 100644
index 0000000000..edf699f842
--- /dev/null
+++ b/test/674-vdex-uncompress/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+exec ${RUN} -Xcompiler-option --compiler-filter=verify --vdex "${@}"
diff --git a/test/674-vdex-uncompress/src/Main.java b/test/674-vdex-uncompress/src/Main.java
new file mode 100644
index 0000000000..0a25b564fe
--- /dev/null
+++ b/test/674-vdex-uncompress/src/Main.java
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ Main() {
+ // Will be quickened with RETURN_VOID_NO_BARRIER.
+ }
+
+ public static void main(String[] args) {
+ Main m = new Main();
+ Object o = m;
+ // The call and field accesses will be quickened.
+ m.foo(m.a);
+
+ // The checkcast will be quickened.
+ m.foo(((Main)o).a);
+ }
+
+ int a;
+ void foo(int a) {
+ System.out.println("In foo");
+ }
+}
+
diff --git a/test/710-varhandle-creation/src-art/Main.java b/test/710-varhandle-creation/src-art/Main.java
index 6d542bbb10..246aac6900 100644
--- a/test/710-varhandle-creation/src-art/Main.java
+++ b/test/710-varhandle-creation/src-art/Main.java
@@ -1,24 +1,19 @@
/*
- * Copyright 2017 Google Inc.
+ * Copyright (C) 2018 The Android Open Source Project
*
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Google designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Google in the LICENSE file that accompanied this code.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
+ * http://www.apache.org/licenses/LICENSE-2.0
*
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*/
-import dalvik.system.VMRuntime;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.VarHandle;
import java.lang.invoke.VarHandle.AccessMode;
@@ -129,9 +124,6 @@ public final class Main {
static final VarHandle vbbd;
static final VarHandle vbbo;
- // Some test results vary depending on 32-bit vs 64-bit.
- static final boolean IS_64_BIT = VMRuntime.getRuntime().is64Bit();
-
static {
try {
vz = MethodHandles.lookup().findVarHandle(Main.class, "z", boolean.class);
@@ -1728,14 +1720,14 @@ public final class Main {
checkNotNull(vbaj);
checkVarType(vbaj, long.class);
checkCoordinateTypes(vbaj, "[class [B, int]");
- checkVarHandleAccessMode(vbaj, VarHandle.AccessMode.GET, IS_64_BIT, "(byte[],int)long");
- checkVarHandleAccessMode(vbaj, VarHandle.AccessMode.SET, IS_64_BIT, "(byte[],int,long)void");
- checkVarHandleAccessMode(vbaj, VarHandle.AccessMode.GET_VOLATILE, IS_64_BIT, "(byte[],int)long");
- checkVarHandleAccessMode(vbaj, VarHandle.AccessMode.SET_VOLATILE, IS_64_BIT, "(byte[],int,long)void");
- checkVarHandleAccessMode(vbaj, VarHandle.AccessMode.GET_ACQUIRE, IS_64_BIT, "(byte[],int)long");
- checkVarHandleAccessMode(vbaj, VarHandle.AccessMode.SET_RELEASE, IS_64_BIT, "(byte[],int,long)void");
- checkVarHandleAccessMode(vbaj, VarHandle.AccessMode.GET_OPAQUE, IS_64_BIT, "(byte[],int)long");
- checkVarHandleAccessMode(vbaj, VarHandle.AccessMode.SET_OPAQUE, IS_64_BIT, "(byte[],int,long)void");
+ checkVarHandleAccessMode(vbaj, VarHandle.AccessMode.GET, true, "(byte[],int)long");
+ checkVarHandleAccessMode(vbaj, VarHandle.AccessMode.SET, true, "(byte[],int,long)void");
+ checkVarHandleAccessMode(vbaj, VarHandle.AccessMode.GET_VOLATILE, true, "(byte[],int)long");
+ checkVarHandleAccessMode(vbaj, VarHandle.AccessMode.SET_VOLATILE, true, "(byte[],int,long)void");
+ checkVarHandleAccessMode(vbaj, VarHandle.AccessMode.GET_ACQUIRE, true, "(byte[],int)long");
+ checkVarHandleAccessMode(vbaj, VarHandle.AccessMode.SET_RELEASE, true, "(byte[],int,long)void");
+ checkVarHandleAccessMode(vbaj, VarHandle.AccessMode.GET_OPAQUE, true, "(byte[],int)long");
+ checkVarHandleAccessMode(vbaj, VarHandle.AccessMode.SET_OPAQUE, true, "(byte[],int,long)void");
checkVarHandleAccessMode(vbaj, VarHandle.AccessMode.COMPARE_AND_SET, true, "(byte[],int,long,long)boolean");
checkVarHandleAccessMode(vbaj, VarHandle.AccessMode.COMPARE_AND_EXCHANGE, true, "(byte[],int,long,long)long");
checkVarHandleAccessMode(vbaj, VarHandle.AccessMode.COMPARE_AND_EXCHANGE_ACQUIRE, true, "(byte[],int,long,long)long");
@@ -1800,14 +1792,14 @@ public final class Main {
checkNotNull(vbad);
checkVarType(vbad, double.class);
checkCoordinateTypes(vbad, "[class [B, int]");
- checkVarHandleAccessMode(vbad, VarHandle.AccessMode.GET, IS_64_BIT, "(byte[],int)double");
- checkVarHandleAccessMode(vbad, VarHandle.AccessMode.SET, IS_64_BIT, "(byte[],int,double)void");
- checkVarHandleAccessMode(vbad, VarHandle.AccessMode.GET_VOLATILE, IS_64_BIT, "(byte[],int)double");
- checkVarHandleAccessMode(vbad, VarHandle.AccessMode.SET_VOLATILE, IS_64_BIT, "(byte[],int,double)void");
- checkVarHandleAccessMode(vbad, VarHandle.AccessMode.GET_ACQUIRE, IS_64_BIT, "(byte[],int)double");
- checkVarHandleAccessMode(vbad, VarHandle.AccessMode.SET_RELEASE, IS_64_BIT, "(byte[],int,double)void");
- checkVarHandleAccessMode(vbad, VarHandle.AccessMode.GET_OPAQUE, IS_64_BIT, "(byte[],int)double");
- checkVarHandleAccessMode(vbad, VarHandle.AccessMode.SET_OPAQUE, IS_64_BIT, "(byte[],int,double)void");
+ checkVarHandleAccessMode(vbad, VarHandle.AccessMode.GET, true, "(byte[],int)double");
+ checkVarHandleAccessMode(vbad, VarHandle.AccessMode.SET, true, "(byte[],int,double)void");
+ checkVarHandleAccessMode(vbad, VarHandle.AccessMode.GET_VOLATILE, true, "(byte[],int)double");
+ checkVarHandleAccessMode(vbad, VarHandle.AccessMode.SET_VOLATILE, true, "(byte[],int,double)void");
+ checkVarHandleAccessMode(vbad, VarHandle.AccessMode.GET_ACQUIRE, true, "(byte[],int)double");
+ checkVarHandleAccessMode(vbad, VarHandle.AccessMode.SET_RELEASE, true, "(byte[],int,double)void");
+ checkVarHandleAccessMode(vbad, VarHandle.AccessMode.GET_OPAQUE, true, "(byte[],int)double");
+ checkVarHandleAccessMode(vbad, VarHandle.AccessMode.SET_OPAQUE, true, "(byte[],int,double)void");
checkVarHandleAccessMode(vbad, VarHandle.AccessMode.COMPARE_AND_SET, true, "(byte[],int,double,double)boolean");
checkVarHandleAccessMode(vbad, VarHandle.AccessMode.COMPARE_AND_EXCHANGE, true, "(byte[],int,double,double)double");
checkVarHandleAccessMode(vbad, VarHandle.AccessMode.COMPARE_AND_EXCHANGE_ACQUIRE, true, "(byte[],int,double,double)double");
@@ -1953,14 +1945,14 @@ public final class Main {
checkNotNull(vbbj);
checkVarType(vbbj, long.class);
checkCoordinateTypes(vbbj, "[class java.nio.ByteBuffer, int]");
- checkVarHandleAccessMode(vbbj, VarHandle.AccessMode.GET, IS_64_BIT, "(ByteBuffer,int)long");
- checkVarHandleAccessMode(vbbj, VarHandle.AccessMode.SET, IS_64_BIT, "(ByteBuffer,int,long)void");
- checkVarHandleAccessMode(vbbj, VarHandle.AccessMode.GET_VOLATILE, IS_64_BIT, "(ByteBuffer,int)long");
- checkVarHandleAccessMode(vbbj, VarHandle.AccessMode.SET_VOLATILE, IS_64_BIT, "(ByteBuffer,int,long)void");
- checkVarHandleAccessMode(vbbj, VarHandle.AccessMode.GET_ACQUIRE, IS_64_BIT, "(ByteBuffer,int)long");
- checkVarHandleAccessMode(vbbj, VarHandle.AccessMode.SET_RELEASE, IS_64_BIT, "(ByteBuffer,int,long)void");
- checkVarHandleAccessMode(vbbj, VarHandle.AccessMode.GET_OPAQUE, IS_64_BIT, "(ByteBuffer,int)long");
- checkVarHandleAccessMode(vbbj, VarHandle.AccessMode.SET_OPAQUE, IS_64_BIT, "(ByteBuffer,int,long)void");
+ checkVarHandleAccessMode(vbbj, VarHandle.AccessMode.GET, true, "(ByteBuffer,int)long");
+ checkVarHandleAccessMode(vbbj, VarHandle.AccessMode.SET, true, "(ByteBuffer,int,long)void");
+ checkVarHandleAccessMode(vbbj, VarHandle.AccessMode.GET_VOLATILE, true, "(ByteBuffer,int)long");
+ checkVarHandleAccessMode(vbbj, VarHandle.AccessMode.SET_VOLATILE, true, "(ByteBuffer,int,long)void");
+ checkVarHandleAccessMode(vbbj, VarHandle.AccessMode.GET_ACQUIRE, true, "(ByteBuffer,int)long");
+ checkVarHandleAccessMode(vbbj, VarHandle.AccessMode.SET_RELEASE, true, "(ByteBuffer,int,long)void");
+ checkVarHandleAccessMode(vbbj, VarHandle.AccessMode.GET_OPAQUE, true, "(ByteBuffer,int)long");
+ checkVarHandleAccessMode(vbbj, VarHandle.AccessMode.SET_OPAQUE, true, "(ByteBuffer,int,long)void");
checkVarHandleAccessMode(vbbj, VarHandle.AccessMode.COMPARE_AND_SET, true, "(ByteBuffer,int,long,long)boolean");
checkVarHandleAccessMode(vbbj, VarHandle.AccessMode.COMPARE_AND_EXCHANGE, true, "(ByteBuffer,int,long,long)long");
checkVarHandleAccessMode(vbbj, VarHandle.AccessMode.COMPARE_AND_EXCHANGE_ACQUIRE, true, "(ByteBuffer,int,long,long)long");
@@ -2025,14 +2017,14 @@ public final class Main {
checkNotNull(vbbd);
checkVarType(vbbd, double.class);
checkCoordinateTypes(vbbd, "[class java.nio.ByteBuffer, int]");
- checkVarHandleAccessMode(vbbd, VarHandle.AccessMode.GET, IS_64_BIT, "(ByteBuffer,int)double");
- checkVarHandleAccessMode(vbbd, VarHandle.AccessMode.SET, IS_64_BIT, "(ByteBuffer,int,double)void");
- checkVarHandleAccessMode(vbbd, VarHandle.AccessMode.GET_VOLATILE, IS_64_BIT, "(ByteBuffer,int)double");
- checkVarHandleAccessMode(vbbd, VarHandle.AccessMode.SET_VOLATILE, IS_64_BIT, "(ByteBuffer,int,double)void");
- checkVarHandleAccessMode(vbbd, VarHandle.AccessMode.GET_ACQUIRE, IS_64_BIT, "(ByteBuffer,int)double");
- checkVarHandleAccessMode(vbbd, VarHandle.AccessMode.SET_RELEASE, IS_64_BIT, "(ByteBuffer,int,double)void");
- checkVarHandleAccessMode(vbbd, VarHandle.AccessMode.GET_OPAQUE, IS_64_BIT, "(ByteBuffer,int)double");
- checkVarHandleAccessMode(vbbd, VarHandle.AccessMode.SET_OPAQUE, IS_64_BIT, "(ByteBuffer,int,double)void");
+ checkVarHandleAccessMode(vbbd, VarHandle.AccessMode.GET, true, "(ByteBuffer,int)double");
+ checkVarHandleAccessMode(vbbd, VarHandle.AccessMode.SET, true, "(ByteBuffer,int,double)void");
+ checkVarHandleAccessMode(vbbd, VarHandle.AccessMode.GET_VOLATILE, true, "(ByteBuffer,int)double");
+ checkVarHandleAccessMode(vbbd, VarHandle.AccessMode.SET_VOLATILE, true, "(ByteBuffer,int,double)void");
+ checkVarHandleAccessMode(vbbd, VarHandle.AccessMode.GET_ACQUIRE, true, "(ByteBuffer,int)double");
+ checkVarHandleAccessMode(vbbd, VarHandle.AccessMode.SET_RELEASE, true, "(ByteBuffer,int,double)void");
+ checkVarHandleAccessMode(vbbd, VarHandle.AccessMode.GET_OPAQUE, true, "(ByteBuffer,int)double");
+ checkVarHandleAccessMode(vbbd, VarHandle.AccessMode.SET_OPAQUE, true, "(ByteBuffer,int,double)void");
checkVarHandleAccessMode(vbbd, VarHandle.AccessMode.COMPARE_AND_SET, true, "(ByteBuffer,int,double,double)boolean");
checkVarHandleAccessMode(vbbd, VarHandle.AccessMode.COMPARE_AND_EXCHANGE, true, "(ByteBuffer,int,double,double)double");
checkVarHandleAccessMode(vbbd, VarHandle.AccessMode.COMPARE_AND_EXCHANGE_ACQUIRE, true, "(ByteBuffer,int,double,double)double");
diff --git a/test/714-invoke-custom-lambda-metafactory/build b/test/714-invoke-custom-lambda-metafactory/build
new file mode 100644
index 0000000000..b5002ba79c
--- /dev/null
+++ b/test/714-invoke-custom-lambda-metafactory/build
@@ -0,0 +1,22 @@
+#!/bin/bash
+#
+# Copyright 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Make us exit on a failure
+set -e
+
+# Opt out of desugaring to ensure the offending lambda is in the DEX.
+export USE_DESUGAR=false
+./default-build "$@" --experimental method-handles
diff --git a/test/714-invoke-custom-lambda-metafactory/expected.txt b/test/714-invoke-custom-lambda-metafactory/expected.txt
new file mode 100644
index 0000000000..cbe98404c5
--- /dev/null
+++ b/test/714-invoke-custom-lambda-metafactory/expected.txt
@@ -0,0 +1,4 @@
+Exception in thread "main" java.lang.BootstrapMethodError: Exception from call site #0 bootstrap method
+ at Main.main(Main.java:25)
+Caused by: java.lang.NullPointerException: Bootstrap method returned null
+ ... 1 more
diff --git a/test/714-invoke-custom-lambda-metafactory/info.txt b/test/714-invoke-custom-lambda-metafactory/info.txt
new file mode 100644
index 0000000000..4ef117b7e3
--- /dev/null
+++ b/test/714-invoke-custom-lambda-metafactory/info.txt
@@ -0,0 +1 @@
+Checks that ART doesn't crash when it encounters LambdaMetafactory.
diff --git a/test/714-invoke-custom-lambda-metafactory/src/Main.java b/test/714-invoke-custom-lambda-metafactory/src/Main.java
new file mode 100644
index 0000000000..74e0ad4370
--- /dev/null
+++ b/test/714-invoke-custom-lambda-metafactory/src/Main.java
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.Optional;
+
+public class Main {
+ public static void main(String[] args) {
+ int requiredLength = 3;
+ List<String> list = Arrays.asList("A", "B", "C", "D", "EEE");
+ Optional<String> result = list.stream().filter(x -> x.length() >= requiredLength).findAny();
+ if (result.isPresent()) {
+ System.out.println("Result is " + result.get());
+ } else {
+ System.out.println("Result is not there.");
+ }
+ }
+}
diff --git a/test/913-heaps/expected_d8.diff b/test/913-heaps/expected_d8.diff
index 83694220a3..3ea3c0d2b0 100644
--- a/test/913-heaps/expected_d8.diff
+++ b/test/913-heaps/expected_d8.diff
@@ -39,37 +39,31 @@
---
> root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=5,location= 21])--> 1@1000 [size=16, length=-1]
> root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=8,location= 21])--> 1@1000 [size=16, length=-1]
-201,202c202,203
+201,202c202
< root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonRoot,vreg=13,location= 30])--> 1@1000 [size=16, length=-1]
< root@root --(stack-local[id=1,tag=3000,depth=3,method=doFollowReferencesTest,vreg=1,location= 28])--> 3000@0 [size=136, length=-1]
---
> root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonRoot,vreg=8,location= 31])--> 1@1000 [size=16, length=-1]
-> root@root --(stack-local[id=1,tag=3000,depth=4,method=runFollowReferences,vreg=3,location= 164])--> 1000@0 [size=123456780050, length=-1]
-246c247
+246c246
< root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=11,location= 8])--> 1@1000 [size=16, length=-1]
---
> root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=10,location= 8])--> 1@1000 [size=16, length=-1]
-248c249,251
+248c248,249
< root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 19])--> 1@1000 [size=16, length=-1]
---
> root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=5,location= 21])--> 1@1000 [size=16, length=-1]
> root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=8,location= 21])--> 1@1000 [size=16, length=-1]
-> root@root --(stack-local[id=1,tag=3000,depth=4,method=runFollowReferences,vreg=3,location= 164])--> 1000@0 [size=123456780055, length=-1]
-292c295
+292d292
< root@root --(stack-local[id=1,tag=3000,depth=3,method=doFollowReferencesTest,vreg=1,location= 28])--> 3000@0 [size=136, length=-1]
----
-> root@root --(stack-local[id=1,tag=3000,depth=4,method=runFollowReferences,vreg=3,location= 181])--> 1000@0 [size=123456780060, length=-1]
-319a323
-> root@root --(stack-local[id=1,tag=3000,depth=4,method=runFollowReferences,vreg=3,location= 181])--> 1000@0 [size=123456780065, length=-1]
-347c351
+347c347
< root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonRoot,vreg=13,location= 30])--> 1@1000 [size=16, length=-1]
---
> root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonRoot,vreg=8,location= 31])--> 1@1000 [size=16, length=-1]
-366c370
+366c366
< root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=11,location= 8])--> 1@1000 [size=16, length=-1]
---
> root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=10,location= 8])--> 1@1000 [size=16, length=-1]
-368c372,373
+368c368,369
< root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 19])--> 1@1000 [size=16, length=-1]
---
> root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=5,location= 21])--> 1@1000 [size=16, length=-1]
diff --git a/test/959-invoke-polymorphic-accessors/src/Main.java b/test/959-invoke-polymorphic-accessors/src/Main.java
index cdde1de515..03fd285d46 100644
--- a/test/959-invoke-polymorphic-accessors/src/Main.java
+++ b/test/959-invoke-polymorphic-accessors/src/Main.java
@@ -901,6 +901,10 @@ public class Main {
fail();
} catch (WrongMethodTypeException expected) {}
try {
+ h0.invoke(Double.valueOf(0.33));
+ fail();
+ } catch (WrongMethodTypeException expected) {}
+ try {
Number doubleNumber = getDoubleAsNumber();
h0.invoke(doubleNumber);
fail();
diff --git a/test/983-source-transform-verify/expected.txt b/test/983-source-transform-verify/expected.txt
index abcdf3a868..aa51ea08ae 100644
--- a/test/983-source-transform-verify/expected.txt
+++ b/test/983-source-transform-verify/expected.txt
@@ -1,2 +1,3 @@
Dex file hook for art/Test983$Transform
Dex file hook for java/lang/Object
+Dex file hook for java/lang/ClassLoader
diff --git a/test/983-source-transform-verify/source_transform.cc b/test/983-source-transform-verify/source_transform.cc
index e9cb35e944..dfefce207b 100644
--- a/test/983-source-transform-verify/source_transform.cc
+++ b/test/983-source-transform-verify/source_transform.cc
@@ -28,6 +28,7 @@
#include "base/macros.h"
#include "bytecode_utils.h"
#include "dex/code_item_accessors-inl.h"
+#include "dex/art_dex_file_loader.h"
#include "dex/dex_file.h"
#include "dex/dex_file_loader.h"
#include "dex/dex_instruction.h"
@@ -66,15 +67,24 @@ void JNICALL CheckDexFileHook(jvmtiEnv* jvmti_env ATTRIBUTE_UNUSED,
if (IsJVM()) {
return;
}
+
+ // Due to b/72402467 the class_data_len might just be an estimate.
+ CHECK_GE(static_cast<size_t>(class_data_len), sizeof(DexFile::Header));
+ const DexFile::Header* header = reinterpret_cast<const DexFile::Header*>(class_data);
+ uint32_t header_file_size = header->file_size_;
+ CHECK_LE(static_cast<jint>(header_file_size), class_data_len);
+ class_data_len = static_cast<jint>(header_file_size);
+
+ const ArtDexFileLoader dex_file_loader;
std::string error;
- std::unique_ptr<const DexFile> dex(DexFileLoader::Open(class_data,
- class_data_len,
- "fake_location.dex",
- /*location_checksum*/ 0,
- /*oat_dex_file*/ nullptr,
- /*verify*/ true,
- /*verify_checksum*/ true,
- &error));
+ std::unique_ptr<const DexFile> dex(dex_file_loader.Open(class_data,
+ class_data_len,
+ "fake_location.dex",
+ /*location_checksum*/ 0,
+ /*oat_dex_file*/ nullptr,
+ /*verify*/ true,
+ /*verify_checksum*/ true,
+ &error));
if (dex.get() == nullptr) {
std::cout << "Failed to verify dex file for " << name << " because " << error << std::endl;
return;
diff --git a/test/983-source-transform-verify/src/art/Test983.java b/test/983-source-transform-verify/src/art/Test983.java
index b81e7f4df3..faae96aef6 100644
--- a/test/983-source-transform-verify/src/art/Test983.java
+++ b/test/983-source-transform-verify/src/art/Test983.java
@@ -16,7 +16,6 @@
package art;
-import java.util.Base64;
public class Test983 {
static class Transform {
public void sayHi() {
@@ -29,10 +28,11 @@ public class Test983 {
}
public static void doTest() {
- Transform abc = new Transform();
Redefinition.enableCommonRetransformation(true);
Redefinition.doCommonClassRetransformation(Transform.class);
Redefinition.doCommonClassRetransformation(Object.class);
+ // NB java.lang.ClassLoader has hidden fields.
+ Redefinition.doCommonClassRetransformation(ClassLoader.class);
Redefinition.enableCommonRetransformation(false);
}
}
diff --git a/test/Android.bp b/test/Android.bp
index f5ca2f0338..72e8eee95a 100644
--- a/test/Android.bp
+++ b/test/Android.bp
@@ -62,6 +62,7 @@ art_cc_defaults {
"libvixld-arm",
"libvixld-arm64",
"libart-gtest",
+ "libdexfile",
"libbase",
"libicuuc",
@@ -113,6 +114,7 @@ art_cc_defaults {
shared_libs: [
"libartd",
"libartd-compiler",
+ "libdexfile",
],
static_libs: [
"libgtest",
@@ -149,6 +151,7 @@ art_cc_library {
shared_libs: [
"libartd",
"libartd-compiler",
+ "libdexfile",
"libbase",
"libbacktrace",
],
@@ -166,6 +169,7 @@ cc_defaults {
"art_defaults",
],
shared_libs: [
+ "libdexfile",
"libbacktrace",
"libbase",
"libnativehelper",
@@ -264,6 +268,7 @@ art_cc_defaults {
"1943-suspend-raw-monitor-wait/native_suspend_monitor.cc",
],
shared_libs: [
+ "libdexfile",
"libbase",
],
header_libs: [
@@ -368,7 +373,9 @@ cc_defaults {
"149-suspend-all-stress/suspend_all.cc",
"154-gc-loop/heap_interface.cc",
"167-visit-locks/visit_locks.cc",
+ "169-threadgroup-jni/jni_daemon_thread.cc",
"203-multi-checkpoint/multi_checkpoint.cc",
+ "305-other-fault-handler/fault_handler.cc",
"454-get-vreg/get_vreg_jni.cc",
"457-regs/regs_jni.cc",
"461-get-reference-vreg/get_reference_vreg_jni.cc",
@@ -388,9 +395,11 @@ cc_defaults {
"661-oat-writer-layout/oat_writer_layout.cc",
"664-aget-verifier/aget-verifier.cc",
"667-jit-jni-stub/jit_jni_stub_test.cc",
+ "674-hiddenapi/hiddenapi.cc",
"708-jit-cache-churn/jit.cc",
],
shared_libs: [
+ "libdexfile",
"libbacktrace",
"libbase",
"libnativehelper",
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 2cada76d90..2df0cc6fae 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -20,6 +20,7 @@ include art/build/Android.common_test.mk
# Dependencies for actually running a run-test.
TEST_ART_RUN_TEST_DEPENDENCIES := \
$(HOST_OUT_EXECUTABLES)/dx \
+ $(HOST_OUT_EXECUTABLES)/hiddenapi \
$(HOST_OUT_EXECUTABLES)/jasmin \
$(HOST_OUT_EXECUTABLES)/smali \
$(HOST_OUT_EXECUTABLES)/dexmerger \
diff --git a/test/HiddenApi/Main.java b/test/HiddenApi/Main.java
new file mode 100644
index 0000000000..187dd6e599
--- /dev/null
+++ b/test/HiddenApi/Main.java
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Main {
+ public int ifield;
+ private static Object sfield;
+
+ void imethod(long x) {}
+ public static void smethod(Object x) {}
+
+ public native void inmethod(char x);
+ protected native static void snmethod(Integer x);
+}
diff --git a/test/ManyMethods/ManyMethods.java b/test/ManyMethods/ManyMethods.java
index b3a57f6b3b..98b9faffcd 100644
--- a/test/ManyMethods/ManyMethods.java
+++ b/test/ManyMethods/ManyMethods.java
@@ -26,6 +26,8 @@ class ManyMethods {
public static String msg7 = "Hello World7";
public static String msg8 = "Hello World8";
public static String msg9 = "Hello World9";
+ public static String msg10 = "Hello World10";
+ public static String msg11 = "Hello World11";
}
static class Printer {
@@ -57,35 +59,35 @@ class ManyMethods {
}
public static void Print4() {
- Printer.Print(Strings.msg2);
+ Printer.Print(Strings.msg4);
}
public static void Print5() {
- Printer.Print(Strings.msg3);
+ Printer.Print(Strings.msg5);
}
public static void Print6() {
- Printer2.Print(Strings.msg4);
+ Printer2.Print(Strings.msg6);
}
public static void Print7() {
- Printer.Print(Strings.msg5);
+ Printer.Print(Strings.msg7);
}
public static void Print8() {
- Printer.Print(Strings.msg6);
+ Printer.Print(Strings.msg8);
}
public static void Print9() {
- Printer2.Print(Strings.msg7);
+ Printer2.Print(Strings.msg9);
}
public static void Print10() {
- Printer2.Print(Strings.msg8);
+ Printer2.Print(Strings.msg10);
}
public static void Print11() {
- Printer.Print(Strings.msg9);
+ Printer.Print(Strings.msg11);
}
public static void main(String args[]) {
diff --git a/test/README.md b/test/README.md
index c68b40b135..350350e9e6 100644
--- a/test/README.md
+++ b/test/README.md
@@ -9,6 +9,8 @@ directory are compiled separately but to the same output directory;
this can be used to exercise "API mismatch" situations by replacing
class files created in the first pass. The "src-ex" directory is
built separately, and is intended for exercising class loaders.
+Resources can be stored in the "res" directory, which is distributed
+together with the executable files.
The gtests are in named directories and contain a .java source
file.
diff --git a/test/common/runtime_state.cc b/test/common/runtime_state.cc
index 22c51063fb..2203bdca01 100644
--- a/test/common/runtime_state.cc
+++ b/test/common/runtime_state.cc
@@ -25,6 +25,7 @@
#include "instrumentation.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
+#include "jit/profile_compilation_info.h"
#include "jit/profiling_info.h"
#include "mirror/class-inl.h"
#include "nativehelper/ScopedUtfChars.h"
@@ -151,7 +152,14 @@ extern "C" JNIEXPORT jboolean JNICALL Java_Main_isAotCompiled(JNIEnv* env,
CHECK(chars.c_str() != nullptr);
ArtMethod* method = soa.Decode<mirror::Class>(cls)->FindDeclaredDirectMethodByName(
chars.c_str(), kRuntimePointerSize);
- return method->GetOatMethodQuickCode(kRuntimePointerSize) != nullptr;
+ const void* oat_code = method->GetOatMethodQuickCode(kRuntimePointerSize);
+ if (oat_code == nullptr) {
+ return false;
+ }
+ const void* actual_code = method->GetEntryPointFromQuickCompiledCodePtrSize(kRuntimePointerSize);
+ bool interpreter =
+ Runtime::Current()->GetClassLinker()->ShouldUseInterpreterEntrypoint(method, actual_code);
+ return !interpreter;
}
extern "C" JNIEXPORT jboolean JNICALL Java_Main_hasJitCompiledEntrypoint(JNIEnv* env,
@@ -250,13 +258,6 @@ extern "C" JNIEXPORT int JNICALL Java_Main_getHotnessCounter(JNIEnv* env,
jclass,
jclass cls,
jstring method_name) {
- jit::Jit* jit = Runtime::Current()->GetJit();
- if (jit == nullptr) {
- // The hotness counter is valid only under JIT.
- // If we don't JIT return 0 to match test expectations.
- return 0;
- }
-
ArtMethod* method = nullptr;
{
ScopedObjectAccess soa(Thread::Current());
@@ -296,4 +297,25 @@ extern "C" JNIEXPORT jboolean JNICALL Java_Main_isClassMoveable(JNIEnv*,
return runtime->GetHeap()->IsMovableObject(klass);
}
+extern "C" JNIEXPORT void JNICALL Java_Main_waitForCompilation(JNIEnv*, jclass) {
+ jit::Jit* jit = Runtime::Current()->GetJit();
+ if (jit != nullptr) {
+ jit->WaitForCompilationToFinish(Thread::Current());
+ }
+}
+
+extern "C" JNIEXPORT void JNICALL Java_Main_stopJit(JNIEnv*, jclass) {
+ jit::Jit* jit = Runtime::Current()->GetJit();
+ if (jit != nullptr) {
+ jit->Stop();
+ }
+}
+
+extern "C" JNIEXPORT void JNICALL Java_Main_startJit(JNIEnv*, jclass) {
+ jit::Jit* jit = Runtime::Current()->GetJit();
+ if (jit != nullptr) {
+ jit->Start();
+ }
+}
+
} // namespace art
diff --git a/test/etc/default-build b/test/etc/default-build
index 5c8257f210..4dc2393c54 100755
--- a/test/etc/default-build
+++ b/test/etc/default-build
@@ -81,6 +81,17 @@ else
HAS_SRC_DEX2OAT_UNRESOLVED=false
fi
+if [ -f api-light-greylist.txt -o -f api-dark-greylist.txt -o -f api-blacklist.txt ]; then
+ HAS_HIDDENAPI_SPEC=true
+else
+ HAS_HIDDENAPI_SPEC=false
+fi
+
+# USE_HIDDENAPI=false run-test... will disable hiddenapi.
+if [ -z "${USE_HIDDENAPI}" ]; then
+ USE_HIDDENAPI=true
+fi
+
# DESUGAR=false run-test... will disable desugar.
if [[ "$DESUGAR" == false ]]; then
USE_DESUGAR=false
@@ -321,6 +332,24 @@ function make_dexmerge() {
${DXMERGER} "$dst_file" "${dex_files_to_merge[@]}"
}
+function make_hiddenapi() {
+ local args=()
+ while [[ $# -gt 0 ]]; do
+ args+=("--dex=$1")
+ shift
+ done
+ if [ -f api-light-greylist.txt ]; then
+ args+=("--light-greylist=api-light-greylist.txt")
+ fi
+ if [ -f api-dark-greylist.txt ]; then
+ args+=("--dark-greylist=api-dark-greylist.txt")
+ fi
+ if [ -f api-blacklist.txt ]; then
+ args+=("--blacklist=api-blacklist.txt")
+ fi
+ ${HIDDENAPI} "${args[@]}"
+}
+
# Print the directory name only if it exists.
function maybe_dir() {
local dirname="$1"
@@ -334,6 +363,13 @@ if [ -e classes.dex ]; then
exit 0
fi
+# Helper function for a common test. Evaluate with $(has_multidex).
+function has_multidex() {
+ echo [ ${HAS_SRC_MULTIDEX} = "true" \
+ -o ${HAS_JASMIN_MULTIDEX} = "true" \
+ -o ${HAS_SMALI_MULTIDEX} = "true" ]
+}
+
if [ ${HAS_SRC_DEX2OAT_UNRESOLVED} = "true" ]; then
mkdir classes
mkdir classes-ex
@@ -501,9 +537,18 @@ if [ ${HAS_SRC_EX} = "true" ]; then
fi
fi
+# Apply hiddenapi on the dex files if the test has API list file(s).
+if [ ${NEED_DEX} = "true" -a ${USE_HIDDENAPI} = "true" -a ${HAS_HIDDENAPI_SPEC} = "true" ]; then
+ if $(has_multidex); then
+ make_hiddenapi classes.dex classes2.dex
+ else
+ make_hiddenapi classes.dex
+ fi
+fi
+
# Create a single dex jar with two dex files for multidex.
if [ ${NEED_DEX} = "true" ]; then
- if [ ${HAS_SRC_MULTIDEX} = "true" ] || [ ${HAS_JASMIN_MULTIDEX} = "true" ] || [ ${HAS_SMALI_MULTIDEX} = "true" ]; then
+ if $(has_multidex); then
zip $TEST_NAME.jar classes.dex classes2.dex
else
zip $TEST_NAME.jar classes.dex
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index 132099a45d..02438701b8 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -64,6 +64,7 @@ ARGS=""
EXTERNAL_LOG_TAGS="n" # if y respect externally set ANDROID_LOG_TAGS.
DRY_RUN="n" # if y prepare to run the test but don't run it.
TEST_VDEX="n"
+TEST_DM="n"
TEST_IS_NDEBUG="n"
APP_IMAGE="y"
JVMTI_STRESS="n"
@@ -346,6 +347,9 @@ while true; do
elif [ "x$1" = "x--vdex" ]; then
TEST_VDEX="y"
shift
+ elif [ "x$1" = "x--dm" ]; then
+ TEST_DM="y"
+ shift
elif [ "x$1" = "x--vdex-filter" ]; then
shift
option="$1"
@@ -436,7 +440,13 @@ if [ "$DEBUGGER" = "y" ]; then
msg " adb forward tcp:$PORT tcp:$PORT"
fi
msg " jdb -attach localhost:$PORT"
- DEBUGGER_OPTS="-agentlib:jdwp=transport=dt_socket,address=$PORT,server=y,suspend=y"
+ if [ "$USE_JVM" = "n" ]; then
+ # TODO We should switch over to using the jvmti agent by default.
+ # Need to tell the runtime to enable the internal jdwp implementation.
+ DEBUGGER_OPTS="-XjdwpOptions:transport=dt_socket,address=$PORT,server=y,suspend=y -XjdwpProvider:internal"
+ else
+ DEBUGGER_OPTS="-agentlib:jdwp=transport=dt_socket,address=$PORT,server=y,suspend=y"
+ fi
elif [ "$DEBUGGER" = "agent" ]; then
PORT=12345
# TODO Support ddms connection and support target.
@@ -466,10 +476,9 @@ if [ "$USE_JVMTI" = "y" ]; then
if [[ "$TEST_IS_NDEBUG" = "y" ]]; then
plugin=libopenjdkjvmti.so
fi
+ # We used to add flags here that made the runtime debuggable but that is not
+ # needed anymore since the plugin can do it for us now.
FLAGS="${FLAGS} -Xplugin:${plugin}"
- FLAGS="${FLAGS} -Xcompiler-option --debuggable"
- # Always make the compilation be debuggable.
- COMPILE_FLAGS="${COMPILE_FLAGS} --debuggable"
fi
fi
@@ -673,6 +682,7 @@ fi
profman_cmdline="true"
dex2oat_cmdline="true"
vdex_cmdline="true"
+dm_cmdline="true"
mkdir_locations="${mkdir_locations} ${DEX_LOCATION}/dalvik-cache/$ISA"
strip_cmdline="true"
sync_cmdline="true"
@@ -736,6 +746,10 @@ if [ "$PREBUILD" = "y" ]; then
vdex_cmdline="${dex2oat_cmdline} ${VDEX_FILTER} --input-vdex=$DEX_LOCATION/oat/$ISA/$TEST_NAME.vdex --output-vdex=$DEX_LOCATION/oat/$ISA/$TEST_NAME.vdex"
elif [ "$TEST_VDEX" = "y" ]; then
vdex_cmdline="${dex2oat_cmdline} ${VDEX_FILTER} --input-vdex=$DEX_LOCATION/oat/$ISA/$TEST_NAME.vdex"
+ elif [ "$TEST_DM" = "y" ]; then
+ dex2oat_cmdline="${dex2oat_cmdline} --output-vdex=$DEX_LOCATION/oat/$ISA/primary.vdex"
+ dm_cmdline="zip -qj $DEX_LOCATION/oat/$ISA/$TEST_NAME.dm $DEX_LOCATION/oat/$ISA/primary.vdex"
+ vdex_cmdline="${dex2oat_cmdline} --dump-timings --dm-file=$DEX_LOCATION/oat/$ISA/$TEST_NAME.dm"
elif [ "$PROFILE" = "y" ] || [ "$RANDOM_PROFILE" = "y" ]; then
vdex_cmdline="${dex2oat_cmdline} --input-vdex=$DEX_LOCATION/oat/$ISA/$TEST_NAME.vdex --output-vdex=$DEX_LOCATION/oat/$ISA/$TEST_NAME.vdex"
fi
@@ -783,6 +797,7 @@ dalvikvm_cmdline="$INVOKE_WITH $GDB $ANDROID_ROOT/bin/$DALVIKVM \
# Remove whitespace.
dex2oat_cmdline=$(echo $dex2oat_cmdline)
dalvikvm_cmdline=$(echo $dalvikvm_cmdline)
+dm_cmdline=$(echo $dm_cmdline)
vdex_cmdline=$(echo $vdex_cmdline)
profman_cmdline=$(echo $profman_cmdline)
@@ -807,6 +822,10 @@ if [ "$HOST" = "n" ]; then
if [ "$PROFILE" = "y" ] || [ "$RANDOM_PROFILE" = "y" ]; then
adb push profile $DEX_LOCATION
fi
+ # Copy resource folder
+ if [ -d res ]; then
+ adb push res $DEX_LOCATION
+ fi
else
adb shell rm -r $DEX_LOCATION >/dev/null 2>&1
adb shell mkdir -p $DEX_LOCATION >/dev/null 2>&1
@@ -815,7 +834,10 @@ if [ "$HOST" = "n" ]; then
if [ "$PROFILE" = "y" ] || [ "$RANDOM_PROFILE" = "y" ]; then
adb push profile $DEX_LOCATION >/dev/null 2>&1
fi
-
+ # Copy resource folder
+ if [ -d res ]; then
+ adb push res $DEX_LOCATION >/dev/null 2>&1
+ fi
fi
LD_LIBRARY_PATH=/data/$TEST_DIRECTORY/art/$ISA
@@ -827,7 +849,7 @@ if [ "$HOST" = "n" ]; then
fi
# System libraries needed by libarttestd.so
- PUBLIC_LIBS=libart.so:libartd.so:libc++.so:libbacktrace.so:libbase.so:libnativehelper.so
+ PUBLIC_LIBS=libart.so:libartd.so:libc++.so:libbacktrace.so:libdexfile.so:libbase.so:libnativehelper.so
# Create a script with the command. The command can get longer than the longest
# allowed adb command and there is no way to get the exit status from a adb shell
@@ -845,6 +867,7 @@ if [ "$HOST" = "n" ]; then
export PATH=$ANDROID_ROOT/bin:$PATH && \
$profman_cmdline && \
$dex2oat_cmdline && \
+ $dm_cmdline && \
$vdex_cmdline && \
$strip_cmdline && \
$sync_cmdline && \
@@ -921,7 +944,7 @@ else
fi
if [ "$DEV_MODE" = "y" ]; then
- echo "mkdir -p ${mkdir_locations} && $profman_cmdline && $dex2oat_cmdline && $vdex_cmdline && $strip_cmdline && $sync_cmdline && $cmdline"
+ echo "mkdir -p ${mkdir_locations} && $profman_cmdline && $dex2oat_cmdline && $dm_cmdline && $vdex_cmdline && $strip_cmdline && $sync_cmdline && $cmdline"
fi
cd $ANDROID_BUILD_TOP
@@ -937,6 +960,7 @@ else
mkdir -p ${mkdir_locations} || exit 1
$profman_cmdline || { echo "Profman failed." >&2 ; exit 2; }
$dex2oat_cmdline || { echo "Dex2oat failed." >&2 ; exit 2; }
+  $dm_cmdline || { echo "Dm failed." >&2 ; exit 2; }
$vdex_cmdline || { echo "Dex2oat failed." >&2 ; exit 2; }
$strip_cmdline || { echo "Strip failed." >&2 ; exit 3; }
$sync_cmdline || { echo "Sync failed." >&2 ; exit 4; }
diff --git a/test/knownfailures.json b/test/knownfailures.json
index 9db8e9df0a..2b28409a1f 100644
--- a/test/knownfailures.json
+++ b/test/knownfailures.json
@@ -256,13 +256,6 @@
"testing deoptimizing at quick-to-interpreter bridge."]
},
{
- "tests": "137-cfi",
- "description": ["CFI unwinding expects managed frames, and the test",
- "does not iterate enough to even compile. JIT also",
- "uses Generic JNI instead of the JNI compiler."],
- "variant": "interpreter | jit"
- },
- {
"tests": "906-iterate-heap",
"description": ["Test 906 iterates the heap filtering with different",
"options. No instances should be created between those",
@@ -391,6 +384,12 @@
"variant": "jvmti-stress & jit | redefine-stress & jit"
},
{
+ "test_patterns": ["674-hiddenapi"],
+ "description": ["hiddenapi test is failing with redefine stress cdex"],
+ "bug": "http://b/72610009",
+ "variant": "redefine-stress & cdex-fast"
+ },
+ {
"test_patterns": ["616-cha"],
"description": ["The test assumes a boot image exists."],
"bug": "http://b/34193647",
@@ -412,7 +411,8 @@
{
"tests": [
"961-default-iface-resolution-gen",
- "964-default-iface-init-gen"
+ "964-default-iface-init-gen",
+ "968-default-partial-compile-gen"
],
"description": ["Tests that just take too long with jvmti-stress"],
"variant": "jvmti-stress | redefine-stress | trace-stress | step-stress"
@@ -431,6 +431,7 @@
},
{
"tests": [
+ "714-invoke-custom-lambda-metafactory",
"950-redefine-intrinsic",
"951-threaded-obsolete",
"952-invoke-custom",
@@ -647,9 +648,20 @@
"bug": "b/64683522"
},
{
+ "tests": ["628-vdex",
+ "629-vdex-speed",
+ "634-vdex-duplicate"],
+ "variant": "cdex-fast",
+ "description": ["Tests that depend on input-vdex are not supported with compact dex"]
+ },
+ {
"tests": "661-oat-writer-layout",
"variant": "interp-ac | interpreter | jit | no-dex2oat | no-prebuild | no-image | trace | redefine-stress | jvmti-stress",
"description": ["Test is designed to only check --compiler-filter=speed"]
+ },
+ {
+ "tests": "674-HelloWorld-Dm",
+ "variant": "target",
+ "description": ["Requires zip, which isn't available on device"]
}
-
]
diff --git a/test/run-test b/test/run-test
index 75fe15c919..6bcb9cdabb 100755
--- a/test/run-test
+++ b/test/run-test
@@ -113,6 +113,12 @@ if [ -z "$ZIPALIGN" ]; then
fi
export ZIPALIGN
+# If hiddenapi was not set by the environment variable, assume it is in
+# ANDROID_HOST_OUT.
+if [ -z "$HIDDENAPI" ]; then
+ export HIDDENAPI="${ANDROID_HOST_OUT}/bin/hiddenapi"
+fi
+
info="info.txt"
build="build"
run="run"
@@ -421,6 +427,9 @@ while true; do
elif [ "x$1" = "x--vdex" ]; then
run_args="${run_args} --vdex"
shift
+ elif [ "x$1" = "x--dm" ]; then
+ run_args="${run_args} --dm"
+ shift
elif [ "x$1" = "x--vdex-filter" ]; then
shift
filter=$1
diff --git a/test/testrunner/target_config.py b/test/testrunner/target_config.py
index 297ce08bee..3064c76ffd 100644
--- a/test/testrunner/target_config.py
+++ b/test/testrunner/target_config.py
@@ -230,12 +230,12 @@ target_config = {
'art-heap-poisoning' : {
'run-test' : ['--interpreter',
'--optimizing',
- '--cdex-fast'],
+ '--cdex-none'],
'env' : {
'ART_USE_READ_BARRIER' : 'false',
'ART_HEAP_POISONING' : 'true',
- # Get some extra automated testing coverage for compact dex.
- 'ART_DEFAULT_COMPACT_DEX_LEVEL' : 'fast'
+ # Disable compact dex to get coverage of standard dex file usage.
+ 'ART_DEFAULT_COMPACT_DEX_LEVEL' : 'none'
}
},
'art-preopt' : {
@@ -280,8 +280,8 @@ target_config = {
'env': {
'ART_DEFAULT_GC_TYPE' : 'SS',
'ART_USE_READ_BARRIER' : 'false',
- # Get some extra automated testing coverage for compact dex.
- 'ART_DEFAULT_COMPACT_DEX_LEVEL' : 'fast'
+ # Disable compact dex to get coverage of standard dex file usage.
+ 'ART_DEFAULT_COMPACT_DEX_LEVEL' : 'none'
}
},
'art-gtest-gss-gc': {
@@ -327,27 +327,20 @@ target_config = {
'ART_USE_READ_BARRIER' : 'false'
}
},
- 'art-gtest-heap-poisoning': {
- 'make' : 'valgrind-test-art-host64',
- 'env' : {
- 'ART_HEAP_POISONING' : 'true',
- 'ART_USE_READ_BARRIER' : 'false'
- }
- },
# ASAN (host) configurations.
# These configurations need detect_leaks=0 to work in non-setup environments like build bots,
# as our build tools leak. b/37751350
- 'art-gtest-asan': {
+ 'art-gtest-asan': {
'make' : 'test-art-host-gtest',
'env': {
'SANITIZE_HOST' : 'address',
'ASAN_OPTIONS' : 'detect_leaks=0'
}
- },
- 'art-asan': {
+ },
+ 'art-asan': {
'run-test' : ['--interpreter',
'--optimizing',
'--jit'],
@@ -355,7 +348,16 @@ target_config = {
'SANITIZE_HOST' : 'address',
'ASAN_OPTIONS' : 'detect_leaks=0'
}
- },
+ },
+ 'art-gtest-heap-poisoning': {
+ 'make' : 'test-art-host-gtest',
+ 'env' : {
+ 'ART_HEAP_POISONING' : 'true',
+ 'ART_USE_READ_BARRIER' : 'false',
+ 'SANITIZE_HOST' : 'address',
+ 'ASAN_OPTIONS' : 'detect_leaks=0'
+ }
+ },
# ART Golem build targets used by go/lem (continuous ART benchmarking),
# (art-opt-cc is used by default since it mimics the default preopt config),
diff --git a/test/testrunner/testrunner.py b/test/testrunner/testrunner.py
index 93998579f3..3d173f5571 100755
--- a/test/testrunner/testrunner.py
+++ b/test/testrunner/testrunner.py
@@ -185,7 +185,7 @@ def setup_test_env():
_user_input_variants['prebuild'].add('prebuild')
if not _user_input_variants['cdex_level']: # Default
- _user_input_variants['cdex_level'].add('cdex-none')
+ _user_input_variants['cdex_level'].add('cdex-fast')
# By default only run without jvmti
if not _user_input_variants['jvmti']:
@@ -357,11 +357,12 @@ def run_tests(tests):
while threading.active_count() > 2:
time.sleep(0.1)
return
+ # NB The order of components here should match the order of
+ # components in the regex parser in parse_test_name.
test_name = 'test-art-'
test_name += target + '-run-test-'
test_name += run + '-'
test_name += prebuild + '-'
- test_name += cdex_level + '-'
test_name += compiler + '-'
test_name += relocate + '-'
test_name += trace + '-'
@@ -371,6 +372,7 @@ def run_tests(tests):
test_name += pictest + '-'
test_name += debuggable + '-'
test_name += jvmti + '-'
+ test_name += cdex_level + '-'
test_name += test
test_name += address_size
diff --git a/test/ti-agent/common_helper.h b/test/ti-agent/common_helper.h
index fafa1afcda..e46abb6f5a 100644
--- a/test/ti-agent/common_helper.h
+++ b/test/ti-agent/common_helper.h
@@ -22,7 +22,7 @@
namespace art {
-// Taken from art/runtime/modifiers.h
+// Taken from art/runtime/dex/modifiers.h
static constexpr uint32_t kAccStatic = 0x0008; // field, method, ic
jobject GetJavaField(jvmtiEnv* jvmti, JNIEnv* env, jclass field_klass, jfieldID f);
diff --git a/test/ti-stress/stress.cc b/test/ti-stress/stress.cc
index 6e29e36e82..7fc289faeb 100644
--- a/test/ti-stress/stress.cc
+++ b/test/ti-stress/stress.cc
@@ -809,7 +809,7 @@ extern "C" JNIEXPORT jint JNICALL Agent_OnLoad(JavaVM* vm,
.can_signal_thread = 0,
.can_get_source_file_name = 1,
.can_get_line_numbers = 1,
- .can_get_source_debug_extension = 0,
+ .can_get_source_debug_extension = 1,
.can_access_local_variables = 0,
.can_maintain_original_method_order = 0,
.can_generate_single_step_events = 1,
diff --git a/test/valgrind-suppressions.txt b/test/valgrind-suppressions.txt
index 086a856f51..a97d03c2d4 100644
--- a/test/valgrind-suppressions.txt
+++ b/test/valgrind-suppressions.txt
@@ -75,3 +75,13 @@
process_vm_readv(lvec[...])
fun:process_vm_readv
}
+
+# Suppressions for IsAddressMapped check in MemMapTest
+{
+ MemMapTest_IsAddressMapped
+ Memcheck:Param
+ msync(start)
+ ...
+ fun:_ZN3art10MemMapTest15IsAddressMappedEPv
+ ...
+}
diff --git a/tools/ahat/Android.mk b/tools/ahat/Android.mk
index 34e6a9cd42..bf79751659 100644
--- a/tools/ahat/Android.mk
+++ b/tools/ahat/Android.mk
@@ -123,7 +123,7 @@ $(AHAT_TEST_DUMP_PROGUARD_MAP): $(proguard_dictionary)
# Run ahat-test-dump.jar to generate test-dump.hprof and test-dump-base.hprof
AHAT_TEST_DUMP_DEPENDENCIES := \
$(HOST_OUT_EXECUTABLES)/dalvikvm64 \
- $(ART_HOST_SHARED_LIBRARY_DEPENDENCIES) \
+ $(ART_HOST_SHARED_LIBRARY_DEBUG_DEPENDENCIES) \
$(HOST_OUT_EXECUTABLES)/art \
$(HOST_CORE_IMG_OUT_BASE)$(CORE_IMG_SUFFIX)
@@ -134,7 +134,7 @@ $(AHAT_TEST_DUMP_HPROF): $(AHAT_TEST_DUMP_JAR) $(AHAT_TEST_DUMP_DEPENDENCIES)
rm -rf $(PRIVATE_AHAT_TEST_ANDROID_DATA)
mkdir -p $(PRIVATE_AHAT_TEST_ANDROID_DATA)
ANDROID_DATA=$(PRIVATE_AHAT_TEST_ANDROID_DATA) \
- $(PRIVATE_AHAT_TEST_ART) --64 -cp $(PRIVATE_AHAT_TEST_DUMP_JAR) Main $@
+ $(PRIVATE_AHAT_TEST_ART) -d --64 -cp $(PRIVATE_AHAT_TEST_DUMP_JAR) Main $@
$(AHAT_TEST_DUMP_BASE_HPROF): PRIVATE_AHAT_TEST_ART := $(HOST_OUT_EXECUTABLES)/art
$(AHAT_TEST_DUMP_BASE_HPROF): PRIVATE_AHAT_TEST_DUMP_JAR := $(AHAT_TEST_DUMP_JAR)
@@ -143,7 +143,7 @@ $(AHAT_TEST_DUMP_BASE_HPROF): $(AHAT_TEST_DUMP_JAR) $(AHAT_TEST_DUMP_DEPENDENCIE
rm -rf $(PRIVATE_AHAT_TEST_ANDROID_DATA)
mkdir -p $(PRIVATE_AHAT_TEST_ANDROID_DATA)
ANDROID_DATA=$(PRIVATE_AHAT_TEST_ANDROID_DATA) \
- $(PRIVATE_AHAT_TEST_ART) --64 -cp $(PRIVATE_AHAT_TEST_DUMP_JAR) Main $@ --base
+ $(PRIVATE_AHAT_TEST_ART) -d --64 -cp $(PRIVATE_AHAT_TEST_DUMP_JAR) Main $@ --base
# --- ahat-tests.jar --------------
include $(CLEAR_VARS)
diff --git a/tools/buildbot-build.sh b/tools/buildbot-build.sh
index fd9ad0bb0f..a20175531d 100755
--- a/tools/buildbot-build.sh
+++ b/tools/buildbot-build.sh
@@ -89,4 +89,4 @@ done
echo "Executing $make_command"
-$make_command
+bash -c "$make_command"
diff --git a/tools/cpp-define-generator/constant_class.def b/tools/cpp-define-generator/constant_class.def
index 4f1d875e5a..1310103ab7 100644
--- a/tools/cpp-define-generator/constant_class.def
+++ b/tools/cpp-define-generator/constant_class.def
@@ -15,8 +15,8 @@
*/
#if defined(DEFINE_INCLUDE_DEPENDENCIES)
-#include "modifiers.h" // kAccClassIsFinalizable
#include "base/bit_utils.h" // MostSignificantBit
+#include "dex/modifiers.h" // kAccClassIsFinalizable
#endif
#define DEFINE_FLAG_OFFSET(type_name, field_name, expr) \
diff --git a/tools/cpp-define-generator/constant_globals.def b/tools/cpp-define-generator/constant_globals.def
index 5018f52937..539633e0b3 100644
--- a/tools/cpp-define-generator/constant_globals.def
+++ b/tools/cpp-define-generator/constant_globals.def
@@ -18,8 +18,8 @@
#if defined(DEFINE_INCLUDE_DEPENDENCIES)
#include <atomic> // std::memory_order_relaxed
+#include "dex/modifiers.h"
#include "globals.h" // art::kObjectAlignment
-#include "modifiers.h"
#endif
DEFINE_EXPR(STD_MEMORY_ORDER_RELAXED, int32_t, std::memory_order_relaxed)
diff --git a/tools/dt_fds_forward.py b/tools/dt_fds_forward.py
index 516b7fef96..1f9c41fc26 100755
--- a/tools/dt_fds_forward.py
+++ b/tools/dt_fds_forward.py
@@ -34,10 +34,11 @@ import subprocess
import sys
import time
-LISTEN_START_MESSAGE = b"dt_fd_forward:START-LISTEN\x00"
-LISTEN_END_MESSAGE = b"dt_fd_forward:END-LISTEN\x00"
-ACCEPTED_MESSAGE = b"dt_fd_forward:ACCEPTED\x00"
-CLOSE_MESSAGE = b"dt_fd_forward:CLOSING\x00"
+NEED_HANDSHAKE_MESSAGE = b"HANDSHAKE:REQD\x00"
+LISTEN_START_MESSAGE = b"dt_fd_forward:START-LISTEN\x00"
+LISTEN_END_MESSAGE = b"dt_fd_forward:END-LISTEN\x00"
+ACCEPTED_MESSAGE = b"dt_fd_forward:ACCEPTED\x00"
+CLOSE_MESSAGE = b"dt_fd_forward:CLOSING\x00"
libc = ctypes.cdll.LoadLibrary("libc.so.6")
def eventfd(init_val, flags):
@@ -70,7 +71,7 @@ def send_fds(sock, remote_read, remote_write, remote_event):
"""
Send the three fds over the given socket.
"""
- sock.sendmsg([b"!"], # We don't actually care what we send here.
+ sock.sendmsg([NEED_HANDSHAKE_MESSAGE], # We want the transport to handle the handshake.
[(socket.SOL_SOCKET, # Send over socket.
socket.SCM_RIGHTS, # Payload is file-descriptor array
array.array('i', [remote_read, remote_write, remote_event]))])
diff --git a/tools/external_oj_libjdwp_art_failures.txt b/tools/external_oj_libjdwp_art_failures.txt
index c96830a592..828c0aac0f 100644
--- a/tools/external_oj_libjdwp_art_failures.txt
+++ b/tools/external_oj_libjdwp_art_failures.txt
@@ -59,6 +59,13 @@
name: "org.apache.harmony.jpda.tests.jdwp.ObjectReference.IsCollectedTest#testIsCollected001"
},
{
+ description: "Test is flaky",
+ result: EXEC_FAILED,
+ bug: 70958370,
+ names: [ "org.apache.harmony.jpda.tests.jdwp.ObjectReference.EnableCollectionTest#testEnableCollection001",
+ "org.apache.harmony.jpda.tests.jdwp.MultiSession.EnableCollectionTest#testEnableCollection001" ]
+},
+{
description: "Test crashes",
result: EXEC_FAILED,
bug: 69591477,
diff --git a/tools/generate-boot-image-profile.sh b/tools/generate-boot-image-profile.sh
index d87123ad71..ee53f43865 100755
--- a/tools/generate-boot-image-profile.sh
+++ b/tools/generate-boot-image-profile.sh
@@ -46,7 +46,9 @@ for file in "$@"; do
fi
done
-jar_args=()
+# Boot jars have hidden API access flags which do not pass dex file
+# verification. Skip it.
+jar_args=("--skip-apk-verification")
boot_jars=$("$ANDROID_BUILD_TOP"/art/tools/bootjars.sh --target)
jar_dir=$ANDROID_BUILD_TOP/$(get_build_var TARGET_OUT_JAVA_LIBRARIES)
for file in $boot_jars; do
diff --git a/tools/hiddenapi/Android.bp b/tools/hiddenapi/Android.bp
new file mode 100644
index 0000000000..f9824f1fa3
--- /dev/null
+++ b/tools/hiddenapi/Android.bp
@@ -0,0 +1,65 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+cc_defaults {
+ name: "hiddenapi-defaults",
+ host_supported: true,
+ device_supported: false,
+ defaults: ["art_defaults"],
+ srcs: [
+ "hiddenapi.cc",
+ ],
+
+ target: {
+ android: {
+ compile_multilib: "prefer32",
+ },
+ },
+
+ shared_libs: [
+ "libdexfile",
+ "libbase",
+ ],
+}
+
+art_cc_binary {
+ name: "hiddenapi",
+ defaults: ["hiddenapi-defaults"],
+ shared_libs: [
+ "libart",
+ ],
+}
+
+art_cc_binary {
+ name: "hiddenapid",
+ defaults: [
+ "art_debug_defaults",
+ "hiddenapi-defaults",
+ ],
+ shared_libs: [
+ "libartd",
+ ],
+}
+
+art_cc_test {
+ name: "art_hiddenapi_tests",
+ host_supported: true,
+ device_supported: false,
+ defaults: [
+ "art_gtest_defaults",
+ ],
+ srcs: ["hiddenapi_test.cc"],
+}
diff --git a/tools/hiddenapi/README.md b/tools/hiddenapi/README.md
new file mode 100644
index 0000000000..cad12126dd
--- /dev/null
+++ b/tools/hiddenapi/README.md
@@ -0,0 +1,54 @@
+HiddenApi
+=========
+
+This tool iterates over all class members inside given DEX files and modifies
+their access flags if their signatures appear on one of two lists - greylist and
+blacklist - provided as text file inputs. These access flags denote to the
+runtime that the marked methods/fields should be treated as internal APIs with
+access restricted only to platform code. Methods/fields not mentioned on the two
+lists are assumed to be on a whitelist and left accessible by all code.
+
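+For reference, a typical invocation (the flag names are taken from the tool's
+usage text; the file names here are purely illustrative) might look like:
+
+  `hiddenapi --dex=classes.dex --light-greylist=light-greylist.txt --dark-greylist=dark-greylist.txt --blacklist=blacklist.txt`
+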
+API signatures
+==============
+
+The methods/fields to be marked are specified in two text files (greylist,
+blacklist) provided as input. Only one signature per line is allowed.
+
+Types are expected in their DEX format - class descriptors are to be provided in
+"slash" form, e.g. "Ljava/lang/Object;", primitive types in their shorty form,
+e.g. "I" for "int", and a "[" prefix denotes an array type. Lists of types do
+not use any separators, e.g. "ILxyz;F" for "int, xyz, float".
+
+Methods are encoded as:
+ `class_descriptor->method_name(parameter_types)return_type`
+
+Fields are encoded as:
+ `class_descriptor->field_name:field_type`
+
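+As an illustration (the class and member names below are made up), a greylist
+or blacklist file could contain entries such as:
+
+  `Lcom/example/Foo;->bar(ILjava/lang/String;)V`
+  `Lcom/example/Foo;->mCount:I`
+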
+Bit encoding
+============
+
+Two bits of information are encoded in the DEX access flags. These are encoded
+as unsigned LEB128 values in DEX and, so as not to increase the size of the DEX,
+different modifiers were chosen for different kinds of methods/fields.
+
+The first bit is encoded as the inversion of the visibility access flags (bits 2:0).
+At most one of these flags can be set at any given time. Inverting these bits
+therefore produces a value where at least two bits are set and there is never
+any loss of information.
+
+The second bit is encoded differently for each type of class member, as there
+is no single unused bit such that setting it would not increase the size of the
+LEB128 encoding. The following bits are used:
+
+ * bit 5 for fields as it carries no other meaning
+ * bit 5 for non-native methods, as `synchronized` can only be set on native
+ methods (the Java `synchronized` modifier is bit 17)
+ * bit 9 for native methods, as it carries no meaning and bit 8 (`native`) will
+ make the LEB128 encoding at least two bytes long
+
+The following bit encoding is used to denote the membership of a method/field:
+
+ * whitelist: `false`, `false`
+ * greylist: `true`, `false`
+ * blacklist: `true`, `true`
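+
+As a sketch of the scheme above, consider a hypothetical `private` (0x0002)
+non-native method:
+
+ * whitelist: the flags stay 0x0002
+ * greylist: visibility bits 2:0 are inverted, giving 0x0005
+ * blacklist: visibility bits are inverted and bit 5 is set, giving 0x0025
+
+All of these values still fit in a single LEB128 byte, so the size of the
+encoded class data does not change.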
diff --git a/tools/hiddenapi/hiddenapi.cc b/tools/hiddenapi/hiddenapi.cc
new file mode 100644
index 0000000000..c893da646d
--- /dev/null
+++ b/tools/hiddenapi/hiddenapi.cc
@@ -0,0 +1,378 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fstream>
+#include <iostream>
+#include <unordered_set>
+
+#include "android-base/stringprintf.h"
+#include "android-base/strings.h"
+
+#include "base/unix_file/fd_file.h"
+#include "dex/art_dex_file_loader.h"
+#include "dex/dex_file-inl.h"
+#include "hidden_api_access_flags.h"
+#include "mem_map.h"
+#include "os.h"
+
+namespace art {
+
+static int original_argc;
+static char** original_argv;
+
+static std::string CommandLine() {
+ std::vector<std::string> command;
+ for (int i = 0; i < original_argc; ++i) {
+ command.push_back(original_argv[i]);
+ }
+ return android::base::Join(command, ' ');
+}
+
+static void UsageErrorV(const char* fmt, va_list ap) {
+ std::string error;
+ android::base::StringAppendV(&error, fmt, ap);
+ LOG(ERROR) << error;
+}
+
+static void UsageError(const char* fmt, ...) {
+ va_list ap;
+ va_start(ap, fmt);
+ UsageErrorV(fmt, ap);
+ va_end(ap);
+}
+
+NO_RETURN static void Usage(const char* fmt, ...) {
+ va_list ap;
+ va_start(ap, fmt);
+ UsageErrorV(fmt, ap);
+ va_end(ap);
+
+ UsageError("Command: %s", CommandLine().c_str());
+ UsageError("Usage: hiddenapi [options]...");
+ UsageError("");
+ UsageError(" --dex=<filename>: specify dex file whose members' access flags are to be set.");
+ UsageError(" At least one --dex parameter must be specified.");
+ UsageError("");
+ UsageError(" --light-greylist=<filename>:");
+ UsageError(" --dark-greylist=<filename>:");
+ UsageError(" --blacklist=<filename>: text files with signatures of methods/fields to be marked");
+ UsageError(" greylisted/blacklisted respectively. At least one list must be provided.");
+ UsageError("");
+ UsageError(" --print-hidden-api: dump a list of marked methods/fields to the standard output.");
+ UsageError(" There is no indication which API category they belong to.");
+ UsageError("");
+
+ exit(EXIT_FAILURE);
+}
+
+class DexClass {
+ public:
+ DexClass(const DexFile& dex_file, uint32_t idx)
+ : dex_file_(dex_file), class_def_(dex_file.GetClassDef(idx)) {}
+
+ const DexFile& GetDexFile() const { return dex_file_; }
+
+ const dex::TypeIndex GetClassIndex() const { return class_def_.class_idx_; }
+
+ const uint8_t* GetData() const { return dex_file_.GetClassData(class_def_); }
+
+ const char* GetDescriptor() const { return dex_file_.GetClassDescriptor(class_def_); }
+
+ private:
+ const DexFile& dex_file_;
+ const DexFile::ClassDef& class_def_;
+};
+
+class DexMember {
+ public:
+ DexMember(const DexClass& klass, const ClassDataItemIterator& it)
+ : klass_(klass), it_(it) {
+ DCHECK_EQ(it_.IsAtMethod() ? GetMethodId().class_idx_ : GetFieldId().class_idx_,
+ klass_.GetClassIndex());
+ }
+
+ // Sets hidden bits in access flags and writes them back into the DEX in memory.
+ // Note that this will not update the cached data of ClassDataItemIterator
+ // until it iterates over this item again and therefore will fail a CHECK if
+ // it is called multiple times on the same DexMember.
+ void SetHidden(HiddenApiAccessFlags::ApiList value) {
+ const uint32_t old_flags = it_.GetRawMemberAccessFlags();
+ const uint32_t new_flags = HiddenApiAccessFlags::EncodeForDex(old_flags, value);
+ CHECK_EQ(UnsignedLeb128Size(new_flags), UnsignedLeb128Size(old_flags));
+
+ // Locate the LEB128-encoded access flags in class data.
+ // `ptr` initially points to the next ClassData item. We iterate backwards
+ // until we hit the terminating byte of the previous Leb128 value.
+ const uint8_t* ptr = it_.DataPointer();
+ if (it_.IsAtMethod()) {
+ ptr = ReverseSearchUnsignedLeb128(ptr);
+ DCHECK_EQ(DecodeUnsignedLeb128WithoutMovingCursor(ptr), it_.GetMethodCodeItemOffset());
+ }
+ ptr = ReverseSearchUnsignedLeb128(ptr);
+ DCHECK_EQ(DecodeUnsignedLeb128WithoutMovingCursor(ptr), old_flags);
+
+ // Overwrite the access flags.
+ UpdateUnsignedLeb128(const_cast<uint8_t*>(ptr), new_flags);
+ }
+
+ // Returns true if this member's API entry is in `list`.
+ bool IsOnApiList(const std::unordered_set<std::string>& list) const {
+ return list.find(GetApiEntry()) != list.end();
+ }
+
+ // Constructs a string with a unique signature of this class member.
+ std::string GetApiEntry() const {
+ std::stringstream ss;
+ ss << klass_.GetDescriptor() << "->";
+ if (it_.IsAtMethod()) {
+ const DexFile::MethodId& mid = GetMethodId();
+ ss << klass_.GetDexFile().GetMethodName(mid)
+ << klass_.GetDexFile().GetMethodSignature(mid).ToString();
+ } else {
+ const DexFile::FieldId& fid = GetFieldId();
+ ss << klass_.GetDexFile().GetFieldName(fid) << ":"
+ << klass_.GetDexFile().GetFieldTypeDescriptor(fid);
+ }
+ return ss.str();
+ }
+
+ private:
+ inline const DexFile::MethodId& GetMethodId() const {
+ DCHECK(it_.IsAtMethod());
+ return klass_.GetDexFile().GetMethodId(it_.GetMemberIndex());
+ }
+
+ inline const DexFile::FieldId& GetFieldId() const {
+ DCHECK(!it_.IsAtMethod());
+ return klass_.GetDexFile().GetFieldId(it_.GetMemberIndex());
+ }
+
+ const DexClass& klass_;
+ const ClassDataItemIterator& it_;
+};
+
+class HiddenApi FINAL {
+ public:
+ HiddenApi() : print_hidden_api_(false) {}
+
+ void ParseArgs(int argc, char** argv) {
+ original_argc = argc;
+ original_argv = argv;
+
+ android::base::InitLogging(argv);
+
+ // Skip over the command name.
+ argv++;
+ argc--;
+
+ if (argc == 0) {
+ Usage("No arguments specified");
+ }
+
+ for (int i = 0; i < argc; ++i) {
+ const StringPiece option(argv[i]);
+ const bool log_options = false;
+ if (log_options) {
+ LOG(INFO) << "hiddenapi: option[" << i << "]=" << argv[i];
+ }
+ if (option == "--print-hidden-api") {
+ print_hidden_api_ = true;
+ } else if (option.starts_with("--dex=")) {
+ dex_paths_.push_back(option.substr(strlen("--dex=")).ToString());
+ } else if (option.starts_with("--light-greylist=")) {
+ light_greylist_path_ = option.substr(strlen("--light-greylist=")).ToString();
+ } else if (option.starts_with("--dark-greylist=")) {
+ dark_greylist_path_ = option.substr(strlen("--dark-greylist=")).ToString();
+ } else if (option.starts_with("--blacklist=")) {
+ blacklist_path_ = option.substr(strlen("--blacklist=")).ToString();
+ } else {
+ Usage("Unknown argument '%s'", option.data());
+ }
+ }
+ }
+
+ bool ProcessDexFiles() {
+ if (dex_paths_.empty()) {
+ Usage("No DEX files specified");
+ }
+
+ if (light_greylist_path_.empty() && dark_greylist_path_.empty() && blacklist_path_.empty()) {
+ Usage("No API file specified");
+ }
+
+ if (!light_greylist_path_.empty() && !OpenApiFile(light_greylist_path_, &light_greylist_)) {
+ return false;
+ }
+
+ if (!dark_greylist_path_.empty() && !OpenApiFile(dark_greylist_path_, &dark_greylist_)) {
+ return false;
+ }
+
+ if (!blacklist_path_.empty() && !OpenApiFile(blacklist_path_, &blacklist_)) {
+ return false;
+ }
+
+ MemMap::Init();
+ if (!OpenDexFiles()) {
+ return false;
+ }
+
+ DCHECK(!dex_files_.empty());
+ for (auto& dex_file : dex_files_) {
+ CategorizeAllClasses(*dex_file.get());
+ }
+
+ UpdateDexChecksums();
+ return true;
+ }
+
+ private:
+ bool OpenApiFile(const std::string& path, std::unordered_set<std::string>* list) {
+ DCHECK(list->empty());
+ DCHECK(!path.empty());
+
+ std::ifstream api_file(path, std::ifstream::in);
+ if (api_file.fail()) {
+ LOG(ERROR) << "Unable to open file '" << path << "' " << strerror(errno);
+ return false;
+ }
+
+ for (std::string line; std::getline(api_file, line);) {
+ list->insert(line);
+ }
+
+ api_file.close();
+ return true;
+ }
+
+ bool OpenDexFiles() {
+ ArtDexFileLoader dex_loader;
+ DCHECK(dex_files_.empty());
+
+ for (const std::string& filename : dex_paths_) {
+ std::string error_msg;
+
+ File fd(filename.c_str(), O_RDWR, /* check_usage */ false);
+ if (fd.Fd() == -1) {
+ LOG(ERROR) << "Unable to open file '" << filename << "': " << strerror(errno);
+ return false;
+ }
+
+ // Memory-map the dex file with MAP_SHARED flag so that changes in memory
+ // propagate to the underlying file. We run dex file verification as if
+      // the dex file was not in boot class path to check basic assumptions,
+ // such as that at most one of public/private/protected flag is set.
+ // We do those checks here and skip them when loading the processed file
+ // into boot class path.
+ std::unique_ptr<const DexFile> dex_file(dex_loader.OpenDex(fd.Release(),
+ /* location */ filename,
+ /* verify */ true,
+ /* verify_checksum */ true,
+ /* mmap_shared */ true,
+ &error_msg));
+ if (dex_file.get() == nullptr) {
+ LOG(ERROR) << "Open failed for '" << filename << "' " << error_msg;
+ return false;
+ }
+
+ if (!dex_file->IsStandardDexFile()) {
+ LOG(ERROR) << "Expected a standard dex file '" << filename << "'";
+ return false;
+ }
+
+ // Change the protection of the memory mapping to read-write.
+ if (!dex_file->EnableWrite()) {
+ LOG(ERROR) << "Failed to enable write permission for '" << filename << "'";
+ return false;
+ }
+
+ dex_files_.push_back(std::move(dex_file));
+ }
+ return true;
+ }
+
+ void CategorizeAllClasses(const DexFile& dex_file) {
+ for (uint32_t class_idx = 0; class_idx < dex_file.NumClassDefs(); ++class_idx) {
+ DexClass klass(dex_file, class_idx);
+ const uint8_t* klass_data = klass.GetData();
+ if (klass_data == nullptr) {
+ continue;
+ }
+
+ for (ClassDataItemIterator it(klass.GetDexFile(), klass_data); it.HasNext(); it.Next()) {
+ DexMember member(klass, it);
+
+        // Categorize the member and overwrite its access flags.
+ // Note that if a member appears on multiple API lists, it will be categorized
+ // as the strictest.
+ bool is_hidden = true;
+ if (member.IsOnApiList(blacklist_)) {
+ member.SetHidden(HiddenApiAccessFlags::kBlacklist);
+ } else if (member.IsOnApiList(dark_greylist_)) {
+ member.SetHidden(HiddenApiAccessFlags::kDarkGreylist);
+ } else if (member.IsOnApiList(light_greylist_)) {
+ member.SetHidden(HiddenApiAccessFlags::kLightGreylist);
+ } else {
+ member.SetHidden(HiddenApiAccessFlags::kWhitelist);
+ is_hidden = false;
+ }
+
+ if (print_hidden_api_ && is_hidden) {
+ std::cout << member.GetApiEntry() << std::endl;
+ }
+ }
+ }
+ }
+
+ void UpdateDexChecksums() {
+ for (auto& dex_file : dex_files_) {
+ // Obtain a writeable pointer to the dex header.
+ DexFile::Header* header = const_cast<DexFile::Header*>(&dex_file->GetHeader());
+ // Recalculate checksum and overwrite the value in the header.
+ header->checksum_ = dex_file->CalculateChecksum();
+ }
+ }
+
+ // Print signatures of APIs which have been grey-/blacklisted.
+ bool print_hidden_api_;
+
+ // Paths to DEX files which should be processed.
+ std::vector<std::string> dex_paths_;
+
+ // Paths to text files which contain the lists of API members.
+ std::string light_greylist_path_;
+ std::string dark_greylist_path_;
+ std::string blacklist_path_;
+
+ // Opened DEX files. Note that these are opened as `const` but eventually will be written into.
+ std::vector<std::unique_ptr<const DexFile>> dex_files_;
+
+ // Signatures of DEX members loaded from `light_greylist_path_`, `dark_greylist_path_`,
+ // `blacklist_path_`.
+ std::unordered_set<std::string> light_greylist_;
+ std::unordered_set<std::string> dark_greylist_;
+ std::unordered_set<std::string> blacklist_;
+};
+
+} // namespace art
+
+int main(int argc, char** argv) {
+ art::HiddenApi hiddenapi;
+
+  // Parse arguments. Argument mistakes will lead to exit(EXIT_FAILURE) in Usage().
+ hiddenapi.ParseArgs(argc, argv);
+ return hiddenapi.ProcessDexFiles() ? EXIT_SUCCESS : EXIT_FAILURE;
+}
diff --git a/tools/hiddenapi/hiddenapi_test.cc b/tools/hiddenapi/hiddenapi_test.cc
new file mode 100644
index 0000000000..af1439520f
--- /dev/null
+++ b/tools/hiddenapi/hiddenapi_test.cc
@@ -0,0 +1,601 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fstream>
+
+#include "base/unix_file/fd_file.h"
+#include "common_runtime_test.h"
+#include "dex/art_dex_file_loader.h"
+#include "dex/dex_file-inl.h"
+#include "exec_utils.h"
+#include "zip_archive.h"
+
+namespace art {
+
+class HiddenApiTest : public CommonRuntimeTest {
+ protected:
+ std::string GetHiddenApiCmd() {
+ std::string file_path = GetTestAndroidRoot();
+ file_path += "/bin/hiddenapi";
+ if (kIsDebugBuild) {
+ file_path += "d";
+ }
+ if (!OS::FileExists(file_path.c_str())) {
+ LOG(FATAL) << "Could not find binary " << file_path;
+ UNREACHABLE();
+ }
+ return file_path;
+ }
+
+ std::unique_ptr<const DexFile> RunHiddenApi(const ScratchFile& light_greylist,
+ const ScratchFile& dark_greylist,
+ const ScratchFile& blacklist,
+ const std::vector<std::string>& extra_args,
+ ScratchFile* out_dex) {
+ std::string error;
+ std::unique_ptr<ZipArchive> jar(
+ ZipArchive::Open(GetTestDexFileName("HiddenApi").c_str(), &error));
+ if (jar == nullptr) {
+ LOG(FATAL) << "Could not open test file " << GetTestDexFileName("HiddenApi") << ": " << error;
+ UNREACHABLE();
+ }
+ std::unique_ptr<ZipEntry> jar_classes_dex(jar->Find("classes.dex", &error));
+ if (jar_classes_dex == nullptr) {
+ LOG(FATAL) << "Could not find classes.dex in test file " << GetTestDexFileName("HiddenApi")
+ << ": " << error;
+ UNREACHABLE();
+ } else if (!jar_classes_dex->ExtractToFile(*out_dex->GetFile(), &error)) {
+ LOG(FATAL) << "Could not extract classes.dex from test file "
+ << GetTestDexFileName("HiddenApi") << ": " << error;
+ UNREACHABLE();
+ }
+
+ std::vector<std::string> argv_str;
+ argv_str.push_back(GetHiddenApiCmd());
+ argv_str.insert(argv_str.end(), extra_args.begin(), extra_args.end());
+ argv_str.push_back("--dex=" + out_dex->GetFilename());
+ argv_str.push_back("--light-greylist=" + light_greylist.GetFilename());
+ argv_str.push_back("--dark-greylist=" + dark_greylist.GetFilename());
+ argv_str.push_back("--blacklist=" + blacklist.GetFilename());
+ int return_code = ExecAndReturnCode(argv_str, &error);
+ if (return_code != 0) {
+ LOG(FATAL) << "HiddenApi binary exited with unexpected return code " << return_code;
+ }
+ return OpenDex(*out_dex);
+ }
+
+ std::unique_ptr<const DexFile> OpenDex(const ScratchFile& file) {
+ ArtDexFileLoader dex_loader;
+ std::string error_msg;
+
+ File fd(file.GetFilename(), O_RDONLY, /* check_usage */ false);
+ if (fd.Fd() == -1) {
+ LOG(FATAL) << "Unable to open file '" << file.GetFilename() << "': " << strerror(errno);
+ UNREACHABLE();
+ }
+
+ std::unique_ptr<const DexFile> dex_file(dex_loader.OpenDex(
+ fd.Release(), /* location */ file.GetFilename(), /* verify */ false,
+ /* verify_checksum */ true, /* mmap_shared */ false, &error_msg));
+ if (dex_file.get() == nullptr) {
+ LOG(FATAL) << "Open failed for '" << file.GetFilename() << "' " << error_msg;
+ UNREACHABLE();
+ } else if (!dex_file->IsStandardDexFile()) {
+ LOG(FATAL) << "Expected a standard dex file '" << file.GetFilename() << "'";
+ UNREACHABLE();
+ }
+
+ return dex_file;
+ }
+
+ std::ofstream OpenStream(const ScratchFile& file) {
+ std::ofstream ofs(file.GetFilename(), std::ofstream::out);
+ if (ofs.fail()) {
+ LOG(FATAL) << "Open failed for '" << file.GetFilename() << "' " << strerror(errno);
+ UNREACHABLE();
+ }
+ return ofs;
+ }
+
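+  // Returns the class_def whose descriptor equals `desc`, aborting if the
+  // class is not present in `dex_file`.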
+ const DexFile::ClassDef& FindClass(const char* desc, const DexFile& dex_file) {
+ for (uint32_t i = 0; i < dex_file.NumClassDefs(); ++i) {
+ const DexFile::ClassDef& class_def = dex_file.GetClassDef(i);
+ if (strcmp(desc, dex_file.GetClassDescriptor(class_def)) == 0) {
+ return class_def;
+ }
+ }
+ LOG(FATAL) << "Could not find class " << desc;
+ UNREACHABLE();
+ }
+
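+  // Finds the field `name` in `class_def`, checks that its visibility flags
+  // equal `expected_visibility`, and returns its decoded hidden API flags.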
+ HiddenApiAccessFlags::ApiList GetFieldHiddenFlags(const char* name,
+ uint32_t expected_visibility,
+ const DexFile::ClassDef& class_def,
+ const DexFile& dex_file) {
+ const uint8_t* class_data = dex_file.GetClassData(class_def);
+ if (class_data == nullptr) {
+ LOG(FATAL) << "Class " << dex_file.GetClassDescriptor(class_def) << " has no data";
+ UNREACHABLE();
+ }
+
+ for (ClassDataItemIterator it(dex_file, class_data); it.HasNext(); it.Next()) {
+ if (it.IsAtMethod()) {
+ break;
+ }
+ const DexFile::FieldId& fid = dex_file.GetFieldId(it.GetMemberIndex());
+ if (strcmp(name, dex_file.GetFieldName(fid)) == 0) {
+ uint32_t actual_visibility = it.GetFieldAccessFlags() & kAccVisibilityFlags;
+ if (actual_visibility != expected_visibility) {
+ LOG(FATAL) << "Field " << name << " in class " << dex_file.GetClassDescriptor(class_def)
+ << " does not have the expected visibility flags (" << expected_visibility
+ << " != " << actual_visibility << ")";
+ UNREACHABLE();
+ }
+ return it.DecodeHiddenAccessFlags();
+ }
+ }
+
+ LOG(FATAL) << "Could not find field " << name << " in class "
+ << dex_file.GetClassDescriptor(class_def);
+ UNREACHABLE();
+ }
+
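+  // Method counterpart of GetFieldHiddenFlags; additionally checks that the
+  // method's native flag matches `expected_native`.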
+ HiddenApiAccessFlags::ApiList GetMethodHiddenFlags(const char* name,
+ uint32_t expected_visibility,
+ bool expected_native,
+ const DexFile::ClassDef& class_def,
+ const DexFile& dex_file) {
+ const uint8_t* class_data = dex_file.GetClassData(class_def);
+ if (class_data == nullptr) {
+ LOG(FATAL) << "Class " << dex_file.GetClassDescriptor(class_def) << " has no data";
+ UNREACHABLE();
+ }
+
+ for (ClassDataItemIterator it(dex_file, class_data); it.HasNext(); it.Next()) {
+ if (!it.IsAtMethod()) {
+ continue;
+ }
+ const DexFile::MethodId& mid = dex_file.GetMethodId(it.GetMemberIndex());
+ if (strcmp(name, dex_file.GetMethodName(mid)) == 0) {
+ if (expected_native != it.MemberIsNative()) {
+ LOG(FATAL) << "Expected native=" << expected_native << " for method " << name
+ << " in class " << dex_file.GetClassDescriptor(class_def);
+ UNREACHABLE();
+ }
+ uint32_t actual_visibility = it.GetMethodAccessFlags() & kAccVisibilityFlags;
+ if (actual_visibility != expected_visibility) {
+ LOG(FATAL) << "Method " << name << " in class " << dex_file.GetClassDescriptor(class_def)
+ << " does not have the expected visibility flags (" << expected_visibility
+ << " != " << actual_visibility << ")";
+ UNREACHABLE();
+ }
+ return it.DecodeHiddenAccessFlags();
+ }
+ }
+
+ LOG(FATAL) << "Could not find method " << name << " in class "
+ << dex_file.GetClassDescriptor(class_def);
+ UNREACHABLE();
+ }
+
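+  // Convenience accessors for the members of LMain; in the test dex file.
+  // Each one asserts the member's expected visibility (and, for the native
+  // methods, the native flag) and returns its hidden API flags.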
+ HiddenApiAccessFlags::ApiList GetIFieldHiddenFlags(const DexFile& dex_file) {
+ return GetFieldHiddenFlags("ifield", kAccPublic, FindClass("LMain;", dex_file), dex_file);
+ }
+
+ HiddenApiAccessFlags::ApiList GetSFieldHiddenFlags(const DexFile& dex_file) {
+ return GetFieldHiddenFlags("sfield", kAccPrivate, FindClass("LMain;", dex_file), dex_file);
+ }
+
+ HiddenApiAccessFlags::ApiList GetIMethodHiddenFlags(const DexFile& dex_file) {
+ return GetMethodHiddenFlags(
+ "imethod", 0, /* native */ false, FindClass("LMain;", dex_file), dex_file);
+ }
+
+ HiddenApiAccessFlags::ApiList GetSMethodHiddenFlags(const DexFile& dex_file) {
+ return GetMethodHiddenFlags(
+ "smethod", kAccPublic, /* native */ false, FindClass("LMain;", dex_file), dex_file);
+ }
+
+ HiddenApiAccessFlags::ApiList GetINMethodHiddenFlags(const DexFile& dex_file) {
+ return GetMethodHiddenFlags(
+ "inmethod", kAccPublic, /* native */ true, FindClass("LMain;", dex_file), dex_file);
+ }
+
+ HiddenApiAccessFlags::ApiList GetSNMethodHiddenFlags(const DexFile& dex_file) {
+ return GetMethodHiddenFlags(
+ "snmethod", kAccProtected, /* native */ true, FindClass("LMain;", dex_file), dex_file);
+ }
+};
+
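+// Each test below writes one candidate signature into each of the three list
+// files and checks which hidden API flag the tool encoded for the member:
+// with no matching list the member stays whitelisted, and when several lists
+// match, the most restrictive one wins.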
+TEST_F(HiddenApiTest, InstanceFieldNoMatch) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->ifield:LBadType1;" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->ifield:LBadType2;" << std::endl;
+ OpenStream(blacklist) << "LMain;->ifield:LBadType3;" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kWhitelist, GetIFieldHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, InstanceFieldLightGreylistMatch) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->ifield:I" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->ifield:LBadType2;" << std::endl;
+ OpenStream(blacklist) << "LMain;->ifield:LBadType3;" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kLightGreylist, GetIFieldHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, InstanceFieldDarkGreylistMatch) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->ifield:LBadType1;" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->ifield:I" << std::endl;
+ OpenStream(blacklist) << "LMain;->ifield:LBadType3;" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kDarkGreylist, GetIFieldHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, InstanceFieldBlacklistMatch) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->ifield:LBadType1;" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->ifield:LBadType2;" << std::endl;
+ OpenStream(blacklist) << "LMain;->ifield:I" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetIFieldHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, InstanceFieldTwoListsMatch1) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->ifield:LBadType1;" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->ifield:I" << std::endl;
+ OpenStream(blacklist) << "LMain;->ifield:I" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetIFieldHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, InstanceFieldTwoListsMatch2) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->ifield:I" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->ifield:LBadType2;" << std::endl;
+ OpenStream(blacklist) << "LMain;->ifield:I" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetIFieldHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, InstanceFieldTwoListsMatch3) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->ifield:I" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->ifield:I" << std::endl;
+ OpenStream(blacklist) << "LMain;->ifield:LBadType3;" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kDarkGreylist, GetIFieldHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, StaticFieldNoMatch) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->sfield:LBadType1;" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->sfield:LBadType2;" << std::endl;
+ OpenStream(blacklist) << "LMain;->sfield:LBadType3;" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kWhitelist, GetSFieldHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, StaticFieldLightGreylistMatch) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->sfield:Ljava/lang/Object;" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->sfield:LBadType2;" << std::endl;
+ OpenStream(blacklist) << "LMain;->sfield:LBadType3;" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kLightGreylist, GetSFieldHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, StaticFieldDarkGreylistMatch) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->sfield:LBadType1;" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->sfield:Ljava/lang/Object;" << std::endl;
+ OpenStream(blacklist) << "LMain;->sfield:LBadType3;" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kDarkGreylist, GetSFieldHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, StaticFieldBlacklistMatch) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->sfield:LBadType1;" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->sfield:LBadType2;" << std::endl;
+ OpenStream(blacklist) << "LMain;->sfield:Ljava/lang/Object;" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetSFieldHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, StaticFieldTwoListsMatch1) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->sfield:LBadType1;" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->sfield:Ljava/lang/Object;" << std::endl;
+ OpenStream(blacklist) << "LMain;->sfield:Ljava/lang/Object;" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetSFieldHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, StaticFieldTwoListsMatch2) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->sfield:Ljava/lang/Object;" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->sfield:LBadType2;" << std::endl;
+ OpenStream(blacklist) << "LMain;->sfield:Ljava/lang/Object;" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetSFieldHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, StaticFieldTwoListsMatch3) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->sfield:Ljava/lang/Object;" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->sfield:Ljava/lang/Object;" << std::endl;
+ OpenStream(blacklist) << "LMain;->sfield:LBadType3;" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kDarkGreylist, GetSFieldHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, InstanceMethodNoMatch) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->imethod(LBadType1;)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->imethod(LBadType2;)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->imethod(LBadType3;)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kWhitelist, GetIMethodHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, InstanceMethodLightGreylistMatch) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->imethod(J)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->imethod(LBadType2;)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->imethod(LBadType3;)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kLightGreylist, GetIMethodHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, InstanceMethodDarkGreylistMatch) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->imethod(LBadType1;)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->imethod(J)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->imethod(LBadType3;)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kDarkGreylist, GetIMethodHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, InstanceMethodBlacklistMatch) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->imethod(LBadType1;)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->imethod(LBadType2;)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->imethod(J)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetIMethodHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, InstanceMethodTwoListsMatch1) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->imethod(LBadType1;)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->imethod(J)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->imethod(J)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetIMethodHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, InstanceMethodTwoListsMatch2) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->imethod(J)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->imethod(LBadType2;)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->imethod(J)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetIMethodHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, InstanceMethodTwoListsMatch3) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->imethod(J)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->imethod(J)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->imethod(LBadType3;)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kDarkGreylist, GetIMethodHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, StaticMethodNoMatch) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->smethod(LBadType1;)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->smethod(LBadType2;)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->smethod(LBadType3;)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kWhitelist, GetSMethodHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, StaticMethodLightGreylistMatch) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->smethod(Ljava/lang/Object;)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->smethod(LBadType2;)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->smethod(LBadType3;)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kLightGreylist, GetSMethodHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, StaticMethodDarkGreylistMatch) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->smethod(LBadType1;)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->smethod(Ljava/lang/Object;)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->smethod(LBadType3;)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kDarkGreylist, GetSMethodHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, StaticMethodBlacklistMatch) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->smethod(LBadType1;)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->smethod(LBadType2;)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->smethod(Ljava/lang/Object;)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetSMethodHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, StaticMethodTwoListsMatch1) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->smethod(LBadType1;)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->smethod(Ljava/lang/Object;)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->smethod(Ljava/lang/Object;)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetSMethodHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, StaticMethodTwoListsMatch2) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->smethod(Ljava/lang/Object;)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->smethod(LBadType2;)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->smethod(Ljava/lang/Object;)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetSMethodHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, StaticMethodTwoListsMatch3) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->smethod(Ljava/lang/Object;)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->smethod(Ljava/lang/Object;)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->smethod(LBadType3;)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kDarkGreylist, GetSMethodHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, InstanceNativeMethodNoMatch) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->inmethod(LBadType1;)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->inmethod(LBadType2;)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->inmethod(LBadType3;)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kWhitelist, GetINMethodHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, InstanceNativeMethodLightGreylistMatch) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->inmethod(C)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->inmethod(LBadType2;)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->inmethod(LBadType3;)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kLightGreylist, GetINMethodHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, InstanceNativeMethodDarkGreylistMatch) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->inmethod(LBadType1;)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->inmethod(C)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->inmethod(LBadType3;)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kDarkGreylist, GetINMethodHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, InstanceNativeMethodBlacklistMatch) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->inmethod(LBadType1;)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->inmethod(LBadType2;)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->inmethod(C)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetINMethodHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, InstanceNativeMethodTwoListsMatch1) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->inmethod(LBadType1;)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->inmethod(C)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->inmethod(C)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetINMethodHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, InstanceNativeMethodTwoListsMatch2) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->inmethod(C)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->inmethod(LBadType2;)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->inmethod(C)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetINMethodHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, InstanceNativeMethodTwoListsMatch3) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->inmethod(C)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->inmethod(C)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->inmethod(LBadType3;)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kDarkGreylist, GetINMethodHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, StaticNativeMethodNoMatch) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->snmethod(LBadType1;)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->snmethod(LBadType2;)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->snmethod(LBadType3;)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kWhitelist, GetSNMethodHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, StaticNativeMethodLightGreylistMatch) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->snmethod(Ljava/lang/Integer;)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->snmethod(LBadType2;)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->snmethod(LBadType3;)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kLightGreylist, GetSNMethodHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, StaticNativeMethodDarkGreylistMatch) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->snmethod(LBadType1;)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->snmethod(Ljava/lang/Integer;)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->snmethod(LBadType3;)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kDarkGreylist, GetSNMethodHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, StaticNativeMethodBlacklistMatch) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->snmethod(LBadType1;)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->snmethod(LBadType2;)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->snmethod(Ljava/lang/Integer;)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetSNMethodHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, StaticNativeMethodTwoListsMatch1) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->snmethod(LBadType1;)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->snmethod(Ljava/lang/Integer;)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->snmethod(Ljava/lang/Integer;)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetSNMethodHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, StaticNativeMethodTwoListsMatch2) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->snmethod(Ljava/lang/Integer;)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->snmethod(LBadType2;)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->snmethod(Ljava/lang/Integer;)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetSNMethodHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, StaticNativeMethodTwoListsMatch3) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->snmethod(Ljava/lang/Integer;)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->snmethod(Ljava/lang/Integer;)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->snmethod(LBadType3;)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kDarkGreylist, GetSNMethodHiddenFlags(*dex_file));
+}
+
+} // namespace art
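
The list files fed to the tests above use the dex member signature syntax: class descriptor, "->", member name, then a field type descriptor or a method proto. As a rough sketch (the Build* helpers below are made up for this note and are not part of the hiddenapi tool), the strings the tests write could be assembled like this:

// Illustrative only: assembles the same signature strings the tests write
// into the greylist/blacklist files, e.g. "LMain;->ifield:I" for a field
// and "LMain;->imethod(J)V" for a method.
#include <string>

std::string BuildFieldSignature(const std::string& class_desc,
                                const std::string& field_name,
                                const std::string& type_desc) {
  return class_desc + "->" + field_name + ":" + type_desc;
}

std::string BuildMethodSignature(const std::string& class_desc,
                                 const std::string& method_name,
                                 const std::string& proto) {
  return class_desc + "->" + method_name + proto;
}

// BuildFieldSignature("LMain;", "ifield", "I")       -> "LMain;->ifield:I"
// BuildMethodSignature("LMain;", "imethod", "(J)V")  -> "LMain;->imethod(J)V"
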
diff --git a/tools/jfuzz/run_dex_fuzz_test.py b/tools/jfuzz/run_dex_fuzz_test.py
index a25ef3fca5..203e03d678 100755
--- a/tools/jfuzz/run_dex_fuzz_test.py
+++ b/tools/jfuzz/run_dex_fuzz_test.py
@@ -106,14 +106,14 @@ class DexFuzzTester(object):
self.RunDexFuzz()
def CompileOnHost(self):
- """Compiles Test.java into classes.dex using either javac/dx or jack.
+    """Compiles Test.java into classes.dex using either javac/dx, d8, or jack.
Raises:
FatalError: error when compilation fails
"""
if self._dexer == 'dx' or self._dexer == 'd8':
dbg = '-g' if self._debug_info else '-g:none'
- if RunCommand(['javac', dbg, 'Test.java'],
+ if RunCommand(['javac', '--release=8', dbg, 'Test.java'],
out=None, err='jerr.txt', timeout=30) != RetCode.SUCCESS:
print('Unexpected error while running javac')
raise FatalError('Unexpected error while running javac')
diff --git a/tools/jfuzz/run_jfuzz_test.py b/tools/jfuzz/run_jfuzz_test.py
index 34180d993f..4a54a3a4f2 100755
--- a/tools/jfuzz/run_jfuzz_test.py
+++ b/tools/jfuzz/run_jfuzz_test.py
@@ -133,7 +133,7 @@ class TestRunnerWithHostCompilation(TestRunner):
def CompileOnHost(self):
if self._dexer == 'dx' or self._dexer == 'd8':
dbg = '-g' if self._debug_info else '-g:none'
- if RunCommand(['javac', dbg, 'Test.java'],
+ if RunCommand(['javac', '--release=8', dbg, 'Test.java'],
out=None, err=None, timeout=30) == RetCode.SUCCESS:
dx = 'dx' if self._dexer == 'dx' else 'd8-compat-dx'
retc = RunCommand([dx, '--dex', '--output=classes.dex'] + glob('*.class'),
@@ -169,7 +169,7 @@ class TestRunnerRIOnHost(TestRunner):
def CompileAndRunTest(self):
dbg = '-g' if self._debug_info else '-g:none'
- if RunCommand(['javac', dbg, 'Test.java'],
+ if RunCommand(['javac', '--release=8', dbg, 'Test.java'],
out=None, err=None, timeout=30) == RetCode.SUCCESS:
retc = RunCommand(['java', 'Test'], self.output_file, err=None)
else:
diff --git a/tools/prebuilt_libjdwp_art_failures.txt b/tools/prebuilt_libjdwp_art_failures.txt
index 7694a4c7e4..7a8a4dd35f 100644
--- a/tools/prebuilt_libjdwp_art_failures.txt
+++ b/tools/prebuilt_libjdwp_art_failures.txt
@@ -118,5 +118,12 @@
result: EXEC_FAILED,
bug: 69591477,
name: "org.apache.harmony.jpda.tests.jdwp.VirtualMachine.ExitTest#testExit001"
+},
+{
+  description: "Tests are flaky",
+ result: EXEC_FAILED,
+ bug: 70958370,
+ names: [ "org.apache.harmony.jpda.tests.jdwp.ObjectReference.EnableCollectionTest#testEnableCollection001",
+ "org.apache.harmony.jpda.tests.jdwp.MultiSession.EnableCollectionTest#testEnableCollection001" ]
}
]
diff --git a/tools/public.libraries.buildbot.txt b/tools/public.libraries.buildbot.txt
index 4b01796a0a..734fd1e50b 100644
--- a/tools/public.libraries.buildbot.txt
+++ b/tools/public.libraries.buildbot.txt
@@ -1,5 +1,6 @@
libart.so
libartd.so
+libdexfile.so
libbacktrace.so
libc.so
libc++.so
diff --git a/tools/run-jdwp-tests.sh b/tools/run-jdwp-tests.sh
index 2cf614d795..b512612175 100755
--- a/tools/run-jdwp-tests.sh
+++ b/tools/run-jdwp-tests.sh
@@ -286,6 +286,10 @@ fi
if [[ $mode != "ri" ]]; then
toolchain_args="--toolchain d8 --language CUR"
+ if [[ "x$with_jdwp_path" == "x" ]]; then
+ # Need to enable the internal jdwp implementation.
+ art_debugee="${art_debugee} -XjdwpProvider:internal"
+ fi
else
toolchain_args="--toolchain javac --language CUR"
fi