summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--Android.mk26
-rw-r--r--build/Android.bp38
-rw-r--r--build/Android.gtest.mk155
-rw-r--r--build/Android.oat.mk108
-rw-r--r--cmdline/cmdline_parser_test.cc2
-rw-r--r--cmdline/unit.h5
-rw-r--r--compiler/Android.bp3
-rw-r--r--compiler/common_compiler_test.cc124
-rw-r--r--compiler/common_compiler_test.h37
-rw-r--r--compiler/compiler.h6
-rw-r--r--compiler/debug/dwarf/dwarf_test.h4
-rw-r--r--compiler/debug/elf_debug_info_writer.h6
-rw-r--r--compiler/debug/elf_debug_line_writer.h4
-rw-r--r--compiler/debug/elf_debug_loc_writer.h6
-rw-r--r--compiler/dex/dex_to_dex_compiler.cc3
-rw-r--r--compiler/dex/dex_to_dex_decompiler_test.cc9
-rw-r--r--compiler/dex/inline_method_analyser.cc3
-rw-r--r--compiler/driver/compiled_method_storage.cc61
-rw-r--r--compiler/driver/compiled_method_storage_test.cc2
-rw-r--r--compiler/driver/compiler_driver.cc212
-rw-r--r--compiler/driver/compiler_driver.h82
-rw-r--r--compiler/driver/compiler_driver_test.cc4
-rw-r--r--compiler/driver/compiler_options.cc21
-rw-r--r--compiler/driver/compiler_options.h49
-rw-r--r--compiler/exception_test.cc11
-rw-r--r--compiler/jit/jit_compiler.cc41
-rw-r--r--compiler/jit/jit_compiler.h16
-rw-r--r--compiler/jni/quick/jni_compiler.cc17
-rw-r--r--compiler/jni/quick/jni_compiler.h4
-rw-r--r--compiler/linker/linker_patch.h43
-rw-r--r--compiler/optimizing/bounds_check_elimination_test.cc2
-rw-r--r--compiler/optimizing/code_generator.cc175
-rw-r--r--compiler/optimizing/code_generator.h10
-rw-r--r--compiler/optimizing/code_generator_arm64.cc400
-rw-r--r--compiler/optimizing/code_generator_arm64.h30
-rw-r--r--compiler/optimizing/code_generator_arm_vixl.cc108
-rw-r--r--compiler/optimizing/code_generator_arm_vixl.h13
-rw-r--r--compiler/optimizing/code_generator_mips.cc89
-rw-r--r--compiler/optimizing/code_generator_mips.h13
-rw-r--r--compiler/optimizing/code_generator_mips64.cc91
-rw-r--r--compiler/optimizing/code_generator_mips64.h13
-rw-r--r--compiler/optimizing/code_generator_vector_arm64.cc12
-rw-r--r--compiler/optimizing/code_generator_vector_x86.cc56
-rw-r--r--compiler/optimizing/code_generator_vector_x86_64.cc58
-rw-r--r--compiler/optimizing/code_generator_x86.cc104
-rw-r--r--compiler/optimizing/code_generator_x86.h18
-rw-r--r--compiler/optimizing/code_generator_x86_64.cc79
-rw-r--r--compiler/optimizing/code_generator_x86_64.h16
-rw-r--r--compiler/optimizing/codegen_test.cc41
-rw-r--r--compiler/optimizing/codegen_test_utils.h56
-rw-r--r--compiler/optimizing/common_arm64.h40
-rw-r--r--compiler/optimizing/constant_folding_test.cc5
-rw-r--r--compiler/optimizing/constructor_fence_redundancy_elimination.cc6
-rw-r--r--compiler/optimizing/dead_code_elimination_test.cc5
-rw-r--r--compiler/optimizing/emit_swap_mips_test.cc16
-rw-r--r--compiler/optimizing/gvn.cc5
-rw-r--r--compiler/optimizing/inliner.cc46
-rw-r--r--compiler/optimizing/instruction_builder.cc144
-rw-r--r--compiler/optimizing/instruction_builder.h19
-rw-r--r--compiler/optimizing/instruction_simplifier.cc57
-rw-r--r--compiler/optimizing/instruction_simplifier.h6
-rw-r--r--compiler/optimizing/instruction_simplifier_x86.cc149
-rw-r--r--compiler/optimizing/instruction_simplifier_x86.h44
-rw-r--r--compiler/optimizing/intrinsic_objects.cc120
-rw-r--r--compiler/optimizing/intrinsic_objects.h83
-rw-r--r--compiler/optimizing/intrinsics.cc376
-rw-r--r--compiler/optimizing/intrinsics.h54
-rw-r--r--compiler/optimizing/intrinsics_arm64.cc35
-rw-r--r--compiler/optimizing/intrinsics_arm_vixl.cc35
-rw-r--r--compiler/optimizing/intrinsics_mips.cc45
-rw-r--r--compiler/optimizing/intrinsics_mips64.cc41
-rw-r--r--compiler/optimizing/intrinsics_x86.cc68
-rw-r--r--compiler/optimizing/intrinsics_x86_64.cc47
-rw-r--r--compiler/optimizing/linearize_test.cc8
-rw-r--r--compiler/optimizing/live_ranges_test.cc38
-rw-r--r--compiler/optimizing/liveness_test.cc8
-rw-r--r--compiler/optimizing/loop_analysis.cc56
-rw-r--r--compiler/optimizing/loop_analysis.h57
-rw-r--r--compiler/optimizing/loop_optimization.cc138
-rw-r--r--compiler/optimizing/loop_optimization.h23
-rw-r--r--compiler/optimizing/loop_optimization_test.cc3
-rw-r--r--compiler/optimizing/nodes.cc21
-rw-r--r--compiler/optimizing/nodes.h49
-rw-r--r--compiler/optimizing/nodes_vector.h4
-rw-r--r--compiler/optimizing/optimization.cc17
-rw-r--r--compiler/optimizing/optimization.h1
-rw-r--r--compiler/optimizing/optimizing_cfi_test.cc16
-rw-r--r--compiler/optimizing/optimizing_compiler.cc55
-rw-r--r--compiler/optimizing/optimizing_compiler_stats.h1
-rw-r--r--compiler/optimizing/optimizing_unit_test.h72
-rw-r--r--compiler/optimizing/pc_relative_fixups_x86.cc24
-rw-r--r--compiler/optimizing/register_allocator_graph_color.cc16
-rw-r--r--compiler/optimizing/register_allocator_test.cc70
-rw-r--r--compiler/optimizing/scheduler.h8
-rw-r--r--compiler/optimizing/scheduler_test.cc2
-rw-r--r--compiler/optimizing/select_generator.cc12
-rw-r--r--compiler/optimizing/select_generator_test.cc96
-rw-r--r--compiler/optimizing/sharpening.cc70
-rw-r--r--compiler/optimizing/sharpening.h13
-rw-r--r--compiler/optimizing/ssa_builder.cc79
-rw-r--r--compiler/optimizing/ssa_builder.h9
-rw-r--r--compiler/optimizing/ssa_liveness_analysis.cc13
-rw-r--r--compiler/optimizing/ssa_liveness_analysis.h22
-rw-r--r--compiler/optimizing/ssa_liveness_analysis_test.cc32
-rw-r--r--compiler/optimizing/stack_map_stream.cc264
-rw-r--r--compiler/optimizing/stack_map_stream.h97
-rw-r--r--compiler/optimizing/stack_map_test.cc361
-rw-r--r--compiler/optimizing/superblock_cloner.cc22
-rw-r--r--compiler/optimizing/superblock_cloner_test.cc70
-rw-r--r--compiler/utils/arm/jni_macro_assembler_arm_vixl.cc5
-rw-r--r--compiler/utils/arm64/jni_macro_assembler_arm64.cc5
-rw-r--r--compiler/utils/assembler_thumb_test_expected.cc.inc2
-rw-r--r--compiler/utils/dedupe_set-inl.h6
-rw-r--r--compiler/utils/managed_register.h4
-rw-r--r--compiler/utils/mips/assembler_mips.cc5
-rw-r--r--compiler/utils/mips64/assembler_mips64.cc5
-rw-r--r--compiler/utils/x86/assembler_x86.cc145
-rw-r--r--compiler/utils/x86/assembler_x86.h11
-rw-r--r--compiler/utils/x86/jni_macro_assembler_x86.cc3
-rw-r--r--compiler/utils/x86_64/assembler_x86_64.cc142
-rw-r--r--compiler/utils/x86_64/assembler_x86_64.h15
-rw-r--r--compiler/utils/x86_64/jni_macro_assembler_x86_64.cc3
-rw-r--r--compiler/verifier_deps_test.cc2
-rw-r--r--dex2oat/dex2oat.cc227
-rw-r--r--dex2oat/dex2oat_image_test.cc5
-rw-r--r--dex2oat/dex2oat_options.cc8
-rw-r--r--dex2oat/dex2oat_options.def2
-rw-r--r--dex2oat/dex2oat_options.h1
-rw-r--r--dex2oat/dex2oat_test.cc12
-rw-r--r--dex2oat/linker/arm/relative_patcher_arm_base.cc2
-rw-r--r--dex2oat/linker/arm/relative_patcher_thumb2_test.cc5
-rw-r--r--dex2oat/linker/arm64/relative_patcher_arm64.cc7
-rw-r--r--dex2oat/linker/arm64/relative_patcher_arm64_test.cc5
-rw-r--r--dex2oat/linker/elf_writer_quick.cc45
-rw-r--r--dex2oat/linker/elf_writer_quick.h4
-rw-r--r--dex2oat/linker/image_test.h83
-rw-r--r--dex2oat/linker/image_writer.cc150
-rw-r--r--dex2oat/linker/image_writer.h28
-rw-r--r--dex2oat/linker/oat_writer.cc107
-rw-r--r--dex2oat/linker/oat_writer.h39
-rw-r--r--dex2oat/linker/oat_writer_test.cc67
-rw-r--r--dex2oat/linker/relative_patcher_test.h52
-rw-r--r--dexdump/dexdump.cc2
-rw-r--r--dexdump/dexdump_cfg.cc2
-rw-r--r--dexlayout/compact_dex_writer.cc68
-rw-r--r--dexlayout/compact_dex_writer.h2
-rw-r--r--dexlayout/dex_ir.cc899
-rw-r--r--dexlayout/dex_ir.h654
-rw-r--r--dexlayout/dex_ir_builder.cc1117
-rw-r--r--dexlayout/dex_verify.cc91
-rw-r--r--dexlayout/dex_verify.h8
-rw-r--r--dexlayout/dex_visualize.cc14
-rw-r--r--dexlayout/dex_writer.cc204
-rw-r--r--dexlayout/dexdiag.cc4
-rw-r--r--dexlayout/dexlayout.cc152
-rw-r--r--dexlayout/dexlayout_test.cc4
-rw-r--r--imgdiag/imgdiag.cc8
-rw-r--r--libartbase/Android.bp2
-rw-r--r--libartbase/base/arena_allocator.h29
-rw-r--r--libartbase/base/arena_allocator_test.cc8
-rw-r--r--libartbase/base/arena_containers.h8
-rw-r--r--libartbase/base/atomic.h14
-rw-r--r--libartbase/base/bit_memory_region.h92
-rw-r--r--libartbase/base/bit_table.h348
-rw-r--r--libartbase/base/bit_table_test.cc131
-rw-r--r--libartbase/base/common_art_test.h20
-rw-r--r--libartbase/base/data_hash.h107
-rw-r--r--libartbase/base/file_utils_test.cc5
-rw-r--r--libartbase/base/fuchsia_compat.h36
-rw-r--r--libartbase/base/globals.h15
-rw-r--r--libartbase/base/hash_map.h9
-rw-r--r--libartbase/base/hash_set.h233
-rw-r--r--libartbase/base/hash_set_test.cc119
-rw-r--r--libartbase/base/indenter.h4
-rw-r--r--libartbase/base/iteration_range.h6
-rw-r--r--libartbase/base/malloc_arena_pool.cc6
-rw-r--r--libartbase/base/mem_map.cc75
-rw-r--r--libartbase/base/mem_map.h22
-rw-r--r--libartbase/base/mem_map_fuchsia.cc144
-rw-r--r--libartbase/base/mem_map_test.cc45
-rw-r--r--libartbase/base/mem_map_unix.cc35
-rw-r--r--libartbase/base/memory_tool.h56
-rw-r--r--libartbase/base/scoped_arena_containers.h10
-rw-r--r--libartbase/base/stats.h60
-rw-r--r--libartbase/base/utils.h14
-rw-r--r--libartbase/base/variant_map_test.cc2
-rw-r--r--libdexfile/dex/class_accessor-inl.h19
-rw-r--r--libdexfile/dex/class_accessor.h13
-rw-r--r--libdexfile/dex/class_accessor_test.cc3
-rw-r--r--libdexfile/dex/dex_file-inl.h4
-rw-r--r--libdexfile/dex/dex_file.cc9
-rw-r--r--libdexfile/dex/dex_file.h7
-rw-r--r--libdexfile/dex/dex_file_tracking_registrar.cc3
-rw-r--r--libdexfile/dex/dex_file_verifier.cc6
-rw-r--r--libdexfile/dex/dex_instruction_test.cc4
-rw-r--r--libdexfile/dex/invoke_type.h3
-rw-r--r--libprofile/profile/profile_compilation_info.cc14
-rw-r--r--libprofile/profile/profile_compilation_info.h5
-rw-r--r--oatdump/oatdump.cc361
-rw-r--r--oatdump/oatdump_test.h2
-rw-r--r--openjdkjvmti/ti_ddms.cc2
-rw-r--r--patchoat/patchoat.cc6
-rw-r--r--patchoat/patchoat_test.cc41
-rw-r--r--profman/profman.cc6
-rw-r--r--runtime/Android.bp13
-rw-r--r--runtime/arch/arm/quick_entrypoints_arm.S93
-rw-r--r--runtime/arch/arm64/quick_entrypoints_arm64.S94
-rw-r--r--runtime/arch/mips/entrypoints_init_mips.cc3
-rw-r--r--runtime/arch/mips/quick_entrypoints_mips.S85
-rw-r--r--runtime/arch/mips64/quick_entrypoints_mips64.S88
-rw-r--r--runtime/arch/x86/instruction_set_features_x86.cc21
-rw-r--r--runtime/arch/x86/instruction_set_features_x86.h2
-rw-r--r--runtime/arch/x86/quick_entrypoints_x86.S126
-rw-r--r--runtime/arch/x86_64/quick_entrypoints_x86_64.S86
-rw-r--r--runtime/art_method.cc4
-rw-r--r--runtime/asm_support.h2
-rw-r--r--runtime/base/mem_map_arena_pool.cc2
-rw-r--r--runtime/base/mutex-inl.h4
-rw-r--r--runtime/base/mutex.h5
-rw-r--r--runtime/check_reference_map_visitor.h8
-rw-r--r--runtime/class_linker.cc67
-rw-r--r--runtime/class_linker.h2
-rw-r--r--runtime/class_loader_utils.h8
-rw-r--r--runtime/class_table.cc22
-rw-r--r--runtime/common_runtime_test.cc2
-rw-r--r--runtime/common_runtime_test.h7
-rw-r--r--runtime/dex_register_location.cc50
-rw-r--r--runtime/dex_register_location.h32
-rw-r--r--runtime/entrypoints/entrypoint_utils-inl.h32
-rw-r--r--runtime/entrypoints/entrypoint_utils.cc10
-rw-r--r--runtime/entrypoints/quick/quick_default_externs.h4
-rw-r--r--runtime/entrypoints/quick/quick_default_init_entrypoints.h1
-rw-r--r--runtime/entrypoints/quick/quick_dexcache_entrypoints.cc2
-rw-r--r--runtime/entrypoints/quick/quick_entrypoints_list.h1
-rw-r--r--runtime/entrypoints/quick/quick_trampoline_entrypoints.cc157
-rw-r--r--runtime/entrypoints_order_test.cc4
-rw-r--r--runtime/exec_utils_test.cc34
-rw-r--r--runtime/gc/allocator/rosalloc.h4
-rw-r--r--runtime/gc/collector/concurrent_copying-inl.h33
-rw-r--r--runtime/gc/collector/concurrent_copying.cc182
-rw-r--r--runtime/gc/collector/concurrent_copying.h40
-rw-r--r--runtime/gc/collector/semi_space.cc3
-rw-r--r--runtime/gc/heap-inl.h23
-rw-r--r--runtime/gc/heap.cc4
-rw-r--r--runtime/gc/heap.h17
-rw-r--r--runtime/gc/heap_verification_test.cc7
-rw-r--r--runtime/gc/space/image_space.cc8
-rw-r--r--runtime/gc/space/large_object_space.cc5
-rw-r--r--runtime/gc/space/memory_tool_malloc_space-inl.h141
-rw-r--r--runtime/gc/space/rosalloc_space.cc6
-rw-r--r--runtime/gc/space/rosalloc_space.h4
-rw-r--r--runtime/hprof/hprof.cc1
-rw-r--r--runtime/image.cc2
-rw-r--r--runtime/image.h12
-rw-r--r--runtime/instrumentation.cc28
-rw-r--r--runtime/intern_table.cc18
-rw-r--r--runtime/interpreter/interpreter_common.cc115
-rw-r--r--runtime/interpreter/interpreter_common.h24
-rw-r--r--runtime/interpreter/mterp/arm/op_mul_long.S1
-rw-r--r--runtime/interpreter/mterp/out/mterp_arm.S1
-rw-r--r--runtime/interpreter/unstarted_runtime.cc30
-rw-r--r--runtime/interpreter/unstarted_runtime_test.cc232
-rw-r--r--runtime/jdwp_provider.h23
-rw-r--r--runtime/jit/jit.cc12
-rw-r--r--runtime/jit/jit_code_cache.cc7
-rw-r--r--runtime/jit/jit_code_cache.h12
-rw-r--r--runtime/jit/profiling_info_test.cc1
-rw-r--r--runtime/jni/check_jni.cc (renamed from runtime/check_jni.cc)20
-rw-r--r--runtime/jni/check_jni.h (renamed from runtime/check_jni.h)6
-rw-r--r--runtime/jni/java_vm_ext.cc6
-rw-r--r--runtime/jni/jni_internal.cc62
-rw-r--r--runtime/mirror/class.cc16
-rw-r--r--runtime/mirror/dex_cache-inl.h12
-rw-r--r--runtime/mirror/dex_cache.h4
-rw-r--r--runtime/mirror/object-inl.h532
-rw-r--r--runtime/mirror/object-readbarrier-inl.h101
-rw-r--r--runtime/mirror/object.cc14
-rw-r--r--runtime/mirror/object.h164
-rw-r--r--runtime/mirror/object_array-inl.h7
-rw-r--r--runtime/mirror/var_handle.cc47
-rw-r--r--runtime/monitor.cc23
-rw-r--r--runtime/native/java_lang_reflect_Constructor.cc11
-rw-r--r--runtime/native/sun_misc_Unsafe.cc16
-rw-r--r--runtime/native_stack_dump.cc5
-rw-r--r--runtime/oat.h4
-rw-r--r--runtime/oat_file.cc62
-rw-r--r--runtime/oat_file.h2
-rw-r--r--runtime/oat_file_assistant.cc6
-rw-r--r--runtime/oat_quick_method_header.cc2
-rw-r--r--runtime/oat_quick_method_header.h2
-rw-r--r--runtime/proxy_test.cc15
-rw-r--r--runtime/quick_exception_handler.cc41
-rw-r--r--runtime/read_barrier-inl.h14
-rw-r--r--runtime/reflection.cc140
-rw-r--r--runtime/reflection.h12
-rw-r--r--runtime/runtime.cc113
-rw-r--r--runtime/runtime.h2
-rw-r--r--runtime/runtime_callbacks_test.cc3
-rw-r--r--runtime/runtime_options.def2
-rw-r--r--runtime/stack.cc60
-rw-r--r--runtime/stack.h13
-rw-r--r--runtime/stack_map.cc219
-rw-r--r--runtime/stack_map.h379
-rw-r--r--runtime/subtype_check.h18
-rw-r--r--runtime/subtype_check_info_test.cc4
-rw-r--r--runtime/subtype_check_test.cc16
-rw-r--r--runtime/thread.cc39
-rw-r--r--runtime/thread.h16
-rw-r--r--runtime/verifier/method_verifier.cc51
-rw-r--r--runtime/verify_object.h4
-rw-r--r--runtime/well_known_classes.cc11
-rw-r--r--runtime/well_known_classes.h1
-rw-r--r--runtime/write_barrier-inl.h56
-rw-r--r--runtime/write_barrier.h62
-rw-r--r--sigchainlib/sigchain_test.cc2
-rw-r--r--test/003-omnibus-opcodes/build24
-rwxr-xr-xtest/004-JniTest/build40
-rw-r--r--test/004-ReferenceMap/build30
-rw-r--r--test/004-ReferenceMap/classes.dexbin0 -> 1108 bytes
-rw-r--r--test/004-StackWalk/build29
-rw-r--r--test/004-StackWalk/classes.dexbin0 -> 3912 bytes
-rwxr-xr-xtest/004-ThreadStress/run24
-rw-r--r--test/004-ThreadStress/src-art/Main.java8
-rw-r--r--test/005-annotations/build34
-rw-r--r--test/022-interface/build22
-rw-r--r--test/056-const-string-jumbo/build8
-rw-r--r--test/066-mismatched-super/build (renamed from test/569-checker-pattern-replacement/build)5
-rw-r--r--test/089-many-methods/build11
-rwxr-xr-xtest/089-many-methods/check6
-rw-r--r--test/089-many-methods/expected.txt7
-rwxr-xr-xtest/091-override-package-private-method/build25
-rw-r--r--test/111-unresolvable-exception/build27
-rw-r--r--test/113-multidex/build37
-rw-r--r--test/113-multidex/src-multidex/Main.java (renamed from test/113-multidex/src/Main.java)0
-rw-r--r--test/117-nopatchoat/nopatchoat.cc6
-rw-r--r--test/124-missing-classes/build28
-rw-r--r--test/126-miranda-multidex/build39
-rwxr-xr-xtest/127-checker-secondarydex/build31
-rw-r--r--test/137-cfi/cfi.cc85
-rw-r--r--test/137-cfi/expected.txt5
-rwxr-xr-xtest/137-cfi/run2
-rw-r--r--test/137-cfi/src-multidex/Base.java10
-rw-r--r--test/137-cfi/src/Main.java187
-rwxr-xr-xtest/138-duplicate-classes-check2/build32
-rw-r--r--test/1948-obsolete-const-method-handle/build4
-rw-r--r--test/303-verification-stress/build9
-rw-r--r--test/411-checker-hdiv-hrem-pow2/expected.txt0
-rw-r--r--test/411-checker-hdiv-hrem-pow2/info.txt2
-rw-r--r--test/411-checker-hdiv-hrem-pow2/src/DivTest.java251
-rw-r--r--test/411-checker-hdiv-hrem-pow2/src/Main.java22
-rw-r--r--test/411-checker-hdiv-hrem-pow2/src/RemTest.java257
-rwxr-xr-xtest/442-checker-constant-folding/build20
-rw-r--r--test/458-checker-instruct-simplification/src/Main.java84
-rw-r--r--test/466-get-live-vreg/get_live_vreg_jni.cc46
-rw-r--r--test/466-get-live-vreg/src/Main.java31
-rw-r--r--test/477-checker-bound-type/src/Main.java76
-rw-r--r--test/530-checker-lse/smali/Main.smali32
-rw-r--r--test/530-checker-lse/src/Main.java22
-rw-r--r--test/530-checker-lse2/build20
-rw-r--r--test/530-checker-peel-unroll/src/Main.java108
-rw-r--r--test/551-checker-shifter-operand/src/Main.java136
-rw-r--r--test/563-checker-fakestring/smali/TestCase.smali118
-rw-r--r--test/563-checker-fakestring/src/Main.java44
-rw-r--r--test/565-checker-condition-liveness/info.txt2
-rw-r--r--test/565-checker-condition-liveness/src/Main.java84
-rwxr-xr-xtest/565-checker-doublenegbitwise/build20
-rw-r--r--test/565-checker-doublenegbitwise/smali/SmaliTests.smali588
-rw-r--r--test/565-checker-doublenegbitwise/src/Main.java301
-rw-r--r--test/565-checker-rotate/build20
-rw-r--r--test/565-checker-rotate/smali/Main2.smali165
-rw-r--r--test/565-checker-rotate/src-art/Main.java546
-rw-r--r--test/565-checker-rotate/src/Main.java620
-rw-r--r--test/566-checker-signum/build20
-rw-r--r--test/566-checker-signum/smali/Main2.smali83
-rw-r--r--test/566-checker-signum/src-art/Main.java196
-rw-r--r--test/566-checker-signum/src/Main.java219
-rw-r--r--test/570-checker-osr/build20
-rw-r--r--test/583-checker-zero/build20
-rw-r--r--test/618-checker-induction/build20
-rw-r--r--test/618-checker-induction/src/Main.java20
-rw-r--r--test/626-checker-arm64-scratch-register/build20
-rw-r--r--test/626-checker-arm64-scratch-register/smali/Main2.smali1768
-rw-r--r--test/626-checker-arm64-scratch-register/src-art/Main.java23
-rw-r--r--test/626-checker-arm64-scratch-register/src/Main.java279
-rw-r--r--test/638-no-line-number/build5
-rw-r--r--test/638-no-line-number/expected.txt2
-rw-r--r--test/639-checker-code-sinking/src/Main.java31
-rw-r--r--test/646-checker-hadd-alt-char/build20
-rw-r--r--test/646-checker-hadd-alt-char/src/Main.java4
-rw-r--r--test/646-checker-hadd-alt-short/build20
-rw-r--r--test/646-checker-hadd-alt-short/src/Main.java12
-rw-r--r--test/646-checker-hadd-char/build20
-rw-r--r--test/646-checker-hadd-char/src/Main.java4
-rw-r--r--test/646-checker-hadd-short/build20
-rw-r--r--test/646-checker-hadd-short/src/Main.java20
-rw-r--r--test/660-checker-simd-sad-byte/build20
-rw-r--r--test/660-checker-simd-sad-byte/src/Main.java12
-rw-r--r--test/660-checker-simd-sad-char/build20
-rw-r--r--test/660-checker-simd-sad-char/src/Main.java12
-rw-r--r--test/660-checker-simd-sad-int/build20
-rw-r--r--test/660-checker-simd-sad-int/src/Main.java12
-rw-r--r--test/660-checker-simd-sad-short/build20
-rw-r--r--test/660-checker-simd-sad-short/src/Main.java24
-rw-r--r--test/660-checker-simd-sad-short2/build20
-rw-r--r--test/660-checker-simd-sad-short2/src/Main.java36
-rw-r--r--test/660-checker-simd-sad-short3/build20
-rw-r--r--test/660-checker-simd-sad-short3/src/Main.java32
-rw-r--r--test/661-checker-simd-reduc/build20
-rw-r--r--test/661-checker-simd-reduc/src/Main.java12
-rw-r--r--test/672-checker-throw-method/build20
-rw-r--r--test/672-checker-throw-method/src/Main.java8
-rw-r--r--test/673-checker-throw-vmethod/build20
-rw-r--r--test/673-checker-throw-vmethod/src/Main.java8
-rw-r--r--test/679-checker-minmax/src/Main.java204
-rw-r--r--test/704-multiply-accumulate/build20
-rw-r--r--test/706-checker-scheduler/build20
-rw-r--r--test/706-checker-scheduler/src/Main.java4
-rwxr-xr-xtest/712-varhandle-invocations/build6
-rw-r--r--test/715-clinit-implicit-parameter-annotations/build4
-rw-r--r--test/717-integer-value-of/expected.txt1
-rw-r--r--test/717-integer-value-of/info.txt2
-rw-r--r--test/717-integer-value-of/src/Main.java134
-rw-r--r--test/804-class-extends-itself/build (renamed from test/551-checker-shifter-operand/build)8
-rw-r--r--test/910-methods/check27
-rw-r--r--test/910-methods/expected.txt2
-rw-r--r--test/910-methods/expected_d8.diff4
-rw-r--r--test/911-get-stack-trace/expected.txt76
-rw-r--r--test/913-heaps/check22
-rw-r--r--test/913-heaps/expected.txt39
-rw-r--r--test/913-heaps/expected_d8.diff70
-rw-r--r--test/913-heaps/heaps.cc6
-rwxr-xr-xtest/952-invoke-custom/build75
-rw-r--r--test/952-invoke-custom/src/TestReturnValues.java330
-rw-r--r--test/952-invoke-custom/util-src/annotations/BootstrapMethod.java (renamed from test/952-invoke-custom/src/annotations/BootstrapMethod.java)0
-rw-r--r--test/952-invoke-custom/util-src/annotations/CalledByIndy.java (renamed from test/952-invoke-custom/src/annotations/CalledByIndy.java)0
-rw-r--r--test/952-invoke-custom/util-src/annotations/Constant.java (renamed from test/952-invoke-custom/src/annotations/Constant.java)0
-rw-r--r--test/952-invoke-custom/util-src/transformer/IndyTransformer.java (renamed from test/952-invoke-custom/src/transformer/IndyTransformer.java)7
-rw-r--r--test/956-methodhandles/expected.txt1
-rw-r--r--test/956-methodhandles/src/Main.java84
-rwxr-xr-xtest/961-default-iface-resolution-gen/build3
-rwxr-xr-xtest/964-default-iface-init-gen/build3
-rwxr-xr-xtest/979-const-method-handle/build74
-rw-r--r--test/Android.run-test.mk9
-rw-r--r--test/common/runtime_state.cc4
-rwxr-xr-xtest/etc/default-build158
-rwxr-xr-xtest/etc/run-test-jar3
-rw-r--r--test/knownfailures.json21
-rwxr-xr-xtest/run-test38
-rw-r--r--test/testrunner/env.py3
-rw-r--r--test/testrunner/target_config.py16
-rwxr-xr-xtest/testrunner/testrunner.py23
-rw-r--r--test/ti-agent/breakpoint_helper.cc2
-rw-r--r--test/ti-stress/stress.cc26
-rw-r--r--test/valgrind-suppressions.txt87
-rw-r--r--test/valgrind-target-suppressions.txt76
-rw-r--r--tools/ahat/Android.mk4
-rw-r--r--tools/ahat/AndroidTest.xml23
-rw-r--r--tools/ahat/etc/ahat-tests.mf2
-rw-r--r--tools/ahat/etc/ahat_api.txt49
-rw-r--r--tools/ahat/src/main/com/android/ahat/AsciiProgress.java69
-rw-r--r--tools/ahat/src/main/com/android/ahat/Main.java13
-rw-r--r--tools/ahat/src/main/com/android/ahat/ObjectHandler.java21
-rw-r--r--tools/ahat/src/main/com/android/ahat/Summarizer.java19
-rw-r--r--tools/ahat/src/main/com/android/ahat/dominators/DominatorsComputation.java413
-rw-r--r--tools/ahat/src/main/com/android/ahat/heapdump/AhatArrayInstance.java27
-rw-r--r--tools/ahat/src/main/com/android/ahat/heapdump/AhatClassInstance.java70
-rw-r--r--tools/ahat/src/main/com/android/ahat/heapdump/AhatClassObj.java5
-rw-r--r--tools/ahat/src/main/com/android/ahat/heapdump/AhatInstance.java173
-rw-r--r--tools/ahat/src/main/com/android/ahat/heapdump/AhatSnapshot.java8
-rw-r--r--tools/ahat/src/main/com/android/ahat/heapdump/DominatorReferenceIterator.java2
-rw-r--r--tools/ahat/src/main/com/android/ahat/heapdump/Instances.java4
-rw-r--r--tools/ahat/src/main/com/android/ahat/heapdump/Parser.java159
-rw-r--r--tools/ahat/src/main/com/android/ahat/heapdump/Reachability.java70
-rw-r--r--tools/ahat/src/main/com/android/ahat/heapdump/Reference.java9
-rw-r--r--tools/ahat/src/main/com/android/ahat/heapdump/SuperRoot.java2
-rw-r--r--tools/ahat/src/main/com/android/ahat/heapdump/Value.java5
-rw-r--r--tools/ahat/src/main/com/android/ahat/progress/NullProgress.java28
-rw-r--r--tools/ahat/src/main/com/android/ahat/progress/Progress.java65
-rw-r--r--tools/ahat/src/test-dump/DumpedStuff.java14
-rw-r--r--tools/ahat/src/test-dump/Main.java2
-rw-r--r--tools/ahat/src/test/com/android/ahat/AhatTestSuite.java (renamed from tools/ahat/src/test/com/android/ahat/Tests.java)42
-rw-r--r--tools/ahat/src/test/com/android/ahat/DiffTest.java4
-rw-r--r--tools/ahat/src/test/com/android/ahat/DominatorsTest.java31
-rw-r--r--tools/ahat/src/test/com/android/ahat/InstanceTest.java64
-rw-r--r--tools/art14
-rw-r--r--tools/build/var_list3
-rwxr-xr-xtools/cleanup-buildbot-device.sh35
-rwxr-xr-xtools/desugar.sh95
-rw-r--r--tools/dexanalyze/Android.bp1
-rw-r--r--tools/dexanalyze/dexanalyze.cc40
-rw-r--r--tools/dexanalyze/dexanalyze_bytecode.cc547
-rw-r--r--tools/dexanalyze/dexanalyze_bytecode.h103
-rw-r--r--tools/dexanalyze/dexanalyze_experiments.cc268
-rw-r--r--tools/dexanalyze/dexanalyze_experiments.h71
-rw-r--r--tools/hiddenapi/hiddenapi.cc687
-rw-r--r--tools/hiddenapi/hiddenapi_test.cc69
-rwxr-xr-xtools/run-jdwp-tests.sh7
-rwxr-xr-xtools/teardown-buildbot-device.sh166
-rw-r--r--tools/ti-fast/tifast.cc26
-rw-r--r--tools/veridex/Android.mk4
-rw-r--r--tools/veridex/flow_analysis.cc5
-rw-r--r--tools/wrapagentproperties/wrapagentproperties.cc12
502 files changed, 18410 insertions, 11261 deletions
diff --git a/Android.mk b/Android.mk
index 08a1a105b8..1c946292ef 100644
--- a/Android.mk
+++ b/Android.mk
@@ -245,19 +245,6 @@ endif
test-art-host-dexdump: $(addprefix $(HOST_OUT_EXECUTABLES)/, dexdump2 dexlist)
ANDROID_HOST_OUT=$(realpath $(HOST_OUT)) art/test/dexdump/run-all-tests
-# Valgrind.
-.PHONY: valgrind-test-art-host
-valgrind-test-art-host: valgrind-test-art-host-gtest
- $(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
-
-.PHONY: valgrind-test-art-host32
-valgrind-test-art-host32: valgrind-test-art-host-gtest32
- $(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
-
-.PHONY: valgrind-test-art-host64
-valgrind-test-art-host64: valgrind-test-art-host-gtest64
- $(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
-
########################################################################
# target test rules
@@ -332,19 +319,6 @@ test-art-target-jit$(2ND_ART_PHONY_TEST_TARGET_SUFFIX): test-art-target-run-test
$(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
endif
-# Valgrind.
-.PHONY: valgrind-test-art-target
-valgrind-test-art-target: valgrind-test-art-target-gtest
- $(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
-
-.PHONY: valgrind-test-art-target32
-valgrind-test-art-target32: valgrind-test-art-target-gtest32
- $(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
-
-.PHONY: valgrind-test-art-target64
-valgrind-test-art-target64: valgrind-test-art-target-gtest64
- $(hide) $(call ART_TEST_PREREQ_FINISHED,$@)
-
#######################
# Fake packages for ART
diff --git a/build/Android.bp b/build/Android.bp
index 2a5598fb7a..b7d2cbc070 100644
--- a/build/Android.bp
+++ b/build/Android.bp
@@ -17,6 +17,27 @@ bootstrap_go_package {
pluginFor: ["soong_build"],
}
+art_clang_tidy_errors = [
+ // Protect scoped things like MutexLock.
+ "bugprone-unused-raii",
+]
+// Should be: strings.Join(art_clang_tidy_errors, ",").
+art_clang_tidy_errors_str = "bugprone-unused-raii"
+
+art_clang_tidy_disabled = [
+ "-google-default-arguments",
+ // We have local stores that are only used for debug checks.
+ "-clang-analyzer-deadcode.DeadStores",
+ // We are OK with some static globals and that they can, in theory, throw.
+ "-cert-err58-cpp",
+ // We have lots of C-style variadic functions, and are OK with them. JNI ensures
+ // that working around this warning would be extra-painful.
+ "-cert-dcl50-cpp",
+ // No exceptions.
+ "-misc-noexcept-move-constructor",
+ "-performance-noexcept-move-constructor",
+]
+
art_global_defaults {
// Additional flags are computed by art.go
@@ -127,23 +148,10 @@ art_global_defaults {
},
include_dirs: [
- "external/valgrind/include",
- "external/valgrind",
"external/vixl/src",
],
- tidy_checks: [
- "-google-default-arguments",
- // We have local stores that are only used for debug checks.
- "-clang-analyzer-deadcode.DeadStores",
- // We are OK with some static globals and that they can, in theory, throw.
- "-cert-err58-cpp",
- // We have lots of C-style variadic functions, and are OK with them. JNI ensures
- // that working around this warning would be extra-painful.
- "-cert-dcl50-cpp",
- // No exceptions.
- "-misc-noexcept-move-constructor",
- ],
+ tidy_checks: art_clang_tidy_errors + art_clang_tidy_disabled,
tidy_flags: [
// The static analyzer treats DCHECK as always enabled; we sometimes get
@@ -153,6 +161,8 @@ art_global_defaults {
// void foo() { CHECK(kIsFooEnabled); /* do foo... */ }
// not being marked noreturn if kIsFooEnabled is false.
"-extra-arg=-Wno-missing-noreturn",
+ // Use art_clang_tidy_errors for build errors.
+ "-warnings-as-errors=" + art_clang_tidy_errors_str,
],
}
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 7272661860..7d1115eec0 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -408,15 +408,9 @@ endif
ART_TEST_HOST_GTEST$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
ART_TEST_HOST_GTEST$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
ART_TEST_HOST_GTEST_RULES :=
-ART_TEST_HOST_VALGRIND_GTEST$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_VALGRIND_GTEST$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_VALGRIND_GTEST_RULES :=
ART_TEST_TARGET_GTEST$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
ART_TEST_TARGET_GTEST$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
ART_TEST_TARGET_GTEST_RULES :=
-ART_TEST_TARGET_VALGRIND_GTEST$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_VALGRIND_GTEST$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_VALGRIND_GTEST_RULES :=
ART_TEST_HOST_GTEST_DEPENDENCIES :=
ART_GTEST_TARGET_ANDROID_ROOT := '/system'
@@ -424,40 +418,6 @@ ifneq ($(ART_TEST_ANDROID_ROOT),)
ART_GTEST_TARGET_ANDROID_ROOT := $(ART_TEST_ANDROID_ROOT)
endif
-ART_VALGRIND_TARGET_DEPENDENCIES :=
-
-# Has to match list in external/valgrind/Android.build_one.mk
-ART_VALGRIND_SUPPORTED_ARCH := arm arm64 x86_64
-
-# Valgrind is not supported for x86
-ifneq (,$(filter $(ART_VALGRIND_SUPPORTED_ARCH),$(TARGET_ARCH)))
-art_vg_arch := $(if $(filter x86_64,$(TARGET_ARCH)),amd64,$(TARGET_ARCH))
-ART_VALGRIND_TARGET_DEPENDENCIES += \
- $(TARGET_OUT_EXECUTABLES)/valgrind \
- $(TARGET_OUT_SHARED_LIBRARIES)/valgrind/memcheck-$(art_vg_arch)-linux \
- $(TARGET_OUT_SHARED_LIBRARIES)/valgrind/vgpreload_core-$(art_vg_arch)-linux.so \
- $(TARGET_OUT_SHARED_LIBRARIES)/valgrind/vgpreload_memcheck-$(art_vg_arch)-linux.so \
- $(TARGET_OUT_SHARED_LIBRARIES)/valgrind/default.supp
-art_vg_arch :=
-endif
-
-ifdef TARGET_2ND_ARCH
-ifneq (,$(filter $(ART_VALGRIND_SUPPORTED_ARCH),$(TARGET_2ND_ARCH)))
-ART_VALGRIND_TARGET_DEPENDENCIES += \
- $(TARGET_OUT_SHARED_LIBRARIES)/valgrind/memcheck-$(TARGET_2ND_ARCH)-linux \
- $(TARGET_OUT_SHARED_LIBRARIES)/valgrind/vgpreload_core-$(TARGET_2ND_ARCH)-linux.so \
- $(TARGET_OUT_SHARED_LIBRARIES)/valgrind/vgpreload_memcheck-$(TARGET_2ND_ARCH)-linux.so
-endif
-endif
-
-include $(CLEAR_VARS)
-LOCAL_MODULE := valgrind-target-suppressions.txt
-LOCAL_MODULE_CLASS := ETC
-LOCAL_MODULE_TAGS := optional
-LOCAL_SRC_FILES := test/valgrind-target-suppressions.txt
-LOCAL_MODULE_PATH := $(ART_TARGET_TEST_OUT)
-include $(BUILD_PREBUILT)
-
# Define a make rule for a target device gtest.
# $(1): gtest name - the name of the test we're building such as leb128_test.
# $(2): path relative to $OUT to the test binary
@@ -487,11 +447,10 @@ define define-art-gtest-rule-target
$$($(3)TARGET_OUT_SHARED_LIBRARIES)/libjavacore.so \
$$($(3)TARGET_OUT_SHARED_LIBRARIES)/libopenjdkd.so \
$$(TARGET_OUT_JAVA_LIBRARIES)/core-libart-testdex.jar \
- $$(TARGET_OUT_JAVA_LIBRARIES)/core-oj-testdex.jar \
- $$(ART_TARGET_TEST_OUT)/valgrind-target-suppressions.txt
+ $$(TARGET_OUT_JAVA_LIBRARIES)/core-oj-testdex.jar
-$$(gtest_rule) valgrind-$$(gtest_rule): PRIVATE_TARGET_EXE := $$(gtest_target_exe)
-$$(gtest_rule) valgrind-$$(gtest_rule): PRIVATE_MAYBE_CHROOT_COMMAND := $$(maybe_chroot_command)
+$$(gtest_rule): PRIVATE_TARGET_EXE := $$(gtest_target_exe)
+$$(gtest_rule): PRIVATE_MAYBE_CHROOT_COMMAND := $$(maybe_chroot_command)
# File witnessing the success of the gtest, the presence of which means the gtest's success.
gtest_witness := \
@@ -516,37 +475,7 @@ $$(gtest_rule): test-art-target-sync
ART_TEST_TARGET_GTEST_RULES += $$(gtest_rule)
ART_TEST_TARGET_GTEST_$(1)_RULES += $$(gtest_rule)
-# File witnessing the success of the Valgrind gtest, the presence of which means the gtest's
-# success.
-valgrind_gtest_witness := \
- $$(maybe_art_test_chroot)$(ART_TARGET_TEST_DIR)/$(TARGET_$(3)ARCH)/valgrind-$$(gtest_rule)-$$$$PPID
-
-valgrind-$$(gtest_rule): PRIVATE_VALGRIND_GTEST_WITNESS := $$(valgrind_gtest_witness)
-
-.PHONY: valgrind-$$(gtest_rule)
-valgrind-$$(gtest_rule): $(ART_VALGRIND_TARGET_DEPENDENCIES) test-art-target-sync
- $(hide) adb shell touch $$(PRIVATE_VALGRIND_GTEST_WITNESS)
- $(hide) adb shell rm $$(PRIVATE_VALGRIND_GTEST_WITNESS)
- $(hide) adb shell $$(PRIVATE_MAYBE_CHROOT_COMMAND) chmod 755 $$(PRIVATE_TARGET_EXE)
- $(hide) $$(call ART_TEST_SKIP,$$@) && \
- (adb shell "$$(PRIVATE_MAYBE_CHROOT_COMMAND) env $(GCOV_ENV) LD_LIBRARY_PATH=$(4) \
- ANDROID_ROOT=$(ART_GTEST_TARGET_ANDROID_ROOT) \
- $(ART_GTEST_TARGET_ANDROID_ROOT)/bin/valgrind \
- --leak-check=full --error-exitcode=1 --workaround-gcc296-bugs=yes \
- --suppressions=$(ART_TARGET_TEST_DIR)/valgrind-target-suppressions.txt \
- --num-callers=50 --show-mismatched-frees=no $$(PRIVATE_TARGET_EXE) \
- && touch $$(PRIVATE_VALGRIND_GTEST_WITNESS)" \
- && (adb pull $$(PRIVATE_VALGRIND_GTEST_WITNESS) /tmp/ && $$(call ART_TEST_PASSED,$$@)) \
- || $$(call ART_TEST_FAILED,$$@))
- $(hide) rm -f /tmp/$$@-$$$$PPID
-
- ART_TEST_TARGET_VALGRIND_GTEST$$($(3)ART_PHONY_TEST_TARGET_SUFFIX)_RULES += \
- valgrind-$$(gtest_rule)
- ART_TEST_TARGET_VALGRIND_GTEST_RULES += valgrind-$$(gtest_rule)
- ART_TEST_TARGET_VALGRIND_GTEST_$(1)_RULES += valgrind-$$(gtest_rule)
-
# Clear locally defined variables.
- valgrind_gtest_witness :=
gtest_witness :=
maybe_chroot_command :=
maybe_art_test_chroot :=
@@ -555,16 +484,6 @@ valgrind-$$(gtest_rule): $(ART_VALGRIND_TARGET_DEPENDENCIES) test-art-target-syn
gtest_rule :=
endef # define-art-gtest-rule-target
-ART_VALGRIND_DEPENDENCIES := \
- $(HOST_OUT_EXECUTABLES)/valgrind \
- $(HOST_OUT)/lib64/valgrind/memcheck-amd64-linux \
- $(HOST_OUT)/lib64/valgrind/memcheck-x86-linux \
- $(HOST_OUT)/lib64/valgrind/default.supp \
- $(HOST_OUT)/lib64/valgrind/vgpreload_core-amd64-linux.so \
- $(HOST_OUT)/lib64/valgrind/vgpreload_core-x86-linux.so \
- $(HOST_OUT)/lib64/valgrind/vgpreload_memcheck-amd64-linux.so \
- $(HOST_OUT)/lib64/valgrind/vgpreload_memcheck-x86-linux.so
-
# Define make rules for a host gtests.
# $(1): gtest name - the name of the test we're building such as leb128_test.
# $(2): path relative to $OUT to the test binary
@@ -616,19 +535,6 @@ endif
ART_TEST_HOST_GTEST_$(1)_RULES += $$(gtest_rule)
-.PHONY: valgrind-$$(gtest_rule)
-valgrind-$$(gtest_rule): $$(gtest_exe) $$(gtest_deps) $(ART_VALGRIND_DEPENDENCIES)
- $(hide) $$(call ART_TEST_SKIP,$$@) && \
- VALGRIND_LIB=$(HOST_OUT)/lib64/valgrind \
- $(HOST_OUT_EXECUTABLES)/valgrind --leak-check=full --error-exitcode=1 \
- --suppressions=art/test/valgrind-suppressions.txt --num-callers=50 \
- $$< && \
- $$(call ART_TEST_PASSED,$$@) || $$(call ART_TEST_FAILED,$$@)
-
- ART_TEST_HOST_VALGRIND_GTEST$$($(3)ART_PHONY_TEST_HOST_SUFFIX)_RULES += valgrind-$$(gtest_rule)
- ART_TEST_HOST_VALGRIND_GTEST_RULES += valgrind-$$(gtest_rule)
- ART_TEST_HOST_VALGRIND_GTEST_$(1)_RULES += valgrind-$$(gtest_rule)
-
# Clear locally defined variables.
gtest_deps :=
gtest_exe :=
@@ -661,7 +567,6 @@ define define-art-gtest-target
ifndef ART_TEST_TARGET_GTEST_$$(art_gtest_name)_RULES
ART_TEST_TARGET_GTEST_$$(art_gtest_name)_RULES :=
- ART_TEST_TARGET_VALGRIND_GTEST_$$(art_gtest_name)_RULES :=
endif
$$(eval $$(call define-art-gtest-rule-target,$$(art_gtest_name),$$(art_gtest_filename),$(2),$$($(2)library_path)))
@@ -681,7 +586,6 @@ define define-art-gtest-host
art_gtest_name := $$(notdir $$(basename $$(art_gtest_filename)))
ifndef ART_TEST_HOST_GTEST_$$(art_gtest_name)_RULES
ART_TEST_HOST_GTEST_$$(art_gtest_name)_RULES :=
- ART_TEST_HOST_VALGRIND_GTEST_$$(art_gtest_name)_RULES :=
endif
$$(eval $$(call define-art-gtest-rule-host,$$(art_gtest_name),$$(art_gtest_filename),$(2)))
@@ -700,13 +604,8 @@ define define-art-gtest-target-both
test-art-target-gtest-$$(art_gtest_name): $$(ART_TEST_TARGET_GTEST_$$(art_gtest_name)_RULES)
$$(hide) $$(call ART_TEST_PREREQ_FINISHED,$$@)
-.PHONY: valgrind-test-art-target-gtest-$$(art_gtest_name)
-valgrind-test-art-target-gtest-$$(art_gtest_name): $$(ART_TEST_TARGET_VALGRIND_GTEST_$$(art_gtest_name)_RULES)
- $$(hide) $$(call ART_TEST_PREREQ_FINISHED,$$@)
-
# Clear now unused variables.
ART_TEST_TARGET_GTEST_$$(art_gtest_name)_RULES :=
- ART_TEST_TARGET_VALGRIND_GTEST_$$(art_gtest_name)_RULES :=
art_gtest_name :=
endef # define-art-gtest-target-both
@@ -719,13 +618,8 @@ define define-art-gtest-host-both
test-art-host-gtest-$$(art_gtest_name): $$(ART_TEST_HOST_GTEST_$$(art_gtest_name)_RULES)
$$(hide) $$(call ART_TEST_PREREQ_FINISHED,$$@)
-.PHONY: valgrind-test-art-host-gtest-$$(art_gtest_name)
-valgrind-test-art-host-gtest-$$(art_gtest_name): $$(ART_TEST_HOST_VALGRIND_GTEST_$$(art_gtest_name)_RULES)
- $$(hide) $$(call ART_TEST_PREREQ_FINISHED,$$@)
-
# Clear now unused variables.
ART_TEST_HOST_GTEST_$$(art_gtest_name)_RULES :=
- ART_TEST_HOST_VALGRIND_GTEST_$$(art_gtest_name)_RULES :=
art_gtest_name :=
endef # define-art-gtest-host-both
@@ -751,12 +645,11 @@ RUNTIME_TARGET_GTEST_MAKE_TARGETS :=
$(foreach file, $(ART_TARGET_GTEST_FILES), $(eval RUNTIME_TARGET_GTEST_MAKE_TARGETS += $$(notdir $$(patsubst %/,%,$$(dir $$(file))))_$$(notdir $$(basename $$(file)))))
COMPILER_TARGET_GTEST_MAKE_TARGETS :=
-# Define all the combinations of host/target, valgrind and suffix such as:
-# test-art-host-gtest or valgrind-test-art-host-gtest64
+# Define all the combinations of host/target and suffix such as:
+# test-art-host-gtest or test-art-host-gtest64
# $(1): host or target
# $(2): HOST or TARGET
-# $(3): valgrind- or undefined
-# $(4): undefined, 32 or 64
+# $(3): undefined, 32 or 64
define define-test-art-gtest-combination
ifeq ($(1),host)
ifneq ($(2),HOST)
@@ -771,15 +664,11 @@ define define-test-art-gtest-combination
endif
endif
- rule_name := $(3)test-art-$(1)-gtest$(4)
- ifeq ($(3),valgrind-)
- dependencies := $$(ART_TEST_$(2)_VALGRIND_GTEST$(4)_RULES)
- else
- dependencies := $$(ART_TEST_$(2)_GTEST$(4)_RULES)
- endif
+ rule_name := test-art-$(1)-gtest$(3)
+ dependencies := $$(ART_TEST_$(2)_GTEST$(3)_RULES)
.PHONY: $$(rule_name)
-$$(rule_name): $$(dependencies) dx d8-compat-dx desugar
+$$(rule_name): $$(dependencies) d8 d8-compat-dx
$(hide) $$(call ART_TEST_PREREQ_FINISHED,$$@)
# Clear locally defined variables.
@@ -787,21 +676,15 @@ $$(rule_name): $$(dependencies) dx d8-compat-dx desugar
dependencies :=
endef # define-test-art-gtest-combination
-$(eval $(call define-test-art-gtest-combination,target,TARGET,,))
-$(eval $(call define-test-art-gtest-combination,target,TARGET,valgrind-,))
-$(eval $(call define-test-art-gtest-combination,target,TARGET,,$(ART_PHONY_TEST_TARGET_SUFFIX)))
-$(eval $(call define-test-art-gtest-combination,target,TARGET,valgrind-,$(ART_PHONY_TEST_TARGET_SUFFIX)))
+$(eval $(call define-test-art-gtest-combination,target,TARGET,))
+$(eval $(call define-test-art-gtest-combination,target,TARGET,$(ART_PHONY_TEST_TARGET_SUFFIX)))
ifdef 2ND_ART_PHONY_TEST_TARGET_SUFFIX
-$(eval $(call define-test-art-gtest-combination,target,TARGET,,$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)))
-$(eval $(call define-test-art-gtest-combination,target,TARGET,valgrind-,$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)))
+$(eval $(call define-test-art-gtest-combination,target,TARGET,$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)))
endif
-$(eval $(call define-test-art-gtest-combination,host,HOST,,))
-$(eval $(call define-test-art-gtest-combination,host,HOST,valgrind-,))
-$(eval $(call define-test-art-gtest-combination,host,HOST,,$(ART_PHONY_TEST_HOST_SUFFIX)))
-$(eval $(call define-test-art-gtest-combination,host,HOST,valgrind-,$(ART_PHONY_TEST_HOST_SUFFIX)))
+$(eval $(call define-test-art-gtest-combination,host,HOST,))
+$(eval $(call define-test-art-gtest-combination,host,HOST,$(ART_PHONY_TEST_HOST_SUFFIX)))
ifneq ($(HOST_PREFER_32_BIT),true)
-$(eval $(call define-test-art-gtest-combination,host,HOST,,$(2ND_ART_PHONY_TEST_HOST_SUFFIX)))
-$(eval $(call define-test-art-gtest-combination,host,HOST,valgrind-,$(2ND_ART_PHONY_TEST_HOST_SUFFIX)))
+$(eval $(call define-test-art-gtest-combination,host,HOST,$(2ND_ART_PHONY_TEST_HOST_SUFFIX)))
endif
# Clear locally defined variables.
@@ -818,15 +701,9 @@ COMPILER_GTEST_HOST_SRC_FILES :=
ART_TEST_HOST_GTEST$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
ART_TEST_HOST_GTEST$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
ART_TEST_HOST_GTEST_RULES :=
-ART_TEST_HOST_VALGRIND_GTEST$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_VALGRIND_GTEST$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_VALGRIND_GTEST_RULES :=
ART_TEST_TARGET_GTEST$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
ART_TEST_TARGET_GTEST$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
ART_TEST_TARGET_GTEST_RULES :=
-ART_TEST_TARGET_VALGRIND_GTEST$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_VALGRIND_GTEST$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_VALGRIND_GTEST_RULES :=
ART_GTEST_TARGET_ANDROID_ROOT :=
ART_GTEST_class_linker_test_DEX_DEPS :=
ART_GTEST_class_table_test_DEX_DEPS :=
@@ -865,8 +742,6 @@ ART_GTEST_transaction_test_DEX_DEPS :=
ART_GTEST_dex2oat_environment_tests_DEX_DEPS :=
ART_GTEST_heap_verification_test_DEX_DEPS :=
ART_GTEST_verifier_deps_test_DEX_DEPS :=
-ART_VALGRIND_DEPENDENCIES :=
-ART_VALGRIND_TARGET_DEPENDENCIES :=
$(foreach dir,$(GTEST_DEX_DIRECTORIES), $(eval ART_TEST_TARGET_GTEST_$(dir)_DEX :=))
$(foreach dir,$(GTEST_DEX_DIRECTORIES), $(eval ART_TEST_HOST_GTEST_$(dir)_DEX :=))
ART_TEST_HOST_GTEST_MainStripped_DEX :=
diff --git a/build/Android.oat.mk b/build/Android.oat.mk
index 517ac5c28d..08b1e10268 100644
--- a/build/Android.oat.mk
+++ b/build/Android.oat.mk
@@ -37,11 +37,9 @@ else
endif
# Use dex2oat debug version for better error reporting
-# $(1): compiler - optimizing, interpreter or interpreter-access-checks.
+# $(1): compiler - optimizing, interpreter or interp-ac (interpreter-access-checks).
# $(2): 2ND_ or undefined, 2ND_ for 32-bit host builds.
-# $(3): wrapper, e.g., valgrind.
-# $(4): dex2oat suffix, e.g, valgrind requires 32 right now.
-# $(5): multi-image.
+# $(3): multi-image.
# NB depending on HOST_CORE_DEX_LOCATIONS so we are sure to have the dex files in frameworks for
# run-test --no-image
define create-core-oat-host-rules
@@ -65,11 +63,11 @@ define create-core-oat-host-rules
endif
ifneq ($(filter-out interpreter interp-ac optimizing,$(1)),)
#Technically this test is not precise, but hopefully good enough.
- $$(error found $(1) expected interpreter, interpreter-access-checks, or optimizing)
+ $$(error found $(1) expected interpreter, interp-ac, or optimizing)
endif
- # If $(5) is true, generate a multi-image.
- ifeq ($(5),true)
+ # If $(3) is true, generate a multi-image.
+ ifeq ($(3),true)
core_multi_infix := -multi
core_multi_param := --multi-image --no-inline-from=core-oj-hostdex.jar
core_multi_group := _multi
@@ -79,22 +77,18 @@ define create-core-oat-host-rules
core_multi_group :=
endif
- core_image_name := $($(2)HOST_CORE_IMG_OUT_BASE)$$(core_infix)$$(core_multi_infix)$(3)$(CORE_IMG_SUFFIX)
- core_oat_name := $($(2)HOST_CORE_OAT_OUT_BASE)$$(core_infix)$$(core_multi_infix)$(3)$(CORE_OAT_SUFFIX)
+ core_image_name := $($(2)HOST_CORE_IMG_OUT_BASE)$$(core_infix)$$(core_multi_infix)$(CORE_IMG_SUFFIX)
+ core_oat_name := $($(2)HOST_CORE_OAT_OUT_BASE)$$(core_infix)$$(core_multi_infix)$(CORE_OAT_SUFFIX)
# Using the bitness suffix makes it easier to add as a dependency for the run-test mk.
ifeq ($(2),)
- $(3)HOST_CORE_IMAGE_$(1)$$(core_multi_group)_64 := $$(core_image_name)
+ HOST_CORE_IMAGE_$(1)$$(core_multi_group)_64 := $$(core_image_name)
else
- $(3)HOST_CORE_IMAGE_$(1)$$(core_multi_group)_32 := $$(core_image_name)
+ HOST_CORE_IMAGE_$(1)$$(core_multi_group)_32 := $$(core_image_name)
endif
- $(3)HOST_CORE_IMG_OUTS += $$(core_image_name)
- $(3)HOST_CORE_OAT_OUTS += $$(core_oat_name)
+ HOST_CORE_IMG_OUTS += $$(core_image_name)
+ HOST_CORE_OAT_OUTS += $$(core_oat_name)
- # If we have a wrapper, make the target phony.
- ifneq ($(3),)
-.PHONY: $$(core_image_name)
- endif
$$(core_image_name): PRIVATE_CORE_COMPILE_OPTIONS := $$(core_compile_options)
$$(core_image_name): PRIVATE_CORE_IMG_NAME := $$(core_image_name)
$$(core_image_name): PRIVATE_CORE_OAT_NAME := $$(core_oat_name)
@@ -102,7 +96,7 @@ $$(core_image_name): PRIVATE_CORE_MULTI_PARAM := $$(core_multi_param)
$$(core_image_name): $$(HOST_CORE_DEX_LOCATIONS) $$(core_dex2oat_dependency)
@echo "host dex2oat: $$@"
@mkdir -p $$(dir $$@)
- $$(hide) $(3) $$(DEX2OAT)$(4) --runtime-arg -Xms$(DEX2OAT_IMAGE_XMS) \
+ $$(hide) ANDROID_LOG_TAGS="*:e" $$(DEX2OAT) --runtime-arg -Xms$(DEX2OAT_IMAGE_XMS) \
--runtime-arg -Xmx$(DEX2OAT_IMAGE_XMX) \
--image-classes=$$(PRELOADED_CLASSES) $$(addprefix --dex-file=,$$(HOST_CORE_DEX_FILES)) \
$$(addprefix --dex-location=,$$(HOST_CORE_DEX_LOCATIONS)) --oat-file=$$(PRIVATE_CORE_OAT_NAME) \
@@ -124,35 +118,27 @@ $$(core_oat_name): $$(core_image_name)
core_infix :=
endef # create-core-oat-host-rules
-# $(1): compiler - optimizing, interpreter or interpreter-access-checks.
-# $(2): wrapper.
-# $(3): dex2oat suffix.
-# $(4): multi-image.
+# $(1): compiler - optimizing, interpreter or interp-ac (interpreter-access-checks).
+# $(2): multi-image.
define create-core-oat-host-rule-combination
- $(call create-core-oat-host-rules,$(1),,$(2),$(3),$(4))
+ $(call create-core-oat-host-rules,$(1),,$(2))
ifneq ($(HOST_PREFER_32_BIT),true)
- $(call create-core-oat-host-rules,$(1),2ND_,$(2),$(3),$(4))
+ $(call create-core-oat-host-rules,$(1),2ND_,$(2))
endif
endef
-$(eval $(call create-core-oat-host-rule-combination,optimizing,,,false))
-$(eval $(call create-core-oat-host-rule-combination,interpreter,,,false))
-$(eval $(call create-core-oat-host-rule-combination,interp-ac,,,false))
-$(eval $(call create-core-oat-host-rule-combination,optimizing,,,true))
-$(eval $(call create-core-oat-host-rule-combination,interpreter,,,true))
-$(eval $(call create-core-oat-host-rule-combination,interp-ac,,,true))
-
-valgrindHOST_CORE_IMG_OUTS :=
-valgrindHOST_CORE_OAT_OUTS :=
-$(eval $(call create-core-oat-host-rule-combination,optimizing,valgrind,32,false))
-$(eval $(call create-core-oat-host-rule-combination,interpreter,valgrind,32,false))
-$(eval $(call create-core-oat-host-rule-combination,interp-ac,valgrind,32,false))
-
-valgrind-test-art-host-dex2oat-host: $(valgrindHOST_CORE_IMG_OUTS)
+$(eval $(call create-core-oat-host-rule-combination,optimizing,false))
+$(eval $(call create-core-oat-host-rule-combination,interpreter,false))
+$(eval $(call create-core-oat-host-rule-combination,interp-ac,false))
+$(eval $(call create-core-oat-host-rule-combination,optimizing,true))
+$(eval $(call create-core-oat-host-rule-combination,interpreter,true))
+$(eval $(call create-core-oat-host-rule-combination,interp-ac,true))
test-art-host-dex2oat-host: $(HOST_CORE_IMG_OUTS)
+# $(1): compiler - optimizing, interpreter or interp-ac (interpreter-access-checks).
+# $(2): 2ND_ or undefined
define create-core-oat-target-rules
core_compile_options :=
core_image_name :=
@@ -176,36 +162,32 @@ define create-core-oat-target-rules
endif
ifneq ($(filter-out interpreter interp-ac optimizing,$(1)),)
# Technically this test is not precise, but hopefully good enough.
- $$(error found $(1) expected interpreter, interpreter-access-checks, or optimizing)
+ $$(error found $(1) expected interpreter, interp-ac, or optimizing)
endif
- core_image_name := $($(2)TARGET_CORE_IMG_OUT_BASE)$$(core_infix)$(3)$(CORE_IMG_SUFFIX)
- core_oat_name := $($(2)TARGET_CORE_OAT_OUT_BASE)$$(core_infix)$(3)$(CORE_OAT_SUFFIX)
+ core_image_name := $($(2)TARGET_CORE_IMG_OUT_BASE)$$(core_infix)$(CORE_IMG_SUFFIX)
+ core_oat_name := $($(2)TARGET_CORE_OAT_OUT_BASE)$$(core_infix)$(CORE_OAT_SUFFIX)
# Using the bitness suffix makes it easier to add as a dependency for the run-test mk.
ifeq ($(2),)
ifdef TARGET_2ND_ARCH
- $(3)TARGET_CORE_IMAGE_$(1)_64 := $$(core_image_name)
+ TARGET_CORE_IMAGE_$(1)_64 := $$(core_image_name)
else
- $(3)TARGET_CORE_IMAGE_$(1)_32 := $$(core_image_name)
+ TARGET_CORE_IMAGE_$(1)_32 := $$(core_image_name)
endif
else
- $(3)TARGET_CORE_IMAGE_$(1)_32 := $$(core_image_name)
+ TARGET_CORE_IMAGE_$(1)_32 := $$(core_image_name)
endif
- $(3)TARGET_CORE_IMG_OUTS += $$(core_image_name)
- $(3)TARGET_CORE_OAT_OUTS += $$(core_oat_name)
+ TARGET_CORE_IMG_OUTS += $$(core_image_name)
+ TARGET_CORE_OAT_OUTS += $$(core_oat_name)
- # If we have a wrapper, make the target phony.
- ifneq ($(3),)
-.PHONY: $$(core_image_name)
- endif
$$(core_image_name): PRIVATE_CORE_COMPILE_OPTIONS := $$(core_compile_options)
$$(core_image_name): PRIVATE_CORE_IMG_NAME := $$(core_image_name)
$$(core_image_name): PRIVATE_CORE_OAT_NAME := $$(core_oat_name)
$$(core_image_name): $$(TARGET_CORE_DEX_FILES) $$(core_dex2oat_dependency)
@echo "target dex2oat: $$@"
@mkdir -p $$(dir $$@)
- $$(hide) $(4) $$(DEX2OAT)$(5) --runtime-arg -Xms$(DEX2OAT_IMAGE_XMS) \
+ $$(hide) $$(DEX2OAT) --runtime-arg -Xms$(DEX2OAT_IMAGE_XMS) \
--runtime-arg -Xmx$(DEX2OAT_IMAGE_XMX) \
--image-classes=$$(PRELOADED_CLASSES) $$(addprefix --dex-file=,$$(TARGET_CORE_DEX_FILES)) \
$$(addprefix --dex-location=,$$(TARGET_CORE_DEX_LOCATIONS)) --oat-file=$$(PRIVATE_CORE_OAT_NAME) \
@@ -228,30 +210,18 @@ $$(core_oat_name): $$(core_image_name)
core_infix :=
endef # create-core-oat-target-rules
-# $(1): compiler - optimizing, interpreter or interpreter-access-checks.
-# $(2): wrapper.
-# $(3): dex2oat suffix.
+# $(1): compiler - optimizing, interpreter or interp-ac (interpreter-access-checks).
define create-core-oat-target-rule-combination
- $(call create-core-oat-target-rules,$(1),,$(2),$(3))
+ $(call create-core-oat-target-rules,$(1),)
ifdef TARGET_2ND_ARCH
- $(call create-core-oat-target-rules,$(1),2ND_,$(2),$(3))
+ $(call create-core-oat-target-rules,$(1),2ND_)
endif
endef
-$(eval $(call create-core-oat-target-rule-combination,optimizing,,))
-$(eval $(call create-core-oat-target-rule-combination,interpreter,,))
-$(eval $(call create-core-oat-target-rule-combination,interp-ac,,))
-
-valgrindTARGET_CORE_IMG_OUTS :=
-valgrindTARGET_CORE_OAT_OUTS :=
-$(eval $(call create-core-oat-target-rule-combination,optimizing,valgrind,32))
-$(eval $(call create-core-oat-target-rule-combination,interpreter,valgrind,32))
-$(eval $(call create-core-oat-target-rule-combination,interp-ac,valgrind,32))
-
-valgrind-test-art-host-dex2oat-target: $(valgrindTARGET_CORE_IMG_OUTS)
-
-valgrind-test-art-host-dex2oat: valgrind-test-art-host-dex2oat-host valgrind-test-art-host-dex2oat-target
+$(eval $(call create-core-oat-target-rule-combination,optimizing))
+$(eval $(call create-core-oat-target-rule-combination,interpreter))
+$(eval $(call create-core-oat-target-rule-combination,interp-ac))
# Define a default core image that can be used for things like gtests that
# need some image to run, but don't otherwise care which image is used.
diff --git a/cmdline/cmdline_parser_test.cc b/cmdline/cmdline_parser_test.cc
index 235a2aa90e..a52e16328a 100644
--- a/cmdline/cmdline_parser_test.cc
+++ b/cmdline/cmdline_parser_test.cc
@@ -371,7 +371,7 @@ TEST_F(CmdlineParserTest, DISABLED_TestXGcOption) {
*/
TEST_F(CmdlineParserTest, TestJdwpProviderEmpty) {
{
- EXPECT_SINGLE_PARSE_DEFAULT_VALUE(JdwpProvider::kNone, "", M::JdwpProvider);
+ EXPECT_SINGLE_PARSE_DEFAULT_VALUE(JdwpProvider::kUnset, "", M::JdwpProvider);
}
} // TEST_F
diff --git a/cmdline/unit.h b/cmdline/unit.h
index ad6a03d12f..f73981fbd3 100644
--- a/cmdline/unit.h
+++ b/cmdline/unit.h
@@ -21,8 +21,9 @@ namespace art {
// Used for arguments that simply indicate presence (e.g. "-help") without any values.
struct Unit {
- // Avoid 'Conditional jump or move depends on uninitialised value(s)' errors
- // when running valgrind by specifying a user-defined constructor.
+ // Historical note: We specified a user-defined constructor to avoid
+ // 'Conditional jump or move depends on uninitialised value(s)' errors
+ // when running Valgrind.
Unit() {}
Unit(const Unit&) = default;
~Unit() {}
diff --git a/compiler/Android.bp b/compiler/Android.bp
index be963fbbdb..e1d382f6f4 100644
--- a/compiler/Android.bp
+++ b/compiler/Android.bp
@@ -63,6 +63,7 @@ art_cc_defaults {
"optimizing/inliner.cc",
"optimizing/instruction_builder.cc",
"optimizing/instruction_simplifier.cc",
+ "optimizing/intrinsic_objects.cc",
"optimizing/intrinsics.cc",
"optimizing/licm.cc",
"optimizing/linear_order.cc",
@@ -160,6 +161,7 @@ art_cc_defaults {
"utils/x86/assembler_x86.cc",
"utils/x86/jni_macro_assembler_x86.cc",
"utils/x86/managed_register_x86.cc",
+ "optimizing/instruction_simplifier_x86.cc",
],
},
x86_64: {
@@ -345,6 +347,7 @@ art_cc_test {
"optimizing/parallel_move_test.cc",
"optimizing/pretty_printer_test.cc",
"optimizing/reference_type_propagation_test.cc",
+ "optimizing/select_generator_test.cc",
"optimizing/side_effects_test.cc",
"optimizing/ssa_liveness_analysis_test.cc",
"optimizing/ssa_test.cc",
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index a7f16d394e..e8e1d408ef 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -16,6 +16,8 @@
#include "common_compiler_test.h"
+#include <type_traits>
+
#include "arch/instruction_set_features.h"
#include "art_field-inl.h"
#include "art_method-inl.h"
@@ -29,6 +31,7 @@
#include "dex/verification_results.h"
#include "driver/compiler_driver.h"
#include "driver/compiler_options.h"
+#include "jni/java_vm_ext.h"
#include "interpreter/interpreter.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
@@ -37,6 +40,7 @@
#include "oat_quick_method_header.h"
#include "scoped_thread_state_change-inl.h"
#include "thread-current-inl.h"
+#include "utils/atomic_dex_ref_map-inl.h"
namespace art {
@@ -79,6 +83,7 @@ void CommonCompilerTest::MakeExecutable(ArtMethod* method) {
const size_t size = method_info.size() + vmap_table.size() + sizeof(method_header) + code_size;
chunk->reserve(size + max_padding);
chunk->resize(sizeof(method_header));
+ static_assert(std::is_trivially_copyable<OatQuickMethodHeader>::value, "Cannot use memcpy");
memcpy(&(*chunk)[0], &method_header, sizeof(method_header));
chunk->insert(chunk->begin(), vmap_table.begin(), vmap_table.end());
chunk->insert(chunk->begin(), method_info.begin(), method_info.end());
@@ -131,11 +136,10 @@ void CommonCompilerTest::MakeExecutable(ObjPtr<mirror::ClassLoader> class_loader
}
}
-// Get the set of image classes given to the compiler-driver in SetUp. Note: the compiler
-// driver assumes ownership of the set, so the test should properly release the set.
-std::unordered_set<std::string>* CommonCompilerTest::GetImageClasses() {
+// Get the set of image classes given to the compiler options in SetUp.
+std::unique_ptr<HashSet<std::string>> CommonCompilerTest::GetImageClasses() {
// Empty set: by default no classes are retained in the image.
- return new std::unordered_set<std::string>();
+ return std::make_unique<HashSet<std::string>>();
}
// Get ProfileCompilationInfo that should be passed to the driver.
@@ -149,11 +153,7 @@ void CommonCompilerTest::SetUp() {
{
ScopedObjectAccess soa(Thread::Current());
- const InstructionSet instruction_set = kRuntimeISA;
- // Take the default set of instruction features from the build.
- instruction_set_features_ = InstructionSetFeatures::FromCppDefines();
-
- runtime_->SetInstructionSet(instruction_set);
+ runtime_->SetInstructionSet(instruction_set_);
for (uint32_t i = 0; i < static_cast<uint32_t>(CalleeSaveType::kLastCalleeSaveType); ++i) {
CalleeSaveType type = CalleeSaveType(i);
if (!runtime_->HasCalleeSaveMethod(type)) {
@@ -161,26 +161,51 @@ void CommonCompilerTest::SetUp() {
}
}
- CreateCompilerDriver(compiler_kind_, instruction_set);
+ CreateCompilerDriver();
+ }
+}
+
+void CommonCompilerTest::ApplyInstructionSet() {
+ // Copy local instruction_set_ and instruction_set_features_ to *compiler_options_;
+ CHECK(instruction_set_features_ != nullptr);
+ if (instruction_set_ == InstructionSet::kThumb2) {
+ CHECK_EQ(InstructionSet::kArm, instruction_set_features_->GetInstructionSet());
+ } else {
+ CHECK_EQ(instruction_set_, instruction_set_features_->GetInstructionSet());
+ }
+ compiler_options_->instruction_set_ = instruction_set_;
+ compiler_options_->instruction_set_features_ =
+ InstructionSetFeatures::FromBitmap(instruction_set_, instruction_set_features_->AsBitmap());
+ CHECK(compiler_options_->instruction_set_features_->Equals(instruction_set_features_.get()));
+}
+
+void CommonCompilerTest::OverrideInstructionSetFeatures(InstructionSet instruction_set,
+ const std::string& variant) {
+ instruction_set_ = instruction_set;
+ std::string error_msg;
+ instruction_set_features_ =
+ InstructionSetFeatures::FromVariant(instruction_set, variant, &error_msg);
+ CHECK(instruction_set_features_ != nullptr) << error_msg;
+
+ if (compiler_options_ != nullptr) {
+ ApplyInstructionSet();
}
}
-void CommonCompilerTest::CreateCompilerDriver(Compiler::Kind kind,
- InstructionSet isa,
- size_t number_of_threads) {
+void CommonCompilerTest::CreateCompilerDriver() {
+ ApplyInstructionSet();
+
compiler_options_->boot_image_ = true;
+ compiler_options_->compile_pic_ = false; // Non-PIC boot image is a test configuration.
compiler_options_->SetCompilerFilter(GetCompilerFilter());
+ compiler_options_->image_classes_.swap(*GetImageClasses());
compiler_driver_.reset(new CompilerDriver(compiler_options_.get(),
verification_results_.get(),
- kind,
- isa,
- instruction_set_features_.get(),
- GetImageClasses(),
- number_of_threads,
+ compiler_kind_,
+ &compiler_options_->image_classes_,
+ number_of_threads_,
/* swap_fd */ -1,
GetProfileCompilationInfo()));
- // We typically don't generate an image in unit tests, disable this optimization by default.
- compiler_driver_->SetSupportBootImageFixup(false);
}
void CommonCompilerTest::SetUpRuntimeOptions(RuntimeOptions* options) {
@@ -202,11 +227,6 @@ void CommonCompilerTest::SetCompilerKind(Compiler::Kind compiler_kind) {
compiler_kind_ = compiler_kind;
}
-InstructionSet CommonCompilerTest::GetInstructionSet() const {
- DCHECK(compiler_driver_.get() != nullptr);
- return compiler_driver_->GetInstructionSet();
-}
-
void CommonCompilerTest::TearDown() {
compiler_driver_.reset();
callbacks_.reset();
@@ -232,9 +252,49 @@ void CommonCompilerTest::CompileClass(mirror::ClassLoader* class_loader, const c
void CommonCompilerTest::CompileMethod(ArtMethod* method) {
CHECK(method != nullptr);
- TimingLogger timings("CommonTest::CompileMethod", false, false);
+ TimingLogger timings("CommonCompilerTest::CompileMethod", false, false);
TimingLogger::ScopedTiming t(__FUNCTION__, &timings);
- compiler_driver_->CompileOne(Thread::Current(), method, &timings);
+ {
+ Thread* self = Thread::Current();
+ jobject class_loader = self->GetJniEnv()->GetVm()->AddGlobalRef(self, method->GetClassLoader());
+
+ DCHECK(!Runtime::Current()->IsStarted());
+ const DexFile* dex_file = method->GetDexFile();
+ uint16_t class_def_idx = method->GetClassDefIndex();
+ uint32_t method_idx = method->GetDexMethodIndex();
+ uint32_t access_flags = method->GetAccessFlags();
+ InvokeType invoke_type = method->GetInvokeType();
+ StackHandleScope<2> hs(self);
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(method->GetDexCache()));
+ Handle<mirror::ClassLoader> h_class_loader = hs.NewHandle(
+ self->DecodeJObject(class_loader)->AsClassLoader());
+ const DexFile::CodeItem* code_item = dex_file->GetCodeItem(method->GetCodeItemOffset());
+
+ std::vector<const DexFile*> dex_files;
+ dex_files.push_back(dex_file);
+
+ // Go to native so that we don't block GC during compilation.
+ ScopedThreadSuspension sts(self, kNative);
+
+ compiler_driver_->InitializeThreadPools();
+
+ compiler_driver_->PreCompile(class_loader, dex_files, &timings);
+
+ compiler_driver_->CompileOne(self,
+ class_loader,
+ *dex_file,
+ class_def_idx,
+ method_idx,
+ access_flags,
+ invoke_type,
+ code_item,
+ dex_cache,
+ h_class_loader);
+
+ compiler_driver_->FreeThreadPools();
+
+ self->GetJniEnv()->DeleteGlobalRef(class_loader);
+ }
TimingLogger::ScopedTiming t2("MakeExecutable", &timings);
MakeExecutable(method);
}
@@ -288,4 +348,14 @@ void CommonCompilerTest::UnreserveImageSpace() {
image_reservation_.reset();
}
+void CommonCompilerTest::SetDexFilesForOatFile(const std::vector<const DexFile*>& dex_files) {
+ compiler_options_->dex_files_for_oat_file_ = dex_files;
+ compiler_driver_->compiled_classes_.AddDexFiles(dex_files);
+ compiler_driver_->dex_to_dex_compiler_.SetDexFiles(dex_files);
+}
+
+void CommonCompilerTest::ClearBootImageOption() {
+ compiler_options_->boot_image_ = false;
+}
+
} // namespace art
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index 39c8bd817b..db38110400 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -18,9 +18,11 @@
#define ART_COMPILER_COMMON_COMPILER_TEST_H_
#include <list>
-#include <unordered_set>
#include <vector>
+#include "arch/instruction_set.h"
+#include "arch/instruction_set_features.h"
+#include "base/hash_set.h"
#include "common_runtime_test.h"
#include "compiler.h"
#include "oat_file.h"
@@ -33,6 +35,7 @@ class ClassLoader;
class CompilerDriver;
class CompilerOptions;
class CumulativeLogger;
+class DexFile;
class ProfileCompilationInfo;
class VerificationResults;
@@ -54,18 +57,15 @@ class CommonCompilerTest : public CommonRuntimeTest {
REQUIRES_SHARED(Locks::mutator_lock_);
protected:
- virtual void SetUp();
+ void SetUp() OVERRIDE;
- virtual void SetUpRuntimeOptions(RuntimeOptions* options);
+ void SetUpRuntimeOptions(RuntimeOptions* options) OVERRIDE;
Compiler::Kind GetCompilerKind() const;
void SetCompilerKind(Compiler::Kind compiler_kind);
- InstructionSet GetInstructionSet() const;
-
- // Get the set of image classes given to the compiler-driver in SetUp. Note: the compiler
- // driver assumes ownership of the set, so the test should properly release the set.
- virtual std::unordered_set<std::string>* GetImageClasses();
+ // Get the set of image classes given to the compiler-driver in SetUp.
+ virtual std::unique_ptr<HashSet<std::string>> GetImageClasses();
virtual ProfileCompilationInfo* GetProfileCompilationInfo();
@@ -73,7 +73,7 @@ class CommonCompilerTest : public CommonRuntimeTest {
return CompilerFilter::kDefaultCompilerFilter;
}
- virtual void TearDown();
+ void TearDown() OVERRIDE;
void CompileClass(mirror::ClassLoader* class_loader, const char* class_name)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -88,18 +88,31 @@ class CommonCompilerTest : public CommonRuntimeTest {
const char* method_name, const char* signature)
REQUIRES_SHARED(Locks::mutator_lock_);
- void CreateCompilerDriver(Compiler::Kind kind, InstructionSet isa, size_t number_of_threads = 2U);
+ void ApplyInstructionSet();
+ void OverrideInstructionSetFeatures(InstructionSet instruction_set, const std::string& variant);
+
+ void CreateCompilerDriver();
void ReserveImageSpace();
void UnreserveImageSpace();
+ void SetDexFilesForOatFile(const std::vector<const DexFile*>& dex_files);
+
+ void ClearBootImageOption();
+
Compiler::Kind compiler_kind_ = Compiler::kOptimizing;
+ size_t number_of_threads_ = 2u;
+
+ InstructionSet instruction_set_ =
+ (kRuntimeISA == InstructionSet::kArm) ? InstructionSet::kThumb2 : kRuntimeISA;
+ // Take the default set of instruction features from the build.
+ std::unique_ptr<const InstructionSetFeatures> instruction_set_features_
+ = InstructionSetFeatures::FromCppDefines();
+
std::unique_ptr<CompilerOptions> compiler_options_;
std::unique_ptr<VerificationResults> verification_results_;
std::unique_ptr<CompilerDriver> compiler_driver_;
- std::unique_ptr<const InstructionSetFeatures> instruction_set_features_;
-
private:
std::unique_ptr<MemMap> image_reservation_;
diff --git a/compiler/compiler.h b/compiler/compiler.h
index f2ec3a9fa3..ef3d87f02b 100644
--- a/compiler/compiler.h
+++ b/compiler/compiler.h
@@ -39,12 +39,6 @@ template<class T> class Handle;
class OatWriter;
class Thread;
-enum class CopyOption {
- kNever,
- kAlways,
- kOnlyIfCompressed
-};
-
class Compiler {
public:
enum Kind {
diff --git a/compiler/debug/dwarf/dwarf_test.h b/compiler/debug/dwarf/dwarf_test.h
index 9a7c604ca1..6b039a7b5f 100644
--- a/compiler/debug/dwarf/dwarf_test.h
+++ b/compiler/debug/dwarf/dwarf_test.h
@@ -28,7 +28,7 @@
#include "base/os.h"
#include "base/unix_file/fd_file.h"
-#include "common_runtime_test.h"
+#include "common_compiler_test.h"
#include "gtest/gtest.h"
#include "linker/elf_builder.h"
#include "linker/file_output_stream.h"
@@ -39,7 +39,7 @@ namespace dwarf {
#define DW_CHECK(substring) Check(substring, false, __FILE__, __LINE__)
#define DW_CHECK_NEXT(substring) Check(substring, true, __FILE__, __LINE__)
-class DwarfTest : public CommonRuntimeTest {
+class DwarfTest : public CommonCompilerTest {
public:
static constexpr bool kPrintObjdumpOutput = false; // debugging.
diff --git a/compiler/debug/elf_debug_info_writer.h b/compiler/debug/elf_debug_info_writer.h
index f2002a0af6..bda7108c74 100644
--- a/compiler/debug/elf_debug_info_writer.h
+++ b/compiler/debug/elf_debug_info_writer.h
@@ -208,10 +208,8 @@ class ElfCompilationUnitWriter {
std::vector<DexRegisterMap> dex_reg_maps;
if (accessor.HasCodeItem() && mi->code_info != nullptr) {
code_info.reset(new CodeInfo(mi->code_info));
- for (size_t s = 0; s < code_info->GetNumberOfStackMaps(); ++s) {
- const StackMap stack_map = code_info->GetStackMapAt(s);
- dex_reg_maps.push_back(code_info->GetDexRegisterMapOf(
- stack_map, accessor.RegistersSize()));
+ for (StackMap stack_map : code_info->GetStackMaps()) {
+ dex_reg_maps.push_back(code_info->GetDexRegisterMapOf(stack_map));
}
}
diff --git a/compiler/debug/elf_debug_line_writer.h b/compiler/debug/elf_debug_line_writer.h
index a7adab5506..3d78943cd0 100644
--- a/compiler/debug/elf_debug_line_writer.h
+++ b/compiler/debug/elf_debug_line_writer.h
@@ -101,9 +101,7 @@ class ElfDebugLineWriter {
// Use stack maps to create mapping table from pc to dex.
const CodeInfo code_info(mi->code_info);
pc2dex_map.reserve(code_info.GetNumberOfStackMaps());
- for (uint32_t s = 0; s < code_info.GetNumberOfStackMaps(); s++) {
- StackMap stack_map = code_info.GetStackMapAt(s);
- DCHECK(stack_map.IsValid());
+ for (StackMap stack_map : code_info.GetStackMaps()) {
const uint32_t pc = stack_map.GetNativePcOffset(isa);
const int32_t dex = stack_map.GetDexPc();
pc2dex_map.push_back({pc, dex});
diff --git a/compiler/debug/elf_debug_loc_writer.h b/compiler/debug/elf_debug_loc_writer.h
index 8cb4e55bbc..b663291b4d 100644
--- a/compiler/debug/elf_debug_loc_writer.h
+++ b/compiler/debug/elf_debug_loc_writer.h
@@ -147,11 +147,11 @@ static std::vector<VariableLocation> GetVariableLocations(
DexRegisterLocation reg_hi = DexRegisterLocation::None();
DCHECK_LT(stack_map_index, dex_register_maps.size());
DexRegisterMap dex_register_map = dex_register_maps[stack_map_index];
- DCHECK(dex_register_map.IsValid());
+ DCHECK(!dex_register_map.empty());
CodeItemDataAccessor accessor(*method_info->dex_file, method_info->code_item);
- reg_lo = dex_register_map.GetDexRegisterLocation(vreg);
+ reg_lo = dex_register_map[vreg];
if (is64bitValue) {
- reg_hi = dex_register_map.GetDexRegisterLocation(vreg + 1);
+ reg_hi = dex_register_map[vreg + 1];
}
// Add location entry for this address range.
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index fb6a72b1c5..fcaa0cdd07 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -31,6 +31,7 @@
#include "dex/dex_instruction-inl.h"
#include "dex_to_dex_decompiler.h"
#include "driver/compiler_driver.h"
+#include "driver/compiler_options.h"
#include "driver/dex_compilation_unit.h"
#include "mirror/dex_cache.h"
#include "quicken_info.h"
@@ -609,7 +610,7 @@ CompiledMethod* DexToDexCompiler::CompileMethod(
}
// Create a `CompiledMethod`, with the quickened information in the vmap table.
- InstructionSet instruction_set = driver_->GetInstructionSet();
+ InstructionSet instruction_set = driver_->GetCompilerOptions().GetInstructionSet();
if (instruction_set == InstructionSet::kThumb2) {
// Don't use the thumb2 instruction set to avoid the one off code delta.
instruction_set = InstructionSet::kArm;
diff --git a/compiler/dex/dex_to_dex_decompiler_test.cc b/compiler/dex/dex_to_dex_decompiler_test.cc
index 1fe42ad531..4f83d605a3 100644
--- a/compiler/dex/dex_to_dex_decompiler_test.cc
+++ b/compiler/dex/dex_to_dex_decompiler_test.cc
@@ -16,6 +16,7 @@
#include "dex_to_dex_decompiler.h"
+#include "base/casts.h"
#include "class_linker.h"
#include "common_compiler_test.h"
#include "compiled_method-inl.h"
@@ -26,6 +27,7 @@
#include "driver/compiler_options.h"
#include "handle_scope-inl.h"
#include "mirror/class_loader.h"
+#include "quick_compiler_callbacks.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
#include "thread.h"
@@ -43,9 +45,9 @@ class DexToDexDecompilerTest : public CommonCompilerTest {
compiler_options_->SetCompilerFilter(CompilerFilter::kQuicken);
// Create the main VerifierDeps, here instead of in the compiler since we want to aggregate
// the results for all the dex files, not just the results for the current dex file.
- Runtime::Current()->GetCompilerCallbacks()->SetVerifierDeps(
+ down_cast<QuickCompilerCallbacks*>(Runtime::Current()->GetCompilerCallbacks())->SetVerifierDeps(
new verifier::VerifierDeps(GetDexFiles(class_loader)));
- compiler_driver_->SetDexFilesForOatFile(GetDexFiles(class_loader));
+ SetDexFilesForOatFile(GetDexFiles(class_loader));
compiler_driver_->CompileAll(class_loader, GetDexFiles(class_loader), &timings);
}
@@ -82,9 +84,8 @@ class DexToDexDecompilerTest : public CommonCompilerTest {
ASSERT_NE(0, cmp);
// Unquicken the dex file.
- for (uint32_t i = 0; i < updated_dex_file->NumClassDefs(); ++i) {
+ for (ClassAccessor accessor : updated_dex_file->GetClasses()) {
// Unquicken each method.
- ClassAccessor accessor(*updated_dex_file, updated_dex_file->GetClassDef(i));
for (const ClassAccessor::Method& method : accessor.GetMethods()) {
CompiledMethod* compiled_method = compiler_driver_->GetCompiledMethod(
method.GetReference());
diff --git a/compiler/dex/inline_method_analyser.cc b/compiler/dex/inline_method_analyser.cc
index dc044c1210..fe8b766d0f 100644
--- a/compiler/dex/inline_method_analyser.cc
+++ b/compiler/dex/inline_method_analyser.cc
@@ -724,7 +724,8 @@ bool InlineMethodAnalyser::ComputeSpecialAccessorInfo(ArtMethod* method,
return false;
}
DCHECK_GE(field->GetOffset().Int32Value(), 0);
- // Do not interleave function calls with bit field writes to placate valgrind. Bug: 27552451.
+ // Historical note: We made sure not to interleave function calls with bit field writes to
+ // placate Valgrind. Bug: 27552451.
uint32_t field_offset = field->GetOffset().Uint32Value();
bool is_volatile = field->IsVolatile();
result->field_idx = field_idx;
diff --git a/compiler/driver/compiled_method_storage.cc b/compiler/driver/compiled_method_storage.cc
index aa8277edb4..d56b135aca 100644
--- a/compiler/driver/compiled_method_storage.cc
+++ b/compiler/driver/compiled_method_storage.cc
@@ -21,6 +21,7 @@
#include <android-base/logging.h>
+#include "base/data_hash.h"
#include "base/utils.h"
#include "compiled_method.h"
#include "linker/linker_patch.h"
@@ -80,65 +81,7 @@ class CompiledMethodStorage::DedupeHashFunc {
public:
size_t operator()(const ArrayRef<ContentType>& array) const {
- const uint8_t* data = reinterpret_cast<const uint8_t*>(array.data());
- // TODO: More reasonable assertion.
- // static_assert(IsPowerOfTwo(sizeof(ContentType)),
- // "ContentType is not power of two, don't know whether array layout is as assumed");
- uint32_t len = sizeof(ContentType) * array.size();
- if (kUseMurmur3Hash) {
- static constexpr uint32_t c1 = 0xcc9e2d51;
- static constexpr uint32_t c2 = 0x1b873593;
- static constexpr uint32_t r1 = 15;
- static constexpr uint32_t r2 = 13;
- static constexpr uint32_t m = 5;
- static constexpr uint32_t n = 0xe6546b64;
-
- uint32_t hash = 0;
-
- const int nblocks = len / 4;
- typedef __attribute__((__aligned__(1))) uint32_t unaligned_uint32_t;
- const unaligned_uint32_t *blocks = reinterpret_cast<const uint32_t*>(data);
- int i;
- for (i = 0; i < nblocks; i++) {
- uint32_t k = blocks[i];
- k *= c1;
- k = (k << r1) | (k >> (32 - r1));
- k *= c2;
-
- hash ^= k;
- hash = ((hash << r2) | (hash >> (32 - r2))) * m + n;
- }
-
- const uint8_t *tail = reinterpret_cast<const uint8_t*>(data + nblocks * 4);
- uint32_t k1 = 0;
-
- switch (len & 3) {
- case 3:
- k1 ^= tail[2] << 16;
- FALLTHROUGH_INTENDED;
- case 2:
- k1 ^= tail[1] << 8;
- FALLTHROUGH_INTENDED;
- case 1:
- k1 ^= tail[0];
-
- k1 *= c1;
- k1 = (k1 << r1) | (k1 >> (32 - r1));
- k1 *= c2;
- hash ^= k1;
- }
-
- hash ^= len;
- hash ^= (hash >> 16);
- hash *= 0x85ebca6b;
- hash ^= (hash >> 13);
- hash *= 0xc2b2ae35;
- hash ^= (hash >> 16);
-
- return hash;
- } else {
- return HashBytes(data, len);
- }
+ return DataHash()(array);
}
};
diff --git a/compiler/driver/compiled_method_storage_test.cc b/compiler/driver/compiled_method_storage_test.cc
index 42fbba5109..aed04f9c75 100644
--- a/compiler/driver/compiled_method_storage_test.cc
+++ b/compiler/driver/compiled_method_storage_test.cc
@@ -31,8 +31,6 @@ TEST(CompiledMethodStorage, Deduplicate) {
CompilerDriver driver(&compiler_options,
&verification_results,
Compiler::kOptimizing,
- /* instruction_set_ */ InstructionSet::kNone,
- /* instruction_set_features */ nullptr,
/* image_classes */ nullptr,
/* thread_count */ 1u,
/* swap_fd */ -1,
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 653e9edb45..7e6fdaf633 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -67,7 +67,6 @@
#include "mirror/object-refvisitor-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/throwable.h"
-#include "nativehelper/ScopedLocalRef.h"
#include "object_lock.h"
#include "profile/profile_compilation_info.h"
#include "runtime.h"
@@ -262,9 +261,7 @@ CompilerDriver::CompilerDriver(
const CompilerOptions* compiler_options,
VerificationResults* verification_results,
Compiler::Kind compiler_kind,
- InstructionSet instruction_set,
- const InstructionSetFeatures* instruction_set_features,
- std::unordered_set<std::string>* image_classes,
+ HashSet<std::string>* image_classes,
size_t thread_count,
int swap_fd,
const ProfileCompilationInfo* profile_compilation_info)
@@ -272,18 +269,14 @@ CompilerDriver::CompilerDriver(
verification_results_(verification_results),
compiler_(Compiler::Create(this, compiler_kind)),
compiler_kind_(compiler_kind),
- instruction_set_(
- instruction_set == InstructionSet::kArm ? InstructionSet::kThumb2 : instruction_set),
- instruction_set_features_(instruction_set_features),
requires_constructor_barrier_lock_("constructor barrier lock"),
non_relative_linker_patch_count_(0u),
- image_classes_(image_classes),
+ image_classes_(std::move(image_classes)),
number_of_soft_verifier_failures_(0),
had_hard_verifier_failure_(false),
parallel_thread_count_(thread_count),
stats_(new AOTCompilationStats),
compiler_context_(nullptr),
- support_boot_image_fixup_(true),
compiled_method_storage_(swap_fd),
profile_compilation_info_(profile_compilation_info),
max_arena_alloc_(0),
@@ -293,7 +286,7 @@ CompilerDriver::CompilerDriver(
compiler_->Init();
if (GetCompilerOptions().IsBootImage()) {
- CHECK(image_classes_.get() != nullptr) << "Expected image classes for boot image";
+ CHECK(image_classes_ != nullptr) << "Expected image classes for boot image";
}
compiled_method_storage_.SetDedupeEnabled(compiler_options_->DeduplicateCode());
@@ -310,13 +303,15 @@ CompilerDriver::~CompilerDriver() {
}
-#define CREATE_TRAMPOLINE(type, abi, offset) \
- if (Is64BitInstructionSet(instruction_set_)) { \
- return CreateTrampoline64(instruction_set_, abi, \
- type ## _ENTRYPOINT_OFFSET(PointerSize::k64, offset)); \
- } else { \
- return CreateTrampoline32(instruction_set_, abi, \
- type ## _ENTRYPOINT_OFFSET(PointerSize::k32, offset)); \
+#define CREATE_TRAMPOLINE(type, abi, offset) \
+ if (Is64BitInstructionSet(GetCompilerOptions().GetInstructionSet())) { \
+ return CreateTrampoline64(GetCompilerOptions().GetInstructionSet(), \
+ abi, \
+ type ## _ENTRYPOINT_OFFSET(PointerSize::k64, offset)); \
+ } else { \
+ return CreateTrampoline32(GetCompilerOptions().GetInstructionSet(), \
+ abi, \
+ type ## _ENTRYPOINT_OFFSET(PointerSize::k32, offset)); \
}
std::unique_ptr<const std::vector<uint8_t>> CompilerDriver::CreateJniDlsymLookup() const {
@@ -351,12 +346,6 @@ void CompilerDriver::CompileAll(jobject class_loader,
InitializeThreadPools();
- VLOG(compiler) << "Before precompile " << GetMemoryUsageString(false);
- // Precompile:
- // 1) Load image classes
- // 2) Resolve all classes
- // 3) Attempt to verify all classes
- // 4) Attempt to initialize image classes, and trivially initialized classes
PreCompile(class_loader, dex_files, timings);
if (GetCompilerOptions().IsBootImage()) {
// We don't need to setup the intrinsics for non boot image compilation, as
@@ -608,7 +597,7 @@ static void CompileMethodQuick(
if ((access_flags & kAccNative) != 0) {
// Are we extracting only and have support for generic JNI down calls?
if (!driver->GetCompilerOptions().IsJniCompilationEnabled() &&
- InstructionSetHasGenericJniStub(driver->GetInstructionSet())) {
+ InstructionSetHasGenericJniStub(driver->GetCompilerOptions().GetInstructionSet())) {
// Leaving this empty will trigger the generic JNI version
} else {
// Query any JNI optimization annotations such as @FastNative or @CriticalNative.
@@ -673,46 +662,24 @@ static void CompileMethodQuick(
quick_fn);
}
-void CompilerDriver::CompileOne(Thread* self, ArtMethod* method, TimingLogger* timings) {
- DCHECK(!Runtime::Current()->IsStarted());
- jobject jclass_loader;
- const DexFile* dex_file;
- uint16_t class_def_idx;
- uint32_t method_idx = method->GetDexMethodIndex();
- uint32_t access_flags = method->GetAccessFlags();
- InvokeType invoke_type = method->GetInvokeType();
- StackHandleScope<2> hs(self);
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(method->GetDexCache()));
- Handle<mirror::ClassLoader> class_loader(
- hs.NewHandle(method->GetDeclaringClass()->GetClassLoader()));
- {
- ScopedObjectAccessUnchecked soa(self);
- ScopedLocalRef<jobject> local_class_loader(
- soa.Env(), soa.AddLocalReference<jobject>(class_loader.Get()));
- jclass_loader = soa.Env()->NewGlobalRef(local_class_loader.get());
- // Find the dex_file
- dex_file = method->GetDexFile();
- class_def_idx = method->GetClassDefIndex();
- }
- const DexFile::CodeItem* code_item = dex_file->GetCodeItem(method->GetCodeItemOffset());
-
- // Go to native so that we don't block GC during compilation.
- ScopedThreadSuspension sts(self, kNative);
-
- std::vector<const DexFile*> dex_files;
- dex_files.push_back(dex_file);
-
- InitializeThreadPools();
-
- PreCompile(jclass_loader, dex_files, timings);
-
+// Compile a single Method. (For testing only.)
+void CompilerDriver::CompileOne(Thread* self,
+ jobject class_loader,
+ const DexFile& dex_file,
+ uint16_t class_def_idx,
+ uint32_t method_idx,
+ uint32_t access_flags,
+ InvokeType invoke_type,
+ const DexFile::CodeItem* code_item,
+ Handle<mirror::DexCache> dex_cache,
+ Handle<mirror::ClassLoader> h_class_loader) {
// Can we run DEX-to-DEX compiler on this class ?
optimizer::DexToDexCompiler::CompilationLevel dex_to_dex_compilation_level =
GetDexToDexCompilationLevel(self,
*this,
- jclass_loader,
- *dex_file,
- dex_file->GetClassDef(class_def_idx));
+ class_loader,
+ dex_file,
+ dex_file.GetClassDef(class_def_idx));
CompileMethodQuick(self,
this,
@@ -721,8 +688,8 @@ void CompilerDriver::CompileOne(Thread* self, ArtMethod* method, TimingLogger* t
invoke_type,
class_def_idx,
method_idx,
- class_loader,
- *dex_file,
+ h_class_loader,
+ dex_file,
dex_to_dex_compilation_level,
true,
dex_cache);
@@ -737,17 +704,13 @@ void CompilerDriver::CompileOne(Thread* self, ArtMethod* method, TimingLogger* t
invoke_type,
class_def_idx,
method_idx,
- class_loader,
- *dex_file,
+ h_class_loader,
+ dex_file,
dex_to_dex_compilation_level,
true,
dex_cache);
dex_to_dex_compiler_.ClearState();
}
-
- FreeThreadPools();
-
- self->GetJniEnv()->DeleteGlobalRef(jclass_loader);
}
void CompilerDriver::Resolve(jobject class_loader,
@@ -838,7 +801,7 @@ static void InitializeTypeCheckBitstrings(CompilerDriver* driver,
// primitive) classes. We may reconsider this in future if it's deemed to be beneficial.
// And we cannot use it for classes outside the boot image as we do not know the runtime
// value of their bitstring when compiling (it may not even get assigned at runtime).
- if (descriptor[0] == 'L' && driver->IsImageClass(descriptor)) {
+ if (descriptor[0] == 'L' && driver->GetCompilerOptions().IsImageClass(descriptor)) {
ObjPtr<mirror::Class> klass =
class_linker->LookupResolvedType(type_index,
dex_cache.Get(),
@@ -919,6 +882,20 @@ void CompilerDriver::PreCompile(jobject class_loader,
TimingLogger* timings) {
CheckThreadPools();
+ VLOG(compiler) << "Before precompile " << GetMemoryUsageString(false);
+
+ compiled_classes_.AddDexFiles(GetCompilerOptions().GetDexFilesForOatFile());
+ dex_to_dex_compiler_.SetDexFiles(GetCompilerOptions().GetDexFilesForOatFile());
+
+ // Precompile:
+ // 1) Load image classes.
+ // 2) Resolve all classes.
+ // 3) For deterministic boot image, resolve strings for const-string instructions.
+ // 4) Attempt to verify all classes.
+ // 5) Attempt to initialize image classes, and trivially initialized classes.
+ // 6) Update the set of image classes.
+ // 7) For deterministic boot image, initialize bitstrings for type checking.
+
LoadImageClasses(timings);
VLOG(compiler) << "LoadImageClasses: " << GetMemoryUsageString(false);
@@ -988,21 +965,11 @@ void CompilerDriver::PreCompile(jobject class_loader,
}
}
-bool CompilerDriver::IsImageClass(const char* descriptor) const {
- if (image_classes_ != nullptr) {
- // If we have a set of image classes, use those.
- return image_classes_->find(descriptor) != image_classes_->end();
- }
- // No set of image classes, assume we include all the classes.
- // NOTE: Currently only reachable from InitImageMethodVisitor for the app image case.
- return !GetCompilerOptions().IsBootImage();
-}
-
bool CompilerDriver::IsClassToCompile(const char* descriptor) const {
if (classes_to_compile_ == nullptr) {
return true;
}
- return classes_to_compile_->find(descriptor) != classes_to_compile_->end();
+ return classes_to_compile_->find(StringPiece(descriptor)) != classes_to_compile_->end();
}
bool CompilerDriver::ShouldCompileBasedOnProfile(const MethodReference& method_ref) const {
@@ -1091,7 +1058,7 @@ class ResolveCatchBlockExceptionsClassVisitor : public ClassVisitor {
class RecordImageClassesVisitor : public ClassVisitor {
public:
- explicit RecordImageClassesVisitor(std::unordered_set<std::string>* image_classes)
+ explicit RecordImageClassesVisitor(HashSet<std::string>* image_classes)
: image_classes_(image_classes) {}
bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -1101,7 +1068,7 @@ class RecordImageClassesVisitor : public ClassVisitor {
}
private:
- std::unordered_set<std::string>* const image_classes_;
+ HashSet<std::string>* const image_classes_;
};
// Make a list of descriptors for classes to include in the image
@@ -1116,7 +1083,7 @@ void CompilerDriver::LoadImageClasses(TimingLogger* timings) {
Thread* self = Thread::Current();
ScopedObjectAccess soa(self);
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- CHECK(image_classes_.get() != nullptr);
+ CHECK(image_classes_ != nullptr);
for (auto it = image_classes_->begin(), end = image_classes_->end(); it != end;) {
const std::string& descriptor(*it);
StackHandleScope<1> hs(self);
@@ -1124,7 +1091,7 @@ void CompilerDriver::LoadImageClasses(TimingLogger* timings) {
hs.NewHandle(class_linker->FindSystemClass(self, descriptor.c_str())));
if (klass == nullptr) {
VLOG(compiler) << "Failed to find class " << descriptor;
- image_classes_->erase(it++);
+ it = image_classes_->erase(it);
self->ClearException();
} else {
++it;
@@ -1174,15 +1141,15 @@ void CompilerDriver::LoadImageClasses(TimingLogger* timings) {
// We walk the roots looking for classes so that we'll pick up the
// above classes plus any classes them depend on such super
// classes, interfaces, and the required ClassLinker roots.
- RecordImageClassesVisitor visitor(image_classes_.get());
+ RecordImageClassesVisitor visitor(image_classes_);
class_linker->VisitClasses(&visitor);
- CHECK_NE(image_classes_->size(), 0U);
+ CHECK(!image_classes_->empty());
}
static void MaybeAddToImageClasses(Thread* self,
ObjPtr<mirror::Class> klass,
- std::unordered_set<std::string>* image_classes)
+ HashSet<std::string>* image_classes)
REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK_EQ(self, Thread::Current());
StackHandleScope<1> hs(self);
@@ -1190,11 +1157,10 @@ static void MaybeAddToImageClasses(Thread* self,
const PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
while (!klass->IsObjectClass()) {
const char* descriptor = klass->GetDescriptor(&temp);
- std::pair<std::unordered_set<std::string>::iterator, bool> result =
- image_classes->insert(descriptor);
- if (!result.second) { // Previously inserted.
- break;
+ if (image_classes->find(StringPiece(descriptor)) != image_classes->end()) {
+ break; // Previously inserted.
}
+ image_classes->insert(descriptor);
VLOG(compiler) << "Adding " << descriptor << " to image classes";
for (size_t i = 0, num_interfaces = klass->NumDirectInterfaces(); i != num_interfaces; ++i) {
ObjPtr<mirror::Class> interface = mirror::Class::GetDirectInterface(self, klass, i);
@@ -1216,7 +1182,7 @@ static void MaybeAddToImageClasses(Thread* self,
class ClinitImageUpdate {
public:
static ClinitImageUpdate* Create(VariableSizedHandleScope& hs,
- std::unordered_set<std::string>* image_class_descriptors,
+ HashSet<std::string>* image_class_descriptors,
Thread* self,
ClassLinker* linker) {
std::unique_ptr<ClinitImageUpdate> res(new ClinitImageUpdate(hs,
@@ -1273,7 +1239,7 @@ class ClinitImageUpdate {
bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
std::string temp;
- const char* name = klass->GetDescriptor(&temp);
+ StringPiece name(klass->GetDescriptor(&temp));
if (data_->image_class_descriptors_->find(name) != data_->image_class_descriptors_->end()) {
data_->image_classes_.push_back(hs_.NewHandle(klass));
} else {
@@ -1292,7 +1258,7 @@ class ClinitImageUpdate {
};
ClinitImageUpdate(VariableSizedHandleScope& hs,
- std::unordered_set<std::string>* image_class_descriptors,
+ HashSet<std::string>* image_class_descriptors,
Thread* self,
ClassLinker* linker) REQUIRES_SHARED(Locks::mutator_lock_)
: hs_(hs),
@@ -1339,7 +1305,7 @@ class ClinitImageUpdate {
VariableSizedHandleScope& hs_;
mutable std::vector<Handle<mirror::Class>> to_insert_;
mutable std::unordered_set<mirror::Object*> marked_objects_;
- std::unordered_set<std::string>* const image_class_descriptors_;
+ HashSet<std::string>* const image_class_descriptors_;
std::vector<Handle<mirror::Class>> image_classes_;
Thread* const self_;
const char* old_cause_;
@@ -1359,7 +1325,7 @@ void CompilerDriver::UpdateImageClasses(TimingLogger* timings) {
VariableSizedHandleScope hs(Thread::Current());
std::string error_msg;
std::unique_ptr<ClinitImageUpdate> update(ClinitImageUpdate::Create(hs,
- image_classes_.get(),
+ image_classes_,
Thread::Current(),
runtime->GetClassLinker()));
@@ -1383,7 +1349,7 @@ bool CompilerDriver::CanAssumeClassIsLoaded(mirror::Class* klass) {
}
std::string temp;
const char* descriptor = klass->GetDescriptor(&temp);
- return IsImageClass(descriptor);
+ return GetCompilerOptions().IsImageClass(descriptor);
}
bool CompilerDriver::CanAccessTypeWithoutChecks(ObjPtr<mirror::Class> referrer_class,
@@ -1685,16 +1651,14 @@ static void CheckAndClearResolveException(Thread* self)
bool CompilerDriver::RequiresConstructorBarrier(const DexFile& dex_file,
uint16_t class_def_idx) const {
- ClassAccessor accessor(dex_file, dex_file.GetClassDef(class_def_idx));
- bool has_is_final = false;
+ ClassAccessor accessor(dex_file, class_def_idx);
// We require a constructor barrier if there are final instance fields.
- accessor.VisitFields(/*static*/ VoidFunctor(),
- [&](const ClassAccessor::Field& field) {
+ for (const ClassAccessor::Field& field : accessor.GetInstanceFields()) {
if (field.IsFinal()) {
- has_is_final = true;
+ return true;
}
- });
- return has_is_final;
+ }
+ return false;
}
class ResolveClassFieldsAndMethodsVisitor : public CompilationVisitor {
@@ -1744,7 +1708,7 @@ class ResolveClassFieldsAndMethodsVisitor : public CompilationVisitor {
// fields are assigned within the lock held for class initialization.
bool requires_constructor_barrier = false;
- ClassAccessor accessor(dex_file, class_def);
+ ClassAccessor accessor(dex_file, class_def_index);
// Optionally resolve fields and methods and figure out if we need a constructor barrier.
auto method_visitor = [&](const ClassAccessor::Method& method)
REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -1926,13 +1890,12 @@ bool CompilerDriver::FastVerify(jobject jclass_loader,
// Fetch the list of unverified classes.
const std::set<dex::TypeIndex>& unverified_classes =
verifier_deps->GetUnverifiedClasses(*dex_file);
- uint32_t class_def_idx = 0u;
for (ClassAccessor accessor : dex_file->GetClasses()) {
if (unverified_classes.find(accessor.GetClassIdx()) == unverified_classes.end()) {
if (compiler_only_verifies) {
// Just update the compiled_classes_ map. The compiler doesn't need to resolve
// the type.
- ClassReference ref(dex_file, class_def_idx);
+ ClassReference ref(dex_file, accessor.GetClassDefIndex());
const ClassStatus existing = ClassStatus::kNotReady;
ClassStateTable::InsertResult result =
compiled_classes_.Insert(ref, existing, ClassStatus::kVerified);
@@ -1959,7 +1922,6 @@ bool CompilerDriver::FastVerify(jobject jclass_loader,
class_loader,
soa.Self());
}
- ++class_def_idx;
}
}
return true;
@@ -1986,7 +1948,8 @@ void CompilerDriver::Verify(jobject jclass_loader,
// Create per-thread VerifierDeps to avoid contention on the main one.
// We will merge them after verification.
for (ThreadPoolWorker* worker : parallel_thread_pool_->GetWorkers()) {
- worker->GetThread()->SetVerifierDeps(new verifier::VerifierDeps(dex_files_for_oat_file_));
+ worker->GetThread()->SetVerifierDeps(
+ new verifier::VerifierDeps(GetCompilerOptions().GetDexFilesForOatFile()));
}
}
@@ -2011,7 +1974,7 @@ void CompilerDriver::Verify(jobject jclass_loader,
for (ThreadPoolWorker* worker : parallel_thread_pool_->GetWorkers()) {
verifier::VerifierDeps* thread_deps = worker->GetThread()->GetVerifierDeps();
worker->GetThread()->SetVerifierDeps(nullptr);
- verifier_deps->MergeWith(*thread_deps, dex_files_for_oat_file_);
+ verifier_deps->MergeWith(*thread_deps, GetCompilerOptions().GetDexFilesForOatFile());
delete thread_deps;
}
Thread::Current()->SetVerifierDeps(nullptr);
@@ -2179,8 +2142,9 @@ class SetVerifiedClassVisitor : public CompilationVisitor {
mirror::Class::SetStatus(klass, ClassStatus::kVerified, soa.Self());
// Mark methods as pre-verified. If we don't do this, the interpreter will run with
// access checks.
- klass->SetSkipAccessChecksFlagOnAllMethods(
- GetInstructionSetPointerSize(manager_->GetCompiler()->GetInstructionSet()));
+ InstructionSet instruction_set =
+ manager_->GetCompiler()->GetCompilerOptions().GetInstructionSet();
+ klass->SetSkipAccessChecksFlagOnAllMethods(GetInstructionSetPointerSize(instruction_set));
klass->SetVerificationAttempted();
}
// Record the final class status if necessary.
@@ -2297,7 +2261,7 @@ class InitializeClassVisitor : public CompilationVisitor {
(is_app_image || is_boot_image) &&
is_superclass_initialized &&
!too_many_encoded_fields &&
- manager_->GetCompiler()->IsImageClass(descriptor)) {
+ manager_->GetCompiler()->GetCompilerOptions().IsImageClass(descriptor)) {
bool can_init_static_fields = false;
if (is_boot_image) {
// We need to initialize static fields, we only do this for image classes that aren't
@@ -2700,7 +2664,7 @@ static void CompileDexFile(CompilerDriver* driver,
jobject jclass_loader = context.GetClassLoader();
ClassReference ref(&dex_file, class_def_index);
const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
- ClassAccessor accessor(dex_file, class_def);
+ ClassAccessor accessor(dex_file, class_def_index);
// Skip compiling classes with generic verifier failures since they will still fail at runtime
if (context.GetCompiler()->GetVerificationResults()->IsClassRejected(ref)) {
return;
@@ -2884,7 +2848,7 @@ void CompilerDriver::RecordClassStatus(const ClassReference& ref, ClassStatus st
if (kIsDebugBuild) {
// Check to make sure it's not a dex file for an oat file we are compiling since these
// should always succeed. These do not include classes in for used libraries.
- for (const DexFile* dex_file : GetDexFilesForOatFile()) {
+ for (const DexFile* dex_file : GetCompilerOptions().GetDexFilesForOatFile()) {
CHECK_NE(ref.dex_file, dex_file) << ref.dex_file->GetLocation();
}
}
@@ -2983,18 +2947,6 @@ std::string CompilerDriver::GetMemoryUsageString(bool extended) const {
return oss.str();
}
-bool CompilerDriver::MayInlineInternal(const DexFile* inlined_from,
- const DexFile* inlined_into) const {
- // We're not allowed to inline across dex files if we're the no-inline-from dex file.
- if (inlined_from != inlined_into &&
- compiler_options_->GetNoInlineFromDexFile() != nullptr &&
- ContainsElement(*compiler_options_->GetNoInlineFromDexFile(), inlined_from)) {
- return false;
- }
-
- return true;
-}
-
void CompilerDriver::InitializeThreadPools() {
size_t parallel_count = parallel_thread_count_ > 0 ? parallel_thread_count_ - 1 : 0;
parallel_thread_pool_.reset(
@@ -3007,12 +2959,6 @@ void CompilerDriver::FreeThreadPools() {
single_thread_pool_.reset();
}
-void CompilerDriver::SetDexFilesForOatFile(const std::vector<const DexFile*>& dex_files) {
- dex_files_for_oat_file_ = dex_files;
- compiled_classes_.AddDexFiles(dex_files);
- dex_to_dex_compiler_.SetDexFiles(dex_files);
-}
-
void CompilerDriver::SetClasspathDexFiles(const std::vector<const DexFile*>& dex_files) {
classpath_classes_.AddDexFiles(dex_files);
}
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 55f3561e3a..0a8754a6a6 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -20,7 +20,6 @@
#include <atomic>
#include <set>
#include <string>
-#include <unordered_set>
#include <vector>
#include "android-base/strings.h"
@@ -28,6 +27,7 @@
#include "arch/instruction_set.h"
#include "base/array_ref.h"
#include "base/bit_utils.h"
+#include "base/hash_set.h"
#include "base/mutex.h"
#include "base/os.h"
#include "base/quasi_atomic.h"
@@ -97,45 +97,36 @@ class CompilerDriver {
CompilerDriver(const CompilerOptions* compiler_options,
VerificationResults* verification_results,
Compiler::Kind compiler_kind,
- InstructionSet instruction_set,
- const InstructionSetFeatures* instruction_set_features,
- std::unordered_set<std::string>* image_classes,
+ HashSet<std::string>* image_classes,
size_t thread_count,
int swap_fd,
const ProfileCompilationInfo* profile_compilation_info);
~CompilerDriver();
- // Set dex files associated with the oat file being compiled.
- void SetDexFilesForOatFile(const std::vector<const DexFile*>& dex_files);
-
// Set dex files classpath.
void SetClasspathDexFiles(const std::vector<const DexFile*>& dex_files);
- // Get dex files associated with the the oat file being compiled.
- ArrayRef<const DexFile* const> GetDexFilesForOatFile() const {
- return ArrayRef<const DexFile* const>(dex_files_for_oat_file_);
- }
-
void CompileAll(jobject class_loader,
const std::vector<const DexFile*>& dex_files,
TimingLogger* timings)
REQUIRES(!Locks::mutator_lock_);
- // Compile a single Method.
- void CompileOne(Thread* self, ArtMethod* method, TimingLogger* timings)
- REQUIRES_SHARED(Locks::mutator_lock_);
+ // Compile a single Method. (For testing only.)
+ void CompileOne(Thread* self,
+ jobject class_loader,
+ const DexFile& dex_file,
+ uint16_t class_def_idx,
+ uint32_t method_idx,
+ uint32_t access_flags,
+ InvokeType invoke_type,
+ const DexFile::CodeItem* code_item,
+ Handle<mirror::DexCache> dex_cache,
+ Handle<mirror::ClassLoader> h_class_loader)
+ REQUIRES(!Locks::mutator_lock_);
VerificationResults* GetVerificationResults() const;
- InstructionSet GetInstructionSet() const {
- return instruction_set_;
- }
-
- const InstructionSetFeatures* GetInstructionSetFeatures() const {
- return instruction_set_features_;
- }
-
const CompilerOptions& GetCompilerOptions() const {
return *compiler_options_;
}
@@ -144,10 +135,6 @@ class CompilerDriver {
return compiler_.get();
}
- const std::unordered_set<std::string>* GetImageClasses() const {
- return image_classes_.get();
- }
-
// Generate the trampolines that are invoked by unresolved direct methods.
std::unique_ptr<const std::vector<uint8_t>> CreateJniDlsymLookup() const;
std::unique_ptr<const std::vector<uint8_t>> CreateQuickGenericJniTrampoline() const;
@@ -280,14 +267,6 @@ class CompilerDriver {
const VerifiedMethod* GetVerifiedMethod(const DexFile* dex_file, uint32_t method_idx) const;
bool IsSafeCast(const DexCompilationUnit* mUnit, uint32_t dex_pc);
- bool GetSupportBootImageFixup() const {
- return support_boot_image_fixup_;
- }
-
- void SetSupportBootImageFixup(bool support_boot_image_fixup) {
- support_boot_image_fixup_ = support_boot_image_fixup;
- }
-
void SetCompilerContext(void* compiler_context) {
compiler_context_ = compiler_context;
}
@@ -308,9 +287,6 @@ class CompilerDriver {
return compiled_method_storage_.DedupeEnabled();
}
- // Checks if class specified by type_idx is one of the image_classes_
- bool IsImageClass(const char* descriptor) const;
-
// Checks whether the provided class should be compiled, i.e., is in classes_to_compile_.
bool IsClassToCompile(const char* descriptor) const;
@@ -352,13 +328,6 @@ class CompilerDriver {
bool CanAssumeClassIsLoaded(mirror::Class* klass)
REQUIRES_SHARED(Locks::mutator_lock_);
- bool MayInline(const DexFile* inlined_from, const DexFile* inlined_into) const {
- if (!kIsTargetBuild) {
- return MayInlineInternal(inlined_from, inlined_into);
- }
- return true;
- }
-
const ProfileCompilationInfo* GetProfileCompilationInfo() const {
return profile_compilation_info_;
}
@@ -452,8 +421,6 @@ class CompilerDriver {
const std::vector<const DexFile*>& dex_files,
TimingLogger* timings);
- bool MayInlineInternal(const DexFile* inlined_from, const DexFile* inlined_into) const;
-
void InitializeThreadPools();
void FreeThreadPools();
void CheckThreadPools();
@@ -466,9 +433,6 @@ class CompilerDriver {
std::unique_ptr<Compiler> compiler_;
Compiler::Kind compiler_kind_;
- const InstructionSet instruction_set_;
- const InstructionSetFeatures* const instruction_set_features_;
-
// All class references that require constructor barriers. If the class reference is not in the
// set then the result has not yet been computed.
mutable ReaderWriterMutex requires_constructor_barrier_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
@@ -491,22 +455,24 @@ class CompilerDriver {
// in the .oat_patches ELF section if requested in the compiler options.
Atomic<size_t> non_relative_linker_patch_count_;
- // If image_ is true, specifies the classes that will be included in the image.
- // Note if image_classes_ is null, all classes are included in the image.
- std::unique_ptr<std::unordered_set<std::string>> image_classes_;
+ // Image classes to be updated by PreCompile().
+ // TODO: Remove this member which is a non-const pointer to the CompilerOptions' data.
+ // Pass this explicitly to the PreCompile() which should be called directly from
+ // Dex2Oat rather than implicitly by CompileAll().
+ HashSet<std::string>* image_classes_;
// Specifies the classes that will be compiled. Note that if classes_to_compile_ is null,
// all classes are eligible for compilation (duplication filters etc. will still apply).
// This option may be restricted to the boot image, depending on a flag in the implementation.
- std::unique_ptr<std::unordered_set<std::string>> classes_to_compile_;
+ std::unique_ptr<HashSet<std::string>> classes_to_compile_;
std::atomic<uint32_t> number_of_soft_verifier_failures_;
bool had_hard_verifier_failure_;
// A thread pool that can (potentially) run tasks in parallel.
- std::unique_ptr<ThreadPool> parallel_thread_pool_;
size_t parallel_thread_count_;
+ std::unique_ptr<ThreadPool> parallel_thread_pool_;
// A thread pool that guarantees running single-threaded on the main thread.
std::unique_ptr<ThreadPool> single_thread_pool_;
@@ -519,11 +485,6 @@ class CompilerDriver {
void* compiler_context_;
- bool support_boot_image_fixup_;
-
- // List of dex files associates with the oat file.
- std::vector<const DexFile*> dex_files_for_oat_file_;
-
CompiledMethodStorage compiled_method_storage_;
// Info for profile guided compilation.
@@ -534,6 +495,7 @@ class CompilerDriver {
// Compiler for dex to dex (quickening).
optimizer::DexToDexCompiler dex_to_dex_compiler_;
+ friend class CommonCompilerTest;
friend class CompileClassVisitor;
friend class DexToDexDecompilerTest;
friend class verifier::VerifierDepsTest;
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index 491e61f9b5..2eeb4399db 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -46,7 +46,7 @@ class CompilerDriverTest : public CommonCompilerTest {
TimingLogger timings("CompilerDriverTest::CompileAll", false, false);
TimingLogger::ScopedTiming t(__FUNCTION__, &timings);
dex_files_ = GetDexFiles(class_loader);
- compiler_driver_->SetDexFilesForOatFile(dex_files_);;
+ SetDexFilesForOatFile(dex_files_);
compiler_driver_->CompileAll(class_loader, dex_files_, &timings);
t.NewTiming("MakeAllExecutable");
MakeAllExecutable(class_loader);
@@ -331,7 +331,7 @@ TEST_F(CompilerDriverVerifyTest, RetryVerifcationStatusCheckVerified) {
ASSERT_GT(dex_files.size(), 0u);
dex_file = dex_files.front();
}
- compiler_driver_->SetDexFilesForOatFile(dex_files);
+ SetDexFilesForOatFile(dex_files);
callbacks_->SetDoesClassUnloading(true, compiler_driver_.get());
ClassReference ref(dex_file, 0u);
// Test that the status is read from the compiler driver as expected.
diff --git a/compiler/driver/compiler_options.cc b/compiler/driver/compiler_options.cc
index 933be4f004..62d547de44 100644
--- a/compiler/driver/compiler_options.cc
+++ b/compiler/driver/compiler_options.cc
@@ -20,6 +20,8 @@
#include "android-base/stringprintf.h"
+#include "arch/instruction_set.h"
+#include "arch/instruction_set_features.h"
#include "base/runtime_debug.h"
#include "base/variant_map.h"
#include "cmdline_parser.h"
@@ -37,11 +39,14 @@ CompilerOptions::CompilerOptions()
tiny_method_threshold_(kDefaultTinyMethodThreshold),
num_dex_methods_threshold_(kDefaultNumDexMethodsThreshold),
inline_max_code_units_(kUnsetInlineMaxCodeUnits),
- no_inline_from_(nullptr),
+ instruction_set_(kRuntimeISA == InstructionSet::kArm ? InstructionSet::kThumb2 : kRuntimeISA),
+ instruction_set_features_(nullptr),
+ no_inline_from_(),
+ dex_files_for_oat_file_(),
+ image_classes_(),
boot_image_(false),
core_image_(false),
app_image_(false),
- top_k_profile_threshold_(kDefaultTopKProfileThreshold),
debuggable_(false),
generate_debug_info_(kDefaultGenerateDebugInfo),
generate_mini_debug_info_(kDefaultGenerateMiniDebugInfo),
@@ -53,6 +58,7 @@ CompilerOptions::CompilerOptions()
dump_timings_(false),
dump_pass_timings_(false),
dump_stats_(false),
+ top_k_profile_threshold_(kDefaultTopKProfileThreshold),
verbose_methods_(),
abort_on_hard_verifier_failure_(false),
abort_on_soft_verifier_failure_(false),
@@ -67,8 +73,8 @@ CompilerOptions::CompilerOptions()
}
CompilerOptions::~CompilerOptions() {
- // The destructor looks empty but it destroys a PassManagerOptions object. We keep it here
- // because we don't want to include the PassManagerOptions definition from the header file.
+ // Everything done by member destructors.
+ // The definitions of classes forward-declared in the header have now been #included.
}
namespace {
@@ -129,4 +135,11 @@ bool CompilerOptions::ParseCompilerOptions(const std::vector<std::string>& optio
#pragma GCC diagnostic pop
+bool CompilerOptions::IsImageClass(const char* descriptor) const {
+ // Historical note: We used to hold the set indirectly and there was a distinction between an
+ // empty set and a null, null meaning to include all classes. However, the distiction has been
+ // removed; if we don't have a profile, we treat it as an empty set of classes. b/77340429
+ return image_classes_.find(StringPiece(descriptor)) != image_classes_.end();
+}
+
} // namespace art
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index cee989b315..601c9140dd 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -17,11 +17,13 @@
#ifndef ART_COMPILER_DRIVER_COMPILER_OPTIONS_H_
#define ART_COMPILER_DRIVER_COMPILER_OPTIONS_H_
+#include <memory>
#include <ostream>
#include <string>
#include <vector>
#include "base/globals.h"
+#include "base/hash_set.h"
#include "base/macros.h"
#include "base/utils.h"
#include "compiler_filter.h"
@@ -29,11 +31,17 @@
namespace art {
+namespace jit {
+class JitCompiler;
+} // namespace jit
+
namespace verifier {
class VerifierDepsTest;
} // namespace verifier
class DexFile;
+enum class InstructionSet;
+class InstructionSetFeatures;
class CompilerOptions FINAL {
public:
@@ -230,10 +238,29 @@ class CompilerOptions FINAL {
return abort_on_soft_verifier_failure_;
}
- const std::vector<const DexFile*>* GetNoInlineFromDexFile() const {
+ InstructionSet GetInstructionSet() const {
+ return instruction_set_;
+ }
+
+ const InstructionSetFeatures* GetInstructionSetFeatures() const {
+ return instruction_set_features_.get();
+ }
+
+
+ const std::vector<const DexFile*>& GetNoInlineFromDexFile() const {
return no_inline_from_;
}
+ const std::vector<const DexFile*>& GetDexFilesForOatFile() const {
+ return dex_files_for_oat_file_;
+ }
+
+ const HashSet<std::string>& GetImageClasses() const {
+ return image_classes_;
+ }
+
+ bool IsImageClass(const char* descriptor) const;
+
bool ParseCompilerOptions(const std::vector<std::string>& options,
bool ignore_unrecognized,
std::string* error_msg);
@@ -301,16 +328,24 @@ class CompilerOptions FINAL {
size_t num_dex_methods_threshold_;
size_t inline_max_code_units_;
- // Dex files from which we should not inline code.
+ InstructionSet instruction_set_;
+ std::unique_ptr<const InstructionSetFeatures> instruction_set_features_;
+
+ // Dex files from which we should not inline code. Does not own the dex files.
// This is usually a very short list (i.e. a single dex file), so we
// prefer vector<> over a lookup-oriented container, such as set<>.
- const std::vector<const DexFile*>* no_inline_from_;
+ std::vector<const DexFile*> no_inline_from_;
+
+ // List of dex files associated with the oat file, empty for JIT.
+ std::vector<const DexFile*> dex_files_for_oat_file_;
+
+ // Image classes, specifies the classes that will be included in the image if creating an image.
+ // Must not be empty for real boot image, only for tests pretending to compile boot image.
+ HashSet<std::string> image_classes_;
bool boot_image_;
bool core_image_;
bool app_image_;
- // When using a profile file only the top K% of the profiled samples will be compiled.
- double top_k_profile_threshold_;
bool debuggable_;
bool generate_debug_info_;
bool generate_mini_debug_info_;
@@ -323,6 +358,9 @@ class CompilerOptions FINAL {
bool dump_pass_timings_;
bool dump_stats_;
+ // When using a profile file only the top K% of the profiled samples will be compiled.
+ double top_k_profile_threshold_;
+
// Vector of methods to have verbose output enabled for.
std::vector<std::string> verbose_methods_;
@@ -362,6 +400,7 @@ class CompilerOptions FINAL {
friend class Dex2Oat;
friend class DexToDexDecompilerTest;
friend class CommonCompilerTest;
+ friend class jit::JitCompiler;
friend class verifier::VerifierDepsTest;
template <class Base>
diff --git a/compiler/exception_test.cc b/compiler/exception_test.cc
index 15c07870a1..b56a991e74 100644
--- a/compiler/exception_test.cc
+++ b/compiler/exception_test.cc
@@ -15,6 +15,7 @@
*/
#include <memory>
+#include <type_traits>
#include "base/arena_allocator.h"
#include "base/callee_save_type.h"
@@ -76,13 +77,10 @@ class ExceptionTest : public CommonRuntimeTest {
ArenaStack arena_stack(&pool);
ScopedArenaAllocator allocator(&arena_stack);
StackMapStream stack_maps(&allocator, kRuntimeISA);
- stack_maps.BeginStackMapEntry(kDexPc,
- native_pc_offset,
- /* register_mask */ 0u,
- /* sp_mask */ nullptr,
- /* num_dex_registers */ 0u,
- /* inlining_depth */ 0u);
+ stack_maps.BeginMethod(4 * sizeof(void*), 0u, 0u, 0u);
+ stack_maps.BeginStackMapEntry(kDexPc, native_pc_offset);
stack_maps.EndStackMapEntry();
+ stack_maps.EndMethod();
const size_t stack_maps_size = stack_maps.PrepareForFillIn();
const size_t header_size = sizeof(OatQuickMethodHeader);
const size_t code_alignment = GetInstructionSetAlignment(kRuntimeISA);
@@ -92,6 +90,7 @@ class ExceptionTest : public CommonRuntimeTest {
MemoryRegion stack_maps_region(&fake_header_code_and_maps_[0], stack_maps_size);
stack_maps.FillInCodeInfo(stack_maps_region);
OatQuickMethodHeader method_header(code_offset, 0u, 4 * sizeof(void*), 0u, 0u, code_size);
+ static_assert(std::is_trivially_copyable<OatQuickMethodHeader>::value, "Cannot use memcpy");
memcpy(&fake_header_code_and_maps_[code_offset - header_size], &method_header, header_size);
std::copy(fake_code_.begin(),
fake_code_.end(),
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 0de00a82fa..a881c5ec98 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -33,6 +33,7 @@
#include "jit/debugger_interface.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
+#include "jit/jit_logger.h"
#include "oat_file-inl.h"
#include "oat_quick_method_header.h"
#include "object_lock.h"
@@ -50,7 +51,7 @@ extern "C" void* jit_load(bool* generate_debug_info) {
VLOG(jit) << "loading jit compiler";
auto* const jit_compiler = JitCompiler::Create();
CHECK(jit_compiler != nullptr);
- *generate_debug_info = jit_compiler->GetCompilerOptions()->GetGenerateDebugInfo();
+ *generate_debug_info = jit_compiler->GetCompilerOptions().GetGenerateDebugInfo();
VLOG(jit) << "Done loading jit compiler";
return jit_compiler;
}
@@ -72,10 +73,11 @@ extern "C" void jit_types_loaded(void* handle, mirror::Class** types, size_t cou
REQUIRES_SHARED(Locks::mutator_lock_) {
auto* jit_compiler = reinterpret_cast<JitCompiler*>(handle);
DCHECK(jit_compiler != nullptr);
- if (jit_compiler->GetCompilerOptions()->GetGenerateDebugInfo()) {
+ const CompilerOptions& compiler_options = jit_compiler->GetCompilerOptions();
+ if (compiler_options.GetGenerateDebugInfo()) {
const ArrayRef<mirror::Class*> types_array(types, count);
std::vector<uint8_t> elf_file = debug::WriteDebugElfFileForClasses(
- kRuntimeISA, jit_compiler->GetCompilerDriver()->GetInstructionSetFeatures(), types_array);
+ kRuntimeISA, compiler_options.GetInstructionSetFeatures(), types_array);
MutexLock mu(Thread::Current(), *Locks::native_debug_interface_lock_);
// We never free debug info for types, so we don't need to provide a handle
// (which would have been otherwise used as identifier to remove it later).
@@ -103,51 +105,56 @@ JitCompiler::JitCompiler() {
// Set debuggability based on the runtime value.
compiler_options_->SetDebuggable(Runtime::Current()->IsJavaDebuggable());
- const InstructionSet instruction_set = kRuntimeISA;
+ const InstructionSet instruction_set = compiler_options_->GetInstructionSet();
+ if (kRuntimeISA == InstructionSet::kArm) {
+ DCHECK_EQ(instruction_set, InstructionSet::kThumb2);
+ } else {
+ DCHECK_EQ(instruction_set, kRuntimeISA);
+ }
+ std::unique_ptr<const InstructionSetFeatures> instruction_set_features;
for (const StringPiece option : Runtime::Current()->GetCompilerOptions()) {
VLOG(compiler) << "JIT compiler option " << option;
std::string error_msg;
if (option.starts_with("--instruction-set-variant=")) {
StringPiece str = option.substr(strlen("--instruction-set-variant=")).data();
VLOG(compiler) << "JIT instruction set variant " << str;
- instruction_set_features_ = InstructionSetFeatures::FromVariant(
+ instruction_set_features = InstructionSetFeatures::FromVariant(
instruction_set, str.as_string(), &error_msg);
- if (instruction_set_features_ == nullptr) {
+ if (instruction_set_features == nullptr) {
LOG(WARNING) << "Error parsing " << option << " message=" << error_msg;
}
} else if (option.starts_with("--instruction-set-features=")) {
StringPiece str = option.substr(strlen("--instruction-set-features=")).data();
VLOG(compiler) << "JIT instruction set features " << str;
- if (instruction_set_features_ == nullptr) {
- instruction_set_features_ = InstructionSetFeatures::FromVariant(
+ if (instruction_set_features == nullptr) {
+ instruction_set_features = InstructionSetFeatures::FromVariant(
instruction_set, "default", &error_msg);
- if (instruction_set_features_ == nullptr) {
+ if (instruction_set_features == nullptr) {
LOG(WARNING) << "Error parsing " << option << " message=" << error_msg;
}
}
- instruction_set_features_ =
- instruction_set_features_->AddFeaturesFromString(str.as_string(), &error_msg);
- if (instruction_set_features_ == nullptr) {
+ instruction_set_features =
+ instruction_set_features->AddFeaturesFromString(str.as_string(), &error_msg);
+ if (instruction_set_features == nullptr) {
LOG(WARNING) << "Error parsing " << option << " message=" << error_msg;
}
}
}
- if (instruction_set_features_ == nullptr) {
- instruction_set_features_ = InstructionSetFeatures::FromCppDefines();
+ if (instruction_set_features == nullptr) {
+ instruction_set_features = InstructionSetFeatures::FromCppDefines();
}
+ compiler_options_->instruction_set_features_ = std::move(instruction_set_features);
+
compiler_driver_.reset(new CompilerDriver(
compiler_options_.get(),
/* verification_results */ nullptr,
Compiler::kOptimizing,
- instruction_set,
- instruction_set_features_.get(),
/* image_classes */ nullptr,
/* thread_count */ 1,
/* swap_fd */ -1,
/* profile_compilation_info */ nullptr));
// Disable dedupe so we can remove compiled methods.
compiler_driver_->SetDedupeEnabled(false);
- compiler_driver_->SetSupportBootImageFixup(false);
size_t thread_count = compiler_driver_->GetThreadCount();
if (compiler_options_->GetGenerateDebugInfo()) {
diff --git a/compiler/jit/jit_compiler.h b/compiler/jit/jit_compiler.h
index 31dc9e2fe5..5840fece2e 100644
--- a/compiler/jit/jit_compiler.h
+++ b/compiler/jit/jit_compiler.h
@@ -18,18 +18,19 @@
#define ART_COMPILER_JIT_JIT_COMPILER_H_
#include "base/mutex.h"
-#include "compiled_method.h"
-#include "driver/compiler_driver.h"
-#include "driver/compiler_options.h"
-#include "jit_logger.h"
namespace art {
class ArtMethod;
-class InstructionSetFeatures;
+class CompiledMethod;
+class CompilerDriver;
+class CompilerOptions;
+class Thread;
namespace jit {
+class JitLogger;
+
class JitCompiler {
public:
static JitCompiler* Create();
@@ -39,8 +40,8 @@ class JitCompiler {
bool CompileMethod(Thread* self, ArtMethod* method, bool osr)
REQUIRES_SHARED(Locks::mutator_lock_);
- CompilerOptions* GetCompilerOptions() const {
- return compiler_options_.get();
+ const CompilerOptions& GetCompilerOptions() const {
+ return *compiler_options_.get();
}
CompilerDriver* GetCompilerDriver() const {
return compiler_driver_.get();
@@ -49,7 +50,6 @@ class JitCompiler {
private:
std::unique_ptr<CompilerOptions> compiler_options_;
std::unique_ptr<CompilerDriver> compiler_driver_;
- std::unique_ptr<const InstructionSetFeatures> instruction_set_features_;
std::unique_ptr<JitLogger> jit_logger_;
JitCompiler();
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index 0902bf2bce..62e8e0264f 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -34,7 +34,6 @@
#include "class_linker.h"
#include "debug/dwarf/debug_frame_opcode_writer.h"
#include "dex/dex_file-inl.h"
-#include "driver/compiler_driver.h"
#include "driver/compiler_options.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "jni/jni_env_ext.h"
@@ -115,7 +114,7 @@ static ThreadOffset<kPointerSize> GetJniEntrypointThreadOffset(JniEntrypoint whi
// convention.
//
template <PointerSize kPointerSize>
-static JniCompiledMethod ArtJniCompileMethodInternal(CompilerDriver* driver,
+static JniCompiledMethod ArtJniCompileMethodInternal(const CompilerOptions& compiler_options,
uint32_t access_flags,
uint32_t method_idx,
const DexFile& dex_file) {
@@ -124,8 +123,9 @@ static JniCompiledMethod ArtJniCompileMethodInternal(CompilerDriver* driver,
const bool is_static = (access_flags & kAccStatic) != 0;
const bool is_synchronized = (access_flags & kAccSynchronized) != 0;
const char* shorty = dex_file.GetMethodShorty(dex_file.GetMethodId(method_idx));
- InstructionSet instruction_set = driver->GetInstructionSet();
- const InstructionSetFeatures* instruction_set_features = driver->GetInstructionSetFeatures();
+ InstructionSet instruction_set = compiler_options.GetInstructionSet();
+ const InstructionSetFeatures* instruction_set_features =
+ compiler_options.GetInstructionSetFeatures();
// i.e. if the method was annotated with @FastNative
const bool is_fast_native = (access_flags & kAccFastNative) != 0u;
@@ -216,7 +216,6 @@ static JniCompiledMethod ArtJniCompileMethodInternal(CompilerDriver* driver,
// Assembler that holds generated instructions
std::unique_ptr<JNIMacroAssembler<kPointerSize>> jni_asm =
GetMacroAssembler<kPointerSize>(&allocator, instruction_set, instruction_set_features);
- const CompilerOptions& compiler_options = driver->GetCompilerOptions();
jni_asm->cfi().SetEnabled(compiler_options.GenerateAnyDebugInfo());
jni_asm->SetEmitRunTimeChecksInDebugMode(compiler_options.EmitRunTimeChecksInDebugMode());
@@ -771,16 +770,16 @@ static void SetNativeParameter(JNIMacroAssembler<kPointerSize>* jni_asm,
}
}
-JniCompiledMethod ArtQuickJniCompileMethod(CompilerDriver* compiler,
+JniCompiledMethod ArtQuickJniCompileMethod(const CompilerOptions& compiler_options,
uint32_t access_flags,
uint32_t method_idx,
const DexFile& dex_file) {
- if (Is64BitInstructionSet(compiler->GetInstructionSet())) {
+ if (Is64BitInstructionSet(compiler_options.GetInstructionSet())) {
return ArtJniCompileMethodInternal<PointerSize::k64>(
- compiler, access_flags, method_idx, dex_file);
+ compiler_options, access_flags, method_idx, dex_file);
} else {
return ArtJniCompileMethodInternal<PointerSize::k32>(
- compiler, access_flags, method_idx, dex_file);
+ compiler_options, access_flags, method_idx, dex_file);
}
}
diff --git a/compiler/jni/quick/jni_compiler.h b/compiler/jni/quick/jni_compiler.h
index 11419947a0..313fcd361e 100644
--- a/compiler/jni/quick/jni_compiler.h
+++ b/compiler/jni/quick/jni_compiler.h
@@ -25,7 +25,7 @@
namespace art {
class ArtMethod;
-class CompilerDriver;
+class CompilerOptions;
class DexFile;
class JniCompiledMethod {
@@ -62,7 +62,7 @@ class JniCompiledMethod {
std::vector<uint8_t> cfi_;
};
-JniCompiledMethod ArtQuickJniCompileMethod(CompilerDriver* compiler,
+JniCompiledMethod ArtQuickJniCompileMethod(const CompilerOptions& compiler_options,
uint32_t access_flags,
uint32_t method_idx,
const DexFile& dex_file);
diff --git a/compiler/linker/linker_patch.h b/compiler/linker/linker_patch.h
index 7b35fd9b0c..b7beb7bdb4 100644
--- a/compiler/linker/linker_patch.h
+++ b/compiler/linker/linker_patch.h
@@ -40,19 +40,31 @@ class LinkerPatch {
// which is ridiculous given we have only a handful of values here. If we
// choose to squeeze the Type into fewer than 8 bits, we'll have to declare
// patch_type_ as an uintN_t and do explicit static_cast<>s.
+ //
+ // Note: Actual patching is instruction_set-dependent.
enum class Type : uint8_t {
- kDataBimgRelRo, // NOTE: Actual patching is instruction_set-dependent.
- kMethodRelative, // NOTE: Actual patching is instruction_set-dependent.
- kMethodBssEntry, // NOTE: Actual patching is instruction_set-dependent.
- kCall,
- kCallRelative, // NOTE: Actual patching is instruction_set-dependent.
- kTypeRelative, // NOTE: Actual patching is instruction_set-dependent.
- kTypeBssEntry, // NOTE: Actual patching is instruction_set-dependent.
- kStringRelative, // NOTE: Actual patching is instruction_set-dependent.
- kStringBssEntry, // NOTE: Actual patching is instruction_set-dependent.
- kBakerReadBarrierBranch, // NOTE: Actual patching is instruction_set-dependent.
+ kIntrinsicReference, // Boot image reference for an intrinsic, see IntrinsicObjects.
+ kDataBimgRelRo,
+ kMethodRelative,
+ kMethodBssEntry,
+ kCall, // TODO: Remove. (Deprecated, non-PIC.)
+ kCallRelative,
+ kTypeRelative,
+ kTypeBssEntry,
+ kStringRelative,
+ kStringBssEntry,
+ kBakerReadBarrierBranch,
};
+ static LinkerPatch IntrinsicReferencePatch(size_t literal_offset,
+ uint32_t pc_insn_offset,
+ uint32_t intrinsic_data) {
+ LinkerPatch patch(literal_offset, Type::kIntrinsicReference, /* target_dex_file */ nullptr);
+ patch.intrinsic_data_ = intrinsic_data;
+ patch.pc_insn_offset_ = pc_insn_offset;
+ return patch;
+ }
+
static LinkerPatch DataBimgRelRoPatch(size_t literal_offset,
uint32_t pc_insn_offset,
uint32_t boot_image_offset) {
@@ -160,6 +172,7 @@ class LinkerPatch {
bool IsPcRelative() const {
switch (GetType()) {
+ case Type::kIntrinsicReference:
case Type::kDataBimgRelRo:
case Type::kMethodRelative:
case Type::kMethodBssEntry:
@@ -175,6 +188,11 @@ class LinkerPatch {
}
}
+ uint32_t IntrinsicData() const {
+ DCHECK(patch_type_ == Type::kIntrinsicReference);
+ return intrinsic_data_;
+ }
+
uint32_t BootImageOffset() const {
DCHECK(patch_type_ == Type::kDataBimgRelRo);
return boot_image_offset_;
@@ -213,7 +231,8 @@ class LinkerPatch {
}
uint32_t PcInsnOffset() const {
- DCHECK(patch_type_ == Type::kDataBimgRelRo ||
+ DCHECK(patch_type_ == Type::kIntrinsicReference ||
+ patch_type_ == Type::kDataBimgRelRo ||
patch_type_ == Type::kMethodRelative ||
patch_type_ == Type::kMethodBssEntry ||
patch_type_ == Type::kTypeRelative ||
@@ -255,10 +274,12 @@ class LinkerPatch {
uint32_t method_idx_; // Method index for Call/Method patches.
uint32_t type_idx_; // Type index for Type patches.
uint32_t string_idx_; // String index for String patches.
+ uint32_t intrinsic_data_; // Data for IntrinsicObjects.
uint32_t baker_custom_value1_;
static_assert(sizeof(method_idx_) == sizeof(cmp1_), "needed by relational operators");
static_assert(sizeof(type_idx_) == sizeof(cmp1_), "needed by relational operators");
static_assert(sizeof(string_idx_) == sizeof(cmp1_), "needed by relational operators");
+ static_assert(sizeof(intrinsic_data_) == sizeof(cmp1_), "needed by relational operators");
static_assert(sizeof(baker_custom_value1_) == sizeof(cmp1_), "needed by relational operators");
};
union {
diff --git a/compiler/optimizing/bounds_check_elimination_test.cc b/compiler/optimizing/bounds_check_elimination_test.cc
index 1523478613..7c29df877a 100644
--- a/compiler/optimizing/bounds_check_elimination_test.cc
+++ b/compiler/optimizing/bounds_check_elimination_test.cc
@@ -43,7 +43,7 @@ class BoundsCheckEliminationTest : public OptimizingUnitTest {
void RunBCE() {
graph_->BuildDominatorTree();
- InstructionSimplifier(graph_, /* codegen */ nullptr, /* driver */ nullptr).Run();
+ InstructionSimplifier(graph_, /* codegen */ nullptr).Run();
SideEffectsAnalysis side_effects(graph_);
side_effects.Run();
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 4791fa3fba..074f249fe1 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -393,6 +393,11 @@ void CodeGenerator::Compile(CodeAllocator* allocator) {
HGraphVisitor* instruction_visitor = GetInstructionVisitor();
DCHECK_EQ(current_block_index_, 0u);
+ GetStackMapStream()->BeginMethod(HasEmptyFrame() ? 0 : frame_size_,
+ core_spill_mask_,
+ fpu_spill_mask_,
+ GetGraph()->GetNumberOfVRegs());
+
size_t frame_start = GetAssembler()->CodeSize();
GenerateFrameEntry();
DCHECK_EQ(GetAssembler()->cfi().GetCurrentCFAOffset(), static_cast<int>(frame_size_));
@@ -435,6 +440,8 @@ void CodeGenerator::Compile(CodeAllocator* allocator) {
// Finalize instructions in assember;
Finalize(allocator);
+
+ GetStackMapStream()->EndMethod();
}
void CodeGenerator::Finalize(CodeAllocator* allocator) {
@@ -516,7 +523,7 @@ void CodeGenerator::CreateCommonInvokeLocationSummary(
locations->AddTemp(visitor->GetMethodLocation());
break;
}
- } else {
+ } else if (!invoke->IsInvokePolymorphic()) {
locations->AddTemp(visitor->GetMethodLocation());
}
}
@@ -544,6 +551,7 @@ void CodeGenerator::GenerateInvokeStaticOrDirectRuntimeCall(
case kVirtual:
case kInterface:
case kPolymorphic:
+ case kCustom:
LOG(FATAL) << "Unexpected invoke type: " << invoke->GetInvokeType();
UNREACHABLE();
}
@@ -572,6 +580,7 @@ void CodeGenerator::GenerateInvokeUnresolvedRuntimeCall(HInvokeUnresolved* invok
entrypoint = kQuickInvokeInterfaceTrampolineWithAccessCheck;
break;
case kPolymorphic:
+ case kCustom:
LOG(FATAL) << "Unexpected invoke type: " << invoke->GetInvokeType();
UNREACHABLE();
}
@@ -579,11 +588,19 @@ void CodeGenerator::GenerateInvokeUnresolvedRuntimeCall(HInvokeUnresolved* invok
}
void CodeGenerator::GenerateInvokePolymorphicCall(HInvokePolymorphic* invoke) {
- MoveConstant(invoke->GetLocations()->GetTemp(0), static_cast<int32_t>(invoke->GetType()));
+ // invoke-polymorphic does not use a temporary to convey any additional information (e.g. a
+ // method index) since it requires multiple info from the instruction (registers A, B, H). Not
+ // using the reservation has no effect on the registers used in the runtime call.
QuickEntrypointEnum entrypoint = kQuickInvokePolymorphic;
InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), nullptr);
}
+void CodeGenerator::GenerateInvokeCustomCall(HInvokeCustom* invoke) {
+ MoveConstant(invoke->GetLocations()->GetTemp(0), invoke->GetCallSiteIndex());
+ QuickEntrypointEnum entrypoint = kQuickInvokeCustom;
+ InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), nullptr);
+}
+
void CodeGenerator::CreateUnresolvedFieldLocationSummary(
HInstruction* field_access,
DataType::Type field_type,
@@ -867,53 +884,45 @@ void CodeGenerator::AllocateLocations(HInstruction* instruction) {
}
std::unique_ptr<CodeGenerator> CodeGenerator::Create(HGraph* graph,
- InstructionSet instruction_set,
- const InstructionSetFeatures& isa_features,
const CompilerOptions& compiler_options,
OptimizingCompilerStats* stats) {
ArenaAllocator* allocator = graph->GetAllocator();
- switch (instruction_set) {
+ switch (compiler_options.GetInstructionSet()) {
#ifdef ART_ENABLE_CODEGEN_arm
case InstructionSet::kArm:
case InstructionSet::kThumb2: {
return std::unique_ptr<CodeGenerator>(
- new (allocator) arm::CodeGeneratorARMVIXL(
- graph, *isa_features.AsArmInstructionSetFeatures(), compiler_options, stats));
+ new (allocator) arm::CodeGeneratorARMVIXL(graph, compiler_options, stats));
}
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
case InstructionSet::kArm64: {
return std::unique_ptr<CodeGenerator>(
- new (allocator) arm64::CodeGeneratorARM64(
- graph, *isa_features.AsArm64InstructionSetFeatures(), compiler_options, stats));
+ new (allocator) arm64::CodeGeneratorARM64(graph, compiler_options, stats));
}
#endif
#ifdef ART_ENABLE_CODEGEN_mips
case InstructionSet::kMips: {
return std::unique_ptr<CodeGenerator>(
- new (allocator) mips::CodeGeneratorMIPS(
- graph, *isa_features.AsMipsInstructionSetFeatures(), compiler_options, stats));
+ new (allocator) mips::CodeGeneratorMIPS(graph, compiler_options, stats));
}
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
case InstructionSet::kMips64: {
return std::unique_ptr<CodeGenerator>(
- new (allocator) mips64::CodeGeneratorMIPS64(
- graph, *isa_features.AsMips64InstructionSetFeatures(), compiler_options, stats));
+ new (allocator) mips64::CodeGeneratorMIPS64(graph, compiler_options, stats));
}
#endif
#ifdef ART_ENABLE_CODEGEN_x86
case InstructionSet::kX86: {
return std::unique_ptr<CodeGenerator>(
- new (allocator) x86::CodeGeneratorX86(
- graph, *isa_features.AsX86InstructionSetFeatures(), compiler_options, stats));
+ new (allocator) x86::CodeGeneratorX86(graph, compiler_options, stats));
}
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
case InstructionSet::kX86_64: {
return std::unique_ptr<CodeGenerator>(
- new (allocator) x86_64::CodeGeneratorX86_64(
- graph, *isa_features.AsX86_64InstructionSetFeatures(), compiler_options, stats));
+ new (allocator) x86_64::CodeGeneratorX86_64(graph, compiler_options, stats));
}
#endif
default:
@@ -1045,7 +1054,8 @@ void CodeGenerator::BuildStackMaps(MemoryRegion stack_map_region,
void CodeGenerator::RecordPcInfo(HInstruction* instruction,
uint32_t dex_pc,
- SlowPathCode* slow_path) {
+ SlowPathCode* slow_path,
+ bool native_debug_info) {
if (instruction != nullptr) {
// The code generated for some type conversions
// may call the runtime, thus normally requiring a subsequent
@@ -1076,7 +1086,7 @@ void CodeGenerator::RecordPcInfo(HInstruction* instruction,
if (instruction == nullptr) {
// For stack overflow checks and native-debug-info entries without dex register
// mapping (i.e. start of basic block or start of slow path).
- stack_map_stream->BeginStackMapEntry(dex_pc, native_pc, 0, 0, 0, 0);
+ stack_map_stream->BeginStackMapEntry(dex_pc, native_pc);
stack_map_stream->EndStackMapEntry();
return;
}
@@ -1110,12 +1120,21 @@ void CodeGenerator::RecordPcInfo(HInstruction* instruction,
outer_dex_pc = outer_environment->GetDexPc();
outer_environment_size = outer_environment->Size();
}
+
+ HLoopInformation* info = instruction->GetBlock()->GetLoopInformation();
+ bool osr =
+ instruction->IsSuspendCheck() &&
+ (info != nullptr) &&
+ graph_->IsCompilingOsr() &&
+ (inlining_depth == 0);
+ StackMap::Kind kind = native_debug_info
+ ? StackMap::Kind::Debug
+ : (osr ? StackMap::Kind::OSR : StackMap::Kind::Default);
stack_map_stream->BeginStackMapEntry(outer_dex_pc,
native_pc,
register_mask,
locations->GetStackMask(),
- outer_environment_size,
- inlining_depth);
+ kind);
EmitEnvironment(environment, slow_path);
// Record invoke info, the common case for the trampoline is super and static invokes. Only
// record these to reduce oat file size.
@@ -1128,19 +1147,9 @@ void CodeGenerator::RecordPcInfo(HInstruction* instruction,
}
stack_map_stream->EndStackMapEntry();
- HLoopInformation* info = instruction->GetBlock()->GetLoopInformation();
- if (instruction->IsSuspendCheck() &&
- (info != nullptr) &&
- graph_->IsCompilingOsr() &&
- (inlining_depth == 0)) {
+ if (osr) {
DCHECK_EQ(info->GetSuspendCheck(), instruction);
- // We duplicate the stack map as a marker that this stack map can be an OSR entry.
- // Duplicating it avoids having the runtime recognize and skip an OSR stack map.
DCHECK(info->IsIrreducible());
- stack_map_stream->BeginStackMapEntry(
- dex_pc, native_pc, register_mask, locations->GetStackMask(), outer_environment_size, 0);
- EmitEnvironment(instruction->GetEnvironment(), slow_path);
- stack_map_stream->EndStackMapEntry();
if (kIsDebugBuild) {
for (size_t i = 0, environment_size = environment->Size(); i < environment_size; ++i) {
HInstruction* in_environment = environment->GetInstructionAt(i);
@@ -1157,14 +1166,6 @@ void CodeGenerator::RecordPcInfo(HInstruction* instruction,
}
}
}
- } else if (kIsDebugBuild) {
- // Ensure stack maps are unique, by checking that the native pc in the stack map
- // last emitted is different than the native pc of the stack map just emitted.
- size_t number_of_stack_maps = stack_map_stream->GetNumberOfStackMaps();
- if (number_of_stack_maps > 1) {
- DCHECK_NE(stack_map_stream->GetStackMapNativePcOffset(number_of_stack_maps - 1),
- stack_map_stream->GetStackMapNativePcOffset(number_of_stack_maps - 2));
- }
}
}
@@ -1186,12 +1187,11 @@ void CodeGenerator::MaybeRecordNativeDebugInfo(HInstruction* instruction,
// Ensure that we do not collide with the stack map of the previous instruction.
GenerateNop();
}
- RecordPcInfo(instruction, dex_pc, slow_path);
+ RecordPcInfo(instruction, dex_pc, slow_path, /* native_debug_info */ true);
}
}
void CodeGenerator::RecordCatchBlockInfo() {
- ArenaAllocator* allocator = graph_->GetAllocator();
StackMapStream* stack_map_stream = GetStackMapStream();
for (HBasicBlock* block : *block_order_) {
@@ -1201,30 +1201,23 @@ void CodeGenerator::RecordCatchBlockInfo() {
uint32_t dex_pc = block->GetDexPc();
uint32_t num_vregs = graph_->GetNumberOfVRegs();
- uint32_t inlining_depth = 0; // Inlining of catch blocks is not supported at the moment.
uint32_t native_pc = GetAddressOf(block);
- uint32_t register_mask = 0; // Not used.
-
- // The stack mask is not used, so we leave it empty.
- ArenaBitVector* stack_mask =
- ArenaBitVector::Create(allocator, 0, /* expandable */ true, kArenaAllocCodeGenerator);
stack_map_stream->BeginStackMapEntry(dex_pc,
native_pc,
- register_mask,
- stack_mask,
- num_vregs,
- inlining_depth);
+ /* register_mask */ 0,
+ /* stack_mask */ nullptr,
+ StackMap::Kind::Catch);
HInstruction* current_phi = block->GetFirstPhi();
for (size_t vreg = 0; vreg < num_vregs; ++vreg) {
- while (current_phi != nullptr && current_phi->AsPhi()->GetRegNumber() < vreg) {
- HInstruction* next_phi = current_phi->GetNext();
- DCHECK(next_phi == nullptr ||
- current_phi->AsPhi()->GetRegNumber() <= next_phi->AsPhi()->GetRegNumber())
- << "Phis need to be sorted by vreg number to keep this a linear-time loop.";
- current_phi = next_phi;
- }
+ while (current_phi != nullptr && current_phi->AsPhi()->GetRegNumber() < vreg) {
+ HInstruction* next_phi = current_phi->GetNext();
+ DCHECK(next_phi == nullptr ||
+ current_phi->AsPhi()->GetRegNumber() <= next_phi->AsPhi()->GetRegNumber())
+ << "Phis need to be sorted by vreg number to keep this a linear-time loop.";
+ current_phi = next_phi;
+ }
if (current_phi == nullptr || current_phi->AsPhi()->GetRegNumber() != vreg) {
stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
@@ -1284,50 +1277,45 @@ void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slo
continue;
}
+ using Kind = DexRegisterLocation::Kind;
Location location = environment->GetLocationAt(i);
switch (location.GetKind()) {
case Location::kConstant: {
DCHECK_EQ(current, location.GetConstant());
if (current->IsLongConstant()) {
int64_t value = current->AsLongConstant()->GetValue();
- stack_map_stream->AddDexRegisterEntry(
- DexRegisterLocation::Kind::kConstant, Low32Bits(value));
- stack_map_stream->AddDexRegisterEntry(
- DexRegisterLocation::Kind::kConstant, High32Bits(value));
+ stack_map_stream->AddDexRegisterEntry(Kind::kConstant, Low32Bits(value));
+ stack_map_stream->AddDexRegisterEntry(Kind::kConstant, High32Bits(value));
++i;
DCHECK_LT(i, environment_size);
} else if (current->IsDoubleConstant()) {
int64_t value = bit_cast<int64_t, double>(current->AsDoubleConstant()->GetValue());
- stack_map_stream->AddDexRegisterEntry(
- DexRegisterLocation::Kind::kConstant, Low32Bits(value));
- stack_map_stream->AddDexRegisterEntry(
- DexRegisterLocation::Kind::kConstant, High32Bits(value));
+ stack_map_stream->AddDexRegisterEntry(Kind::kConstant, Low32Bits(value));
+ stack_map_stream->AddDexRegisterEntry(Kind::kConstant, High32Bits(value));
++i;
DCHECK_LT(i, environment_size);
} else if (current->IsIntConstant()) {
int32_t value = current->AsIntConstant()->GetValue();
- stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, value);
+ stack_map_stream->AddDexRegisterEntry(Kind::kConstant, value);
} else if (current->IsNullConstant()) {
- stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, 0);
+ stack_map_stream->AddDexRegisterEntry(Kind::kConstant, 0);
} else {
DCHECK(current->IsFloatConstant()) << current->DebugName();
int32_t value = bit_cast<int32_t, float>(current->AsFloatConstant()->GetValue());
- stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kConstant, value);
+ stack_map_stream->AddDexRegisterEntry(Kind::kConstant, value);
}
break;
}
case Location::kStackSlot: {
- stack_map_stream->AddDexRegisterEntry(
- DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
+ stack_map_stream->AddDexRegisterEntry(Kind::kInStack, location.GetStackIndex());
break;
}
case Location::kDoubleStackSlot: {
+ stack_map_stream->AddDexRegisterEntry(Kind::kInStack, location.GetStackIndex());
stack_map_stream->AddDexRegisterEntry(
- DexRegisterLocation::Kind::kInStack, location.GetStackIndex());
- stack_map_stream->AddDexRegisterEntry(
- DexRegisterLocation::Kind::kInStack, location.GetHighStackIndex(kVRegSize));
+ Kind::kInStack, location.GetHighStackIndex(kVRegSize));
++i;
DCHECK_LT(i, environment_size);
break;
@@ -1337,17 +1325,16 @@ void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slo
int id = location.reg();
if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(id)) {
uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(id);
- stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
+ stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset);
if (current->GetType() == DataType::Type::kInt64) {
- stack_map_stream->AddDexRegisterEntry(
- DexRegisterLocation::Kind::kInStack, offset + kVRegSize);
+ stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset + kVRegSize);
++i;
DCHECK_LT(i, environment_size);
}
} else {
- stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, id);
+ stack_map_stream->AddDexRegisterEntry(Kind::kInRegister, id);
if (current->GetType() == DataType::Type::kInt64) {
- stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegisterHigh, id);
+ stack_map_stream->AddDexRegisterEntry(Kind::kInRegisterHigh, id);
++i;
DCHECK_LT(i, environment_size);
}
@@ -1359,18 +1346,16 @@ void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slo
int id = location.reg();
if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(id)) {
uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(id);
- stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
+ stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset);
if (current->GetType() == DataType::Type::kFloat64) {
- stack_map_stream->AddDexRegisterEntry(
- DexRegisterLocation::Kind::kInStack, offset + kVRegSize);
+ stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset + kVRegSize);
++i;
DCHECK_LT(i, environment_size);
}
} else {
- stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, id);
+ stack_map_stream->AddDexRegisterEntry(Kind::kInFpuRegister, id);
if (current->GetType() == DataType::Type::kFloat64) {
- stack_map_stream->AddDexRegisterEntry(
- DexRegisterLocation::Kind::kInFpuRegisterHigh, id);
+ stack_map_stream->AddDexRegisterEntry(Kind::kInFpuRegisterHigh, id);
++i;
DCHECK_LT(i, environment_size);
}
@@ -1383,16 +1368,16 @@ void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slo
int high = location.high();
if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(low)) {
uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(low);
- stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
+ stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset);
} else {
- stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, low);
+ stack_map_stream->AddDexRegisterEntry(Kind::kInFpuRegister, low);
}
if (slow_path != nullptr && slow_path->IsFpuRegisterSaved(high)) {
uint32_t offset = slow_path->GetStackOffsetOfFpuRegister(high);
- stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
+ stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset);
++i;
} else {
- stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInFpuRegister, high);
+ stack_map_stream->AddDexRegisterEntry(Kind::kInFpuRegister, high);
++i;
}
DCHECK_LT(i, environment_size);
@@ -1404,15 +1389,15 @@ void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slo
int high = location.high();
if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(low)) {
uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(low);
- stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
+ stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset);
} else {
- stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, low);
+ stack_map_stream->AddDexRegisterEntry(Kind::kInRegister, low);
}
if (slow_path != nullptr && slow_path->IsCoreRegisterSaved(high)) {
uint32_t offset = slow_path->GetStackOffsetOfCoreRegister(high);
- stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInStack, offset);
+ stack_map_stream->AddDexRegisterEntry(Kind::kInStack, offset);
} else {
- stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kInRegister, high);
+ stack_map_stream->AddDexRegisterEntry(Kind::kInRegister, high);
}
++i;
DCHECK_LT(i, environment_size);
@@ -1420,7 +1405,7 @@ void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slo
}
case Location::kInvalid: {
- stack_map_stream->AddDexRegisterEntry(DexRegisterLocation::Kind::kNone, 0);
+ stack_map_stream->AddDexRegisterEntry(Kind::kNone, 0);
break;
}
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index a340446ac3..59f858ea52 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -188,8 +188,6 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
// Compiles the graph to executable instructions.
void Compile(CodeAllocator* allocator);
static std::unique_ptr<CodeGenerator> Create(HGraph* graph,
- InstructionSet instruction_set,
- const InstructionSetFeatures& isa_features,
const CompilerOptions& compiler_options,
OptimizingCompilerStats* stats = nullptr);
virtual ~CodeGenerator();
@@ -323,7 +321,10 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
}
// Record native to dex mapping for a suspend point. Required by runtime.
- void RecordPcInfo(HInstruction* instruction, uint32_t dex_pc, SlowPathCode* slow_path = nullptr);
+ void RecordPcInfo(HInstruction* instruction,
+ uint32_t dex_pc,
+ SlowPathCode* slow_path = nullptr,
+ bool native_debug_info = false);
// Check whether we have already recorded mapping at this PC.
bool HasStackMapAtCurrentPc();
// Record extra stack maps if we support native debugging.
@@ -542,10 +543,13 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
void GenerateInvokeStaticOrDirectRuntimeCall(
HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path);
+
void GenerateInvokeUnresolvedRuntimeCall(HInvokeUnresolved* invoke);
void GenerateInvokePolymorphicCall(HInvokePolymorphic* invoke);
+ void GenerateInvokeCustomCall(HInvokeCustom* invoke);
+
void CreateUnresolvedFieldLocationSummary(
HInstruction* field_access,
DataType::Type field_type,
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 6f173e19f5..26c9e9fa2b 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -27,6 +27,7 @@
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "gc/accounting/card_table.h"
+#include "gc/space/image_space.h"
#include "heap_poisoning.h"
#include "intrinsics.h"
#include "intrinsics_arm64.h"
@@ -68,7 +69,7 @@ using helpers::InputCPURegisterOrZeroRegAt;
using helpers::InputFPRegisterAt;
using helpers::InputOperandAt;
using helpers::InputRegisterAt;
-using helpers::Int64ConstantFrom;
+using helpers::Int64FromLocation;
using helpers::IsConstantZeroBitPattern;
using helpers::LocationFrom;
using helpers::OperandFromMemOperand;
@@ -1373,7 +1374,6 @@ Location InvokeDexCallingConventionVisitorARM64::GetMethodLocation() const {
}
CodeGeneratorARM64::CodeGeneratorARM64(HGraph* graph,
- const Arm64InstructionSetFeatures& isa_features,
const CompilerOptions& compiler_options,
OptimizingCompilerStats* stats)
: CodeGenerator(graph,
@@ -1390,7 +1390,6 @@ CodeGeneratorARM64::CodeGeneratorARM64(HGraph* graph,
instruction_visitor_(graph, this),
move_resolver_(graph->GetAllocator(), this),
assembler_(graph->GetAllocator()),
- isa_features_(isa_features),
uint32_literals_(std::less<uint32_t>(),
graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
uint64_literals_(std::less<uint64_t>(),
@@ -1401,6 +1400,7 @@ CodeGeneratorARM64::CodeGeneratorARM64(HGraph* graph,
type_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
boot_image_string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
string_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ boot_image_intrinsic_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
baker_read_barrier_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
jit_string_patches_(StringReferenceValueComparator(),
graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
@@ -1728,6 +1728,10 @@ void CodeGeneratorARM64::DumpFloatingPointRegister(std::ostream& stream, int reg
stream << DRegister(reg);
}
+const Arm64InstructionSetFeatures& CodeGeneratorARM64::GetInstructionSetFeatures() const {
+ return *GetCompilerOptions().GetInstructionSetFeatures()->AsArm64InstructionSetFeatures();
+}
+
void CodeGeneratorARM64::MoveConstant(CPURegister destination, HConstant* constant) {
if (constant->IsIntConstant()) {
__ Mov(Register(destination), constant->AsIntConstant()->GetValue());
@@ -2459,6 +2463,9 @@ void InstructionCodeGeneratorARM64::HandleBinaryOp(HBinaryOperation* instr) {
// all & reg_bits - 1.
__ Ror(dst, lhs, RegisterFrom(instr->GetLocations()->InAt(1), type));
}
+ } else if (instr->IsMin() || instr->IsMax()) {
+ __ Cmp(lhs, rhs);
+ __ Csel(dst, lhs, rhs, instr->IsMin() ? lt : gt);
} else {
DCHECK(instr->IsXor());
__ Eor(dst, lhs, rhs);
@@ -2474,6 +2481,10 @@ void InstructionCodeGeneratorARM64::HandleBinaryOp(HBinaryOperation* instr) {
__ Fadd(dst, lhs, rhs);
} else if (instr->IsSub()) {
__ Fsub(dst, lhs, rhs);
+ } else if (instr->IsMin()) {
+ __ Fmin(dst, lhs, rhs);
+ } else if (instr->IsMax()) {
+ __ Fmax(dst, lhs, rhs);
} else {
LOG(FATAL) << "Unexpected floating-point binary operation";
}
@@ -2694,7 +2705,7 @@ void LocationsBuilderARM64::VisitIntermediateAddressIndex(HIntermediateAddressIn
void InstructionCodeGeneratorARM64::VisitIntermediateAddressIndex(
HIntermediateAddressIndex* instruction) {
Register index_reg = InputRegisterAt(instruction, 0);
- uint32_t shift = Int64ConstantFrom(instruction->GetLocations()->InAt(2));
+ uint32_t shift = Int64FromLocation(instruction->GetLocations()->InAt(2));
uint32_t offset = instruction->GetOffset()->AsIntConstant()->GetValue();
if (shift == 0) {
@@ -2824,7 +2835,7 @@ void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) {
DCHECK(!instruction->CanDoImplicitNullCheckOn(instruction->InputAt(0)));
if (index.IsConstant()) {
// Array load with a constant index can be treated as a field load.
- offset += Int64ConstantFrom(index) << DataType::SizeShift(type);
+ offset += Int64FromLocation(index) << DataType::SizeShift(type);
Location maybe_temp =
(locations->GetTempCount() != 0) ? locations->GetTemp(0) : Location::NoLocation();
codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction,
@@ -2869,14 +2880,14 @@ void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) {
"Expecting 0=compressed, 1=uncompressed");
__ Tbnz(length.W(), 0, &uncompressed_load);
__ Ldrb(Register(OutputCPURegister(instruction)),
- HeapOperand(obj, offset + Int64ConstantFrom(index)));
+ HeapOperand(obj, offset + Int64FromLocation(index)));
__ B(&done);
__ Bind(&uncompressed_load);
__ Ldrh(Register(OutputCPURegister(instruction)),
- HeapOperand(obj, offset + (Int64ConstantFrom(index) << 1)));
+ HeapOperand(obj, offset + (Int64FromLocation(index) << 1)));
__ Bind(&done);
} else {
- offset += Int64ConstantFrom(index) << DataType::SizeShift(type);
+ offset += Int64FromLocation(index) << DataType::SizeShift(type);
source = HeapOperand(obj, offset);
}
} else {
@@ -2989,7 +3000,7 @@ void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) {
if (!needs_write_barrier) {
DCHECK(!may_need_runtime_call_for_type_check);
if (index.IsConstant()) {
- offset += Int64ConstantFrom(index) << DataType::SizeShift(value_type);
+ offset += Int64FromLocation(index) << DataType::SizeShift(value_type);
destination = HeapOperand(array, offset);
} else {
UseScratchRegisterScope temps(masm);
@@ -3027,7 +3038,7 @@ void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) {
UseScratchRegisterScope temps(masm);
Register temp = temps.AcquireSameSizeAs(array);
if (index.IsConstant()) {
- offset += Int64ConstantFrom(index) << DataType::SizeShift(value_type);
+ offset += Int64FromLocation(index) << DataType::SizeShift(value_type);
destination = HeapOperand(array, offset);
} else {
destination = HeapOperand(temp,
@@ -3336,61 +3347,30 @@ FOR_EACH_CONDITION_INSTRUCTION(DEFINE_CONDITION_VISITORS)
#undef DEFINE_CONDITION_VISITORS
#undef FOR_EACH_CONDITION_INSTRUCTION
-void InstructionCodeGeneratorARM64::DivRemOneOrMinusOne(HBinaryOperation* instruction) {
- DCHECK(instruction->IsDiv() || instruction->IsRem());
-
- LocationSummary* locations = instruction->GetLocations();
- Location second = locations->InAt(1);
- DCHECK(second.IsConstant());
+void InstructionCodeGeneratorARM64::GenerateIntDivForPower2Denom(HDiv* instruction) {
+ int64_t imm = Int64FromLocation(instruction->GetLocations()->InAt(1));
+ uint64_t abs_imm = static_cast<uint64_t>(AbsOrMin(imm));
+ DCHECK(IsPowerOfTwo(abs_imm)) << abs_imm;
Register out = OutputRegister(instruction);
Register dividend = InputRegisterAt(instruction, 0);
- int64_t imm = Int64FromConstant(second.GetConstant());
- DCHECK(imm == 1 || imm == -1);
- if (instruction->IsRem()) {
- __ Mov(out, 0);
+ if (abs_imm == 2) {
+ int bits = DataType::Size(instruction->GetResultType()) * kBitsPerByte;
+ __ Add(out, dividend, Operand(dividend, LSR, bits - 1));
} else {
- if (imm == 1) {
- __ Mov(out, dividend);
- } else {
- __ Neg(out, dividend);
- }
- }
-}
-
-void InstructionCodeGeneratorARM64::DivRemByPowerOfTwo(HBinaryOperation* instruction) {
- DCHECK(instruction->IsDiv() || instruction->IsRem());
-
- LocationSummary* locations = instruction->GetLocations();
- Location second = locations->InAt(1);
- DCHECK(second.IsConstant());
-
- Register out = OutputRegister(instruction);
- Register dividend = InputRegisterAt(instruction, 0);
- int64_t imm = Int64FromConstant(second.GetConstant());
- uint64_t abs_imm = static_cast<uint64_t>(AbsOrMin(imm));
- int ctz_imm = CTZ(abs_imm);
-
- UseScratchRegisterScope temps(GetVIXLAssembler());
- Register temp = temps.AcquireSameSizeAs(out);
-
- if (instruction->IsDiv()) {
+ UseScratchRegisterScope temps(GetVIXLAssembler());
+ Register temp = temps.AcquireSameSizeAs(out);
__ Add(temp, dividend, abs_imm - 1);
__ Cmp(dividend, 0);
__ Csel(out, temp, dividend, lt);
- if (imm > 0) {
- __ Asr(out, out, ctz_imm);
- } else {
- __ Neg(out, Operand(out, ASR, ctz_imm));
- }
+ }
+
+ int ctz_imm = CTZ(abs_imm);
+ if (imm > 0) {
+ __ Asr(out, out, ctz_imm);
} else {
- int bits = instruction->GetResultType() == DataType::Type::kInt32 ? 32 : 64;
- __ Asr(temp, dividend, bits - 1);
- __ Lsr(temp, temp, bits - ctz_imm);
- __ Add(out, dividend, temp);
- __ And(out, out, abs_imm - 1);
- __ Sub(out, out, temp);
+ __ Neg(out, Operand(out, ASR, ctz_imm));
}
}
@@ -3446,39 +3426,34 @@ void InstructionCodeGeneratorARM64::GenerateDivRemWithAnyConstant(HBinaryOperati
}
}
-void InstructionCodeGeneratorARM64::GenerateDivRemIntegral(HBinaryOperation* instruction) {
- DCHECK(instruction->IsDiv() || instruction->IsRem());
- DataType::Type type = instruction->GetResultType();
- DCHECK(type == DataType::Type::kInt32 || type == DataType::Type::kInt64);
+void InstructionCodeGeneratorARM64::GenerateIntDivForConstDenom(HDiv *instruction) {
+ int64_t imm = Int64FromLocation(instruction->GetLocations()->InAt(1));
- LocationSummary* locations = instruction->GetLocations();
- Register out = OutputRegister(instruction);
- Location second = locations->InAt(1);
+ if (imm == 0) {
+ // Do not generate anything. DivZeroCheck would prevent any code to be executed.
+ return;
+ }
- if (second.IsConstant()) {
- int64_t imm = Int64FromConstant(second.GetConstant());
+ if (IsPowerOfTwo(AbsOrMin(imm))) {
+ GenerateIntDivForPower2Denom(instruction);
+ } else {
+ // Cases imm == -1 or imm == 1 are handled by InstructionSimplifier.
+ DCHECK(imm < -2 || imm > 2) << imm;
+ GenerateDivRemWithAnyConstant(instruction);
+ }
+}
- if (imm == 0) {
- // Do not generate anything. DivZeroCheck would prevent any code to be executed.
- } else if (imm == 1 || imm == -1) {
- DivRemOneOrMinusOne(instruction);
- } else if (IsPowerOfTwo(AbsOrMin(imm))) {
- DivRemByPowerOfTwo(instruction);
- } else {
- DCHECK(imm <= -2 || imm >= 2);
- GenerateDivRemWithAnyConstant(instruction);
- }
+void InstructionCodeGeneratorARM64::GenerateIntDiv(HDiv *instruction) {
+ DCHECK(DataType::IsIntOrLongType(instruction->GetResultType()))
+ << instruction->GetResultType();
+
+ if (instruction->GetLocations()->InAt(1).IsConstant()) {
+ GenerateIntDivForConstDenom(instruction);
} else {
+ Register out = OutputRegister(instruction);
Register dividend = InputRegisterAt(instruction, 0);
Register divisor = InputRegisterAt(instruction, 1);
- if (instruction->IsDiv()) {
- __ Sdiv(out, dividend, divisor);
- } else {
- UseScratchRegisterScope temps(GetVIXLAssembler());
- Register temp = temps.AcquireSameSizeAs(out);
- __ Sdiv(temp, dividend, divisor);
- __ Msub(out, temp, divisor, dividend);
- }
+ __ Sdiv(out, dividend, divisor);
}
}
@@ -3510,7 +3485,7 @@ void InstructionCodeGeneratorARM64::VisitDiv(HDiv* div) {
switch (type) {
case DataType::Type::kInt32:
case DataType::Type::kInt64:
- GenerateDivRemIntegral(div);
+ GenerateIntDiv(div);
break;
case DataType::Type::kFloat32:
@@ -3542,7 +3517,7 @@ void InstructionCodeGeneratorARM64::VisitDivZeroCheck(HDivZeroCheck* instruction
}
if (value.IsConstant()) {
- int64_t divisor = Int64ConstantFrom(value);
+ int64_t divisor = Int64FromLocation(value);
if (divisor == 0) {
__ B(slow_path->GetEntryLabel());
} else {
@@ -4695,6 +4670,22 @@ void InstructionCodeGeneratorARM64::VisitInvokePolymorphic(HInvokePolymorphic* i
codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
}
+void LocationsBuilderARM64::VisitInvokeCustom(HInvokeCustom* invoke) {
+ HandleInvoke(invoke);
+}
+
+void InstructionCodeGeneratorARM64::VisitInvokeCustom(HInvokeCustom* invoke) {
+ codegen_->GenerateInvokeCustomCall(invoke);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
+}
+
+vixl::aarch64::Label* CodeGeneratorARM64::NewBootImageIntrinsicPatch(
+ uint32_t intrinsic_data,
+ vixl::aarch64::Label* adrp_label) {
+ return NewPcRelativePatch(
+ /* dex_file */ nullptr, intrinsic_data, adrp_label, &boot_image_intrinsic_patches_);
+}
+
vixl::aarch64::Label* CodeGeneratorARM64::NewBootImageRelRoPatch(
uint32_t boot_image_offset,
vixl::aarch64::Label* adrp_label) {
@@ -4812,6 +4803,55 @@ void CodeGeneratorARM64::EmitLdrOffsetPlaceholder(vixl::aarch64::Label* fixup_la
__ ldr(out, MemOperand(base, /* offset placeholder */ 0));
}
+void CodeGeneratorARM64::LoadBootImageAddress(vixl::aarch64::Register reg,
+ uint32_t boot_image_reference) {
+ if (GetCompilerOptions().IsBootImage()) {
+ // Add ADRP with its PC-relative type patch.
+ vixl::aarch64::Label* adrp_label = NewBootImageIntrinsicPatch(boot_image_reference);
+ EmitAdrpPlaceholder(adrp_label, reg.X());
+ // Add ADD with its PC-relative type patch.
+ vixl::aarch64::Label* add_label = NewBootImageIntrinsicPatch(boot_image_reference, adrp_label);
+ EmitAddPlaceholder(add_label, reg.X(), reg.X());
+ } else if (GetCompilerOptions().GetCompilePic()) {
+ DCHECK(Runtime::Current()->IsAotCompiler());
+ // Add ADRP with its PC-relative .data.bimg.rel.ro patch.
+ vixl::aarch64::Label* adrp_label = NewBootImageRelRoPatch(boot_image_reference);
+ EmitAdrpPlaceholder(adrp_label, reg.X());
+ // Add LDR with its PC-relative .data.bimg.rel.ro patch.
+ vixl::aarch64::Label* ldr_label = NewBootImageRelRoPatch(boot_image_reference, adrp_label);
+ EmitLdrOffsetPlaceholder(ldr_label, reg.W(), reg.X());
+ } else {
+ gc::Heap* heap = Runtime::Current()->GetHeap();
+ DCHECK(!heap->GetBootImageSpaces().empty());
+ const uint8_t* address = heap->GetBootImageSpaces()[0]->Begin() + boot_image_reference;
+ __ Ldr(reg.W(), DeduplicateBootImageAddressLiteral(reinterpret_cast<uintptr_t>(address)));
+ }
+}
+
+void CodeGeneratorARM64::AllocateInstanceForIntrinsic(HInvokeStaticOrDirect* invoke,
+ uint32_t boot_image_offset) {
+ DCHECK(invoke->IsStatic());
+ InvokeRuntimeCallingConvention calling_convention;
+ Register argument = calling_convention.GetRegisterAt(0);
+ if (GetCompilerOptions().IsBootImage()) {
+ DCHECK_EQ(boot_image_offset, IntrinsicVisitor::IntegerValueOfInfo::kInvalidReference);
+ // Load the class the same way as for HLoadClass::LoadKind::kBootImageLinkTimePcRelative.
+ MethodReference target_method = invoke->GetTargetMethod();
+ dex::TypeIndex type_idx = target_method.dex_file->GetMethodId(target_method.index).class_idx_;
+ // Add ADRP with its PC-relative type patch.
+ vixl::aarch64::Label* adrp_label = NewBootImageTypePatch(*target_method.dex_file, type_idx);
+ EmitAdrpPlaceholder(adrp_label, argument.X());
+ // Add ADD with its PC-relative type patch.
+ vixl::aarch64::Label* add_label =
+ NewBootImageTypePatch(*target_method.dex_file, type_idx, adrp_label);
+ EmitAddPlaceholder(add_label, argument.X(), argument.X());
+ } else {
+ LoadBootImageAddress(argument, boot_image_offset);
+ }
+ InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
+}
+
template <linker::LinkerPatch (*Factory)(size_t, const DexFile*, uint32_t, uint32_t)>
inline void CodeGeneratorARM64::EmitPcRelativeLinkerPatches(
const ArenaDeque<PcRelativePatchInfo>& infos,
@@ -4824,12 +4864,13 @@ inline void CodeGeneratorARM64::EmitPcRelativeLinkerPatches(
}
}
-linker::LinkerPatch DataBimgRelRoPatchAdapter(size_t literal_offset,
- const DexFile* target_dex_file,
- uint32_t pc_insn_offset,
- uint32_t boot_image_offset) {
- DCHECK(target_dex_file == nullptr); // Unused for DataBimgRelRoPatch(), should be null.
- return linker::LinkerPatch::DataBimgRelRoPatch(literal_offset, pc_insn_offset, boot_image_offset);
+template <linker::LinkerPatch (*Factory)(size_t, uint32_t, uint32_t)>
+linker::LinkerPatch NoDexFileAdapter(size_t literal_offset,
+ const DexFile* target_dex_file,
+ uint32_t pc_insn_offset,
+ uint32_t boot_image_offset) {
+ DCHECK(target_dex_file == nullptr); // Unused for these patches, should be null.
+ return Factory(literal_offset, pc_insn_offset, boot_image_offset);
}
void CodeGeneratorARM64::EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) {
@@ -4841,6 +4882,7 @@ void CodeGeneratorARM64::EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* lin
type_bss_entry_patches_.size() +
boot_image_string_patches_.size() +
string_bss_entry_patches_.size() +
+ boot_image_intrinsic_patches_.size() +
baker_read_barrier_patches_.size();
linker_patches->reserve(size);
if (GetCompilerOptions().IsBootImage()) {
@@ -4850,11 +4892,14 @@ void CodeGeneratorARM64::EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* lin
boot_image_type_patches_, linker_patches);
EmitPcRelativeLinkerPatches<linker::LinkerPatch::RelativeStringPatch>(
boot_image_string_patches_, linker_patches);
+ EmitPcRelativeLinkerPatches<NoDexFileAdapter<linker::LinkerPatch::IntrinsicReferencePatch>>(
+ boot_image_intrinsic_patches_, linker_patches);
} else {
- EmitPcRelativeLinkerPatches<DataBimgRelRoPatchAdapter>(
+ EmitPcRelativeLinkerPatches<NoDexFileAdapter<linker::LinkerPatch::DataBimgRelRoPatch>>(
boot_image_method_patches_, linker_patches);
DCHECK(boot_image_type_patches_.empty());
DCHECK(boot_image_string_patches_.empty());
+ DCHECK(boot_image_intrinsic_patches_.empty());
}
EmitPcRelativeLinkerPatches<linker::LinkerPatch::MethodBssEntryPatch>(
method_bss_entry_patches_, linker_patches);
@@ -5633,13 +5678,81 @@ void LocationsBuilderARM64::VisitRem(HRem* rem) {
}
}
+void InstructionCodeGeneratorARM64::GenerateIntRemForPower2Denom(HRem *instruction) {
+ int64_t imm = Int64FromLocation(instruction->GetLocations()->InAt(1));
+ uint64_t abs_imm = static_cast<uint64_t>(AbsOrMin(imm));
+ DCHECK(IsPowerOfTwo(abs_imm)) << abs_imm;
+
+ Register out = OutputRegister(instruction);
+ Register dividend = InputRegisterAt(instruction, 0);
+
+ if (abs_imm == 2) {
+ __ Cmp(dividend, 0);
+ __ And(out, dividend, 1);
+ __ Csneg(out, out, out, ge);
+ } else {
+ UseScratchRegisterScope temps(GetVIXLAssembler());
+ Register temp = temps.AcquireSameSizeAs(out);
+
+ __ Negs(temp, dividend);
+ __ And(out, dividend, abs_imm - 1);
+ __ And(temp, temp, abs_imm - 1);
+ __ Csneg(out, out, temp, mi);
+ }
+}
+
+void InstructionCodeGeneratorARM64::GenerateIntRemForOneOrMinusOneDenom(HRem *instruction) {
+ int64_t imm = Int64FromLocation(instruction->GetLocations()->InAt(1));
+ DCHECK(imm == 1 || imm == -1) << imm;
+
+ Register out = OutputRegister(instruction);
+ __ Mov(out, 0);
+}
+
+void InstructionCodeGeneratorARM64::GenerateIntRemForConstDenom(HRem *instruction) {
+ int64_t imm = Int64FromLocation(instruction->GetLocations()->InAt(1));
+
+ if (imm == 0) {
+ // Do not generate anything.
+ // DivZeroCheck would prevent any code to be executed.
+ return;
+ }
+
+ if (imm == 1 || imm == -1) {
+ // TODO: These cases need to be optimized in InstructionSimplifier
+ GenerateIntRemForOneOrMinusOneDenom(instruction);
+ } else if (IsPowerOfTwo(AbsOrMin(imm))) {
+ GenerateIntRemForPower2Denom(instruction);
+ } else {
+ DCHECK(imm < -2 || imm > 2) << imm;
+ GenerateDivRemWithAnyConstant(instruction);
+ }
+}
+
+void InstructionCodeGeneratorARM64::GenerateIntRem(HRem* instruction) {
+ DCHECK(DataType::IsIntOrLongType(instruction->GetResultType()))
+ << instruction->GetResultType();
+
+ if (instruction->GetLocations()->InAt(1).IsConstant()) {
+ GenerateIntRemForConstDenom(instruction);
+ } else {
+ Register out = OutputRegister(instruction);
+ Register dividend = InputRegisterAt(instruction, 0);
+ Register divisor = InputRegisterAt(instruction, 1);
+ UseScratchRegisterScope temps(GetVIXLAssembler());
+ Register temp = temps.AcquireSameSizeAs(out);
+ __ Sdiv(temp, dividend, divisor);
+ __ Msub(out, temp, divisor, dividend);
+ }
+}
+
void InstructionCodeGeneratorARM64::VisitRem(HRem* rem) {
DataType::Type type = rem->GetResultType();
switch (type) {
case DataType::Type::kInt32:
case DataType::Type::kInt64: {
- GenerateDivRemIntegral(rem);
+ GenerateIntRem(rem);
break;
}
@@ -5662,111 +5775,20 @@ void InstructionCodeGeneratorARM64::VisitRem(HRem* rem) {
}
}
-// TODO: integrate with HandleBinaryOp?
-static void CreateMinMaxLocations(ArenaAllocator* allocator, HBinaryOperation* minmax) {
- LocationSummary* locations = new (allocator) LocationSummary(minmax);
- switch (minmax->GetResultType()) {
- case DataType::Type::kInt32:
- case DataType::Type::kInt64:
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
- locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
- break;
- case DataType::Type::kFloat32:
- case DataType::Type::kFloat64:
- locations->SetInAt(0, Location::RequiresFpuRegister());
- locations->SetInAt(1, Location::RequiresFpuRegister());
- locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
- break;
- default:
- LOG(FATAL) << "Unexpected type for HMinMax " << minmax->GetResultType();
- }
-}
-
-void InstructionCodeGeneratorARM64::GenerateMinMaxInt(LocationSummary* locations,
- bool is_min,
- DataType::Type type) {
- Location op1 = locations->InAt(0);
- Location op2 = locations->InAt(1);
- Location out = locations->Out();
-
- Register op1_reg;
- Register op2_reg;
- Register out_reg;
- if (type == DataType::Type::kInt64) {
- op1_reg = XRegisterFrom(op1);
- op2_reg = XRegisterFrom(op2);
- out_reg = XRegisterFrom(out);
- } else {
- DCHECK_EQ(type, DataType::Type::kInt32);
- op1_reg = WRegisterFrom(op1);
- op2_reg = WRegisterFrom(op2);
- out_reg = WRegisterFrom(out);
- }
-
- __ Cmp(op1_reg, op2_reg);
- __ Csel(out_reg, op1_reg, op2_reg, is_min ? lt : gt);
-}
-
-void InstructionCodeGeneratorARM64::GenerateMinMaxFP(LocationSummary* locations,
- bool is_min,
- DataType::Type type) {
- Location op1 = locations->InAt(0);
- Location op2 = locations->InAt(1);
- Location out = locations->Out();
-
- FPRegister op1_reg;
- FPRegister op2_reg;
- FPRegister out_reg;
- if (type == DataType::Type::kFloat64) {
- op1_reg = DRegisterFrom(op1);
- op2_reg = DRegisterFrom(op2);
- out_reg = DRegisterFrom(out);
- } else {
- DCHECK_EQ(type, DataType::Type::kFloat32);
- op1_reg = SRegisterFrom(op1);
- op2_reg = SRegisterFrom(op2);
- out_reg = SRegisterFrom(out);
- }
-
- if (is_min) {
- __ Fmin(out_reg, op1_reg, op2_reg);
- } else {
- __ Fmax(out_reg, op1_reg, op2_reg);
- }
-}
-
-// TODO: integrate with HandleBinaryOp?
-void InstructionCodeGeneratorARM64::GenerateMinMax(HBinaryOperation* minmax, bool is_min) {
- DataType::Type type = minmax->GetResultType();
- switch (type) {
- case DataType::Type::kInt32:
- case DataType::Type::kInt64:
- GenerateMinMaxInt(minmax->GetLocations(), is_min, type);
- break;
- case DataType::Type::kFloat32:
- case DataType::Type::kFloat64:
- GenerateMinMaxFP(minmax->GetLocations(), is_min, type);
- break;
- default:
- LOG(FATAL) << "Unexpected type for HMinMax " << type;
- }
-}
-
void LocationsBuilderARM64::VisitMin(HMin* min) {
- CreateMinMaxLocations(GetGraph()->GetAllocator(), min);
+ HandleBinaryOp(min);
}
void InstructionCodeGeneratorARM64::VisitMin(HMin* min) {
- GenerateMinMax(min, /*is_min*/ true);
+ HandleBinaryOp(min);
}
void LocationsBuilderARM64::VisitMax(HMax* max) {
- CreateMinMaxLocations(GetGraph()->GetAllocator(), max);
+ HandleBinaryOp(max);
}
void InstructionCodeGeneratorARM64::VisitMax(HMax* max) {
- GenerateMinMax(max, /*is_min*/ false);
+ HandleBinaryOp(max);
}
void LocationsBuilderARM64::VisitAbs(HAbs* abs) {
@@ -6687,7 +6709,7 @@ void CodeGeneratorARM64::GenerateRawReferenceLoad(HInstruction* instruction,
// ArrayGet and UnsafeGetObject and UnsafeCASObject intrinsics cases.
// /* HeapReference<mirror::Object> */ ref = *(obj + offset + (index << scale_factor))
if (index.IsConstant()) {
- uint32_t computed_offset = offset + (Int64ConstantFrom(index) << scale_factor);
+ uint32_t computed_offset = offset + (Int64FromLocation(index) << scale_factor);
EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
Load(type, ref_reg, HeapOperand(obj, computed_offset));
if (needs_null_check) {
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index e7fe5b71b7..c44fa48066 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -280,10 +280,6 @@ class InstructionCodeGeneratorARM64 : public InstructionCodeGenerator {
void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
void HandleCondition(HCondition* instruction);
- void GenerateMinMaxInt(LocationSummary* locations, bool is_min, DataType::Type type);
- void GenerateMinMaxFP(LocationSummary* locations, bool is_min, DataType::Type type);
- void GenerateMinMax(HBinaryOperation* minmax, bool is_min);
-
// Generate a heap reference load using one register `out`:
//
// out <- *(out + offset)
@@ -326,7 +322,13 @@ class InstructionCodeGeneratorARM64 : public InstructionCodeGenerator {
void DivRemOneOrMinusOne(HBinaryOperation* instruction);
void DivRemByPowerOfTwo(HBinaryOperation* instruction);
void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction);
- void GenerateDivRemIntegral(HBinaryOperation* instruction);
+ void GenerateIntDiv(HDiv* instruction);
+ void GenerateIntDivForConstDenom(HDiv *instruction);
+ void GenerateIntDivForPower2Denom(HDiv *instruction);
+ void GenerateIntRem(HRem* instruction);
+ void GenerateIntRemForConstDenom(HRem *instruction);
+ void GenerateIntRemForOneOrMinusOneDenom(HRem *instruction);
+ void GenerateIntRemForPower2Denom(HRem *instruction);
void HandleGoto(HInstruction* got, HBasicBlock* successor);
vixl::aarch64::MemOperand VecAddress(
@@ -403,7 +405,6 @@ class ParallelMoveResolverARM64 : public ParallelMoveResolverNoSwap {
class CodeGeneratorARM64 : public CodeGenerator {
public:
CodeGeneratorARM64(HGraph* graph,
- const Arm64InstructionSetFeatures& isa_features,
const CompilerOptions& compiler_options,
OptimizingCompilerStats* stats = nullptr);
virtual ~CodeGeneratorARM64() {}
@@ -476,9 +477,7 @@ class CodeGeneratorARM64 : public CodeGenerator {
return InstructionSet::kArm64;
}
- const Arm64InstructionSetFeatures& GetInstructionSetFeatures() const {
- return isa_features_;
- }
+ const Arm64InstructionSetFeatures& GetInstructionSetFeatures() const;
void Initialize() OVERRIDE {
block_labels_.resize(GetGraph()->GetBlocks().size());
@@ -561,6 +560,13 @@ class CodeGeneratorARM64 : public CodeGenerator {
UNIMPLEMENTED(FATAL);
}
+ // Add a new boot image intrinsic patch for an instruction and return the label
+ // to be bound before the instruction. The instruction will be either the
+ // ADRP (pass `adrp_label = null`) or the ADD (pass `adrp_label` pointing
+ // to the associated ADRP patch label).
+ vixl::aarch64::Label* NewBootImageIntrinsicPatch(uint32_t intrinsic_data,
+ vixl::aarch64::Label* adrp_label = nullptr);
+
// Add a new boot image relocation patch for an instruction and return the label
// to be bound before the instruction. The instruction will be either the
// ADRP (pass `adrp_label = null`) or the LDR (pass `adrp_label` pointing
@@ -634,6 +640,9 @@ class CodeGeneratorARM64 : public CodeGenerator {
vixl::aarch64::Register out,
vixl::aarch64::Register base);
+ void LoadBootImageAddress(vixl::aarch64::Register reg, uint32_t boot_image_reference);
+ void AllocateInstanceForIntrinsic(HInvokeStaticOrDirect* invoke, uint32_t boot_image_offset);
+
void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) OVERRIDE;
bool NeedsThunkCode(const linker::LinkerPatch& patch) const OVERRIDE;
void EmitThunkCode(const linker::LinkerPatch& patch,
@@ -892,7 +901,6 @@ class CodeGeneratorARM64 : public CodeGenerator {
InstructionCodeGeneratorARM64 instruction_visitor_;
ParallelMoveResolverARM64 move_resolver_;
Arm64Assembler assembler_;
- const Arm64InstructionSetFeatures& isa_features_;
// Deduplication map for 32-bit literals, used for non-patchable boot image addresses.
Uint32ToLiteralMap uint32_literals_;
@@ -911,6 +919,8 @@ class CodeGeneratorARM64 : public CodeGenerator {
ArenaDeque<PcRelativePatchInfo> boot_image_string_patches_;
// PC-relative String patch info for kBssEntry.
ArenaDeque<PcRelativePatchInfo> string_bss_entry_patches_;
+ // PC-relative patch info for IntrinsicObjects.
+ ArenaDeque<PcRelativePatchInfo> boot_image_intrinsic_patches_;
// Baker read barrier patch info.
ArenaDeque<BakerReadBarrierPatchInfo> baker_read_barrier_patches_;
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 859e1597c6..9e1ef4002e 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -27,7 +27,9 @@
#include "compiled_method.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "gc/accounting/card_table.h"
+#include "gc/space/image_space.h"
#include "heap_poisoning.h"
+#include "intrinsics.h"
#include "intrinsics_arm_vixl.h"
#include "linker/linker_patch.h"
#include "mirror/array-inl.h"
@@ -1501,6 +1503,10 @@ void CodeGeneratorARMVIXL::DumpFloatingPointRegister(std::ostream& stream, int r
stream << vixl32::SRegister(reg);
}
+const ArmInstructionSetFeatures& CodeGeneratorARMVIXL::GetInstructionSetFeatures() const {
+ return *GetCompilerOptions().GetInstructionSetFeatures()->AsArmInstructionSetFeatures();
+}
+
static uint32_t ComputeSRegisterListMask(const SRegisterList& regs) {
uint32_t mask = 0;
for (uint32_t i = regs.GetFirstSRegister().GetCode();
@@ -2318,7 +2324,6 @@ vixl32::Label* CodeGeneratorARMVIXL::GetFinalLabel(HInstruction* instruction,
}
CodeGeneratorARMVIXL::CodeGeneratorARMVIXL(HGraph* graph,
- const ArmInstructionSetFeatures& isa_features,
const CompilerOptions& compiler_options,
OptimizingCompilerStats* stats)
: CodeGenerator(graph,
@@ -2335,7 +2340,6 @@ CodeGeneratorARMVIXL::CodeGeneratorARMVIXL(HGraph* graph,
instruction_visitor_(graph, this),
move_resolver_(graph->GetAllocator(), this),
assembler_(graph->GetAllocator()),
- isa_features_(isa_features),
uint32_literals_(std::less<uint32_t>(),
graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
boot_image_method_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
@@ -2344,6 +2348,7 @@ CodeGeneratorARMVIXL::CodeGeneratorARMVIXL(HGraph* graph,
type_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
boot_image_string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
string_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ boot_image_intrinsic_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
baker_read_barrier_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
jit_string_patches_(StringReferenceValueComparator(),
graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
@@ -3742,6 +3747,15 @@ void InstructionCodeGeneratorARMVIXL::VisitInvokePolymorphic(HInvokePolymorphic*
codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 9);
}
+void LocationsBuilderARMVIXL::VisitInvokeCustom(HInvokeCustom* invoke) {
+ HandleInvoke(invoke);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitInvokeCustom(HInvokeCustom* invoke) {
+ codegen_->GenerateInvokeCustomCall(invoke);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 10);
+}
+
void LocationsBuilderARMVIXL::VisitNeg(HNeg* neg) {
LocationSummary* locations =
new (GetGraph()->GetAllocator()) LocationSummary(neg, LocationSummary::kNoCall);
@@ -5493,7 +5507,7 @@ void InstructionCodeGeneratorARMVIXL::VisitNewInstance(HNewInstance* instruction
codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
}
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 10);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 11);
}
void LocationsBuilderARMVIXL::VisitNewArray(HNewArray* instruction) {
@@ -5513,7 +5527,7 @@ void InstructionCodeGeneratorARMVIXL::VisitNewArray(HNewArray* instruction) {
codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
DCHECK(!codegen_->IsLeafMethod());
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 11);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 12);
}
void LocationsBuilderARMVIXL::VisitParameterValue(HParameterValue* instruction) {
@@ -7084,7 +7098,7 @@ void InstructionCodeGeneratorARMVIXL::VisitSuspendCheck(HSuspendCheck* instructi
return;
}
GenerateSuspendCheck(instruction, nullptr);
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 12);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 13);
}
void InstructionCodeGeneratorARMVIXL::GenerateSuspendCheck(HSuspendCheck* instruction,
@@ -7437,7 +7451,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) NO_THREAD_
HLoadClass::LoadKind load_kind = cls->GetLoadKind();
if (load_kind == HLoadClass::LoadKind::kRuntimeCall) {
codegen_->GenerateLoadClassRuntimeCall(cls);
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 13);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 14);
return;
}
DCHECK(!cls->NeedsAccessCheck());
@@ -7523,7 +7537,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) NO_THREAD_
} else {
__ Bind(slow_path->GetExitLabel());
}
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 14);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 15);
}
}
@@ -7732,7 +7746,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) NO_THRE
codegen_->AddSlowPath(slow_path);
__ CompareAndBranchIfZero(out, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 15);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 16);
return;
}
case HLoadString::LoadKind::kJitTableAddress: {
@@ -7754,7 +7768,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) NO_THRE
__ Mov(calling_convention.GetRegisterAt(0), load->GetStringIndex().index_);
codegen_->InvokeRuntime(kQuickResolveString, load, load->GetDexPc());
CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 16);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 17);
}
static int32_t GetExceptionTlsOffset() {
@@ -8384,7 +8398,7 @@ void InstructionCodeGeneratorARMVIXL::VisitMonitorOperation(HMonitorOperation* i
} else {
CheckEntrypointTypes<kQuickUnlockObject, void, mirror::Object*>();
}
- codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 17);
+ codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 18);
}
void LocationsBuilderARMVIXL::VisitAnd(HAnd* instruction) {
@@ -8883,7 +8897,7 @@ void CodeGeneratorARMVIXL::GenerateGcRootFieldLoad(
// Note that GC roots are not affected by heap poisoning, thus we
// do not have to unpoison `root_reg` here.
}
- MaybeGenerateMarkingRegisterCheck(/* code */ 18);
+ MaybeGenerateMarkingRegisterCheck(/* code */ 19);
}
void CodeGeneratorARMVIXL::GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
@@ -8963,7 +8977,7 @@ void CodeGeneratorARMVIXL::GenerateFieldLoadWithBakerReadBarrier(HInstruction* i
narrow ? BAKER_MARK_INTROSPECTION_FIELD_LDR_NARROW_OFFSET
: BAKER_MARK_INTROSPECTION_FIELD_LDR_WIDE_OFFSET);
}
- MaybeGenerateMarkingRegisterCheck(/* code */ 19, /* temp_loc */ LocationFrom(ip));
+ MaybeGenerateMarkingRegisterCheck(/* code */ 20, /* temp_loc */ LocationFrom(ip));
return;
}
@@ -9041,7 +9055,7 @@ void CodeGeneratorARMVIXL::GenerateArrayLoadWithBakerReadBarrier(HInstruction* i
DCHECK_EQ(old_offset - GetVIXLAssembler()->GetBuffer()->GetCursorOffset(),
BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET);
}
- MaybeGenerateMarkingRegisterCheck(/* code */ 20, /* temp_loc */ LocationFrom(ip));
+ MaybeGenerateMarkingRegisterCheck(/* code */ 21, /* temp_loc */ LocationFrom(ip));
return;
}
@@ -9095,7 +9109,7 @@ void CodeGeneratorARMVIXL::GenerateReferenceLoadWithBakerReadBarrier(HInstructio
// Fast path: the GC is not marking: just load the reference.
GenerateRawReferenceLoad(instruction, ref, obj, offset, index, scale_factor, needs_null_check);
__ Bind(slow_path->GetExitLabel());
- MaybeGenerateMarkingRegisterCheck(/* code */ 21);
+ MaybeGenerateMarkingRegisterCheck(/* code */ 22);
}
void CodeGeneratorARMVIXL::UpdateReferenceFieldWithBakerReadBarrier(HInstruction* instruction,
@@ -9150,7 +9164,7 @@ void CodeGeneratorARMVIXL::UpdateReferenceFieldWithBakerReadBarrier(HInstruction
// Fast path: the GC is not marking: nothing to do (the field is
// up-to-date, and we don't need to load the reference).
__ Bind(slow_path->GetExitLabel());
- MaybeGenerateMarkingRegisterCheck(/* code */ 22);
+ MaybeGenerateMarkingRegisterCheck(/* code */ 23);
}
void CodeGeneratorARMVIXL::GenerateRawReferenceLoad(HInstruction* instruction,
@@ -9450,6 +9464,11 @@ void CodeGeneratorARMVIXL::GenerateVirtualCall(
}
}
+CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewBootImageIntrinsicPatch(
+ uint32_t intrinsic_data) {
+ return NewPcRelativePatch(/* dex_file */ nullptr, intrinsic_data, &boot_image_intrinsic_patches_);
+}
+
CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewBootImageRelRoPatch(
uint32_t boot_image_offset) {
return NewPcRelativePatch(/* dex_file */ nullptr,
@@ -9527,6 +9546,46 @@ VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateJitClassLiteral(const DexFil
});
}
+void CodeGeneratorARMVIXL::LoadBootImageAddress(vixl32::Register reg,
+ uint32_t boot_image_reference) {
+ if (GetCompilerOptions().IsBootImage()) {
+ CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
+ NewBootImageIntrinsicPatch(boot_image_reference);
+ EmitMovwMovtPlaceholder(labels, reg);
+ } else if (GetCompilerOptions().GetCompilePic()) {
+ DCHECK(Runtime::Current()->IsAotCompiler());
+ CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
+ NewBootImageRelRoPatch(boot_image_reference);
+ EmitMovwMovtPlaceholder(labels, reg);
+ __ Ldr(reg, MemOperand(reg, /* offset */ 0));
+ } else {
+ gc::Heap* heap = Runtime::Current()->GetHeap();
+ DCHECK(!heap->GetBootImageSpaces().empty());
+ uintptr_t address =
+ reinterpret_cast<uintptr_t>(heap->GetBootImageSpaces()[0]->Begin() + boot_image_reference);
+ __ Ldr(reg, DeduplicateBootImageAddressLiteral(dchecked_integral_cast<uint32_t>(address)));
+ }
+}
+
+void CodeGeneratorARMVIXL::AllocateInstanceForIntrinsic(HInvokeStaticOrDirect* invoke,
+ uint32_t boot_image_offset) {
+ DCHECK(invoke->IsStatic());
+ InvokeRuntimeCallingConventionARMVIXL calling_convention;
+ vixl32::Register argument = calling_convention.GetRegisterAt(0);
+ if (GetCompilerOptions().IsBootImage()) {
+ DCHECK_EQ(boot_image_offset, IntrinsicVisitor::IntegerValueOfInfo::kInvalidReference);
+ // Load the class the same way as for HLoadClass::LoadKind::kBootImageLinkTimePcRelative.
+ MethodReference target_method = invoke->GetTargetMethod();
+ dex::TypeIndex type_idx = target_method.dex_file->GetMethodId(target_method.index).class_idx_;
+ PcRelativePatchInfo* labels = NewBootImageTypePatch(*target_method.dex_file, type_idx);
+ EmitMovwMovtPlaceholder(labels, argument);
+ } else {
+ LoadBootImageAddress(argument, boot_image_offset);
+ }
+ InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
+}
+
template <linker::LinkerPatch (*Factory)(size_t, const DexFile*, uint32_t, uint32_t)>
inline void CodeGeneratorARMVIXL::EmitPcRelativeLinkerPatches(
const ArenaDeque<PcRelativePatchInfo>& infos,
@@ -9547,12 +9606,13 @@ inline void CodeGeneratorARMVIXL::EmitPcRelativeLinkerPatches(
}
}
-linker::LinkerPatch DataBimgRelRoPatchAdapter(size_t literal_offset,
- const DexFile* target_dex_file,
- uint32_t pc_insn_offset,
- uint32_t boot_image_offset) {
- DCHECK(target_dex_file == nullptr); // Unused for DataBimgRelRoPatch(), should be null.
- return linker::LinkerPatch::DataBimgRelRoPatch(literal_offset, pc_insn_offset, boot_image_offset);
+template <linker::LinkerPatch (*Factory)(size_t, uint32_t, uint32_t)>
+linker::LinkerPatch NoDexFileAdapter(size_t literal_offset,
+ const DexFile* target_dex_file,
+ uint32_t pc_insn_offset,
+ uint32_t boot_image_offset) {
+ DCHECK(target_dex_file == nullptr); // Unused for these patches, should be null.
+ return Factory(literal_offset, pc_insn_offset, boot_image_offset);
}
void CodeGeneratorARMVIXL::EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) {
@@ -9564,6 +9624,7 @@ void CodeGeneratorARMVIXL::EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* l
/* MOVW+MOVT for each entry */ 2u * type_bss_entry_patches_.size() +
/* MOVW+MOVT for each entry */ 2u * boot_image_string_patches_.size() +
/* MOVW+MOVT for each entry */ 2u * string_bss_entry_patches_.size() +
+ /* MOVW+MOVT for each entry */ 2u * boot_image_intrinsic_patches_.size() +
baker_read_barrier_patches_.size();
linker_patches->reserve(size);
if (GetCompilerOptions().IsBootImage()) {
@@ -9573,11 +9634,14 @@ void CodeGeneratorARMVIXL::EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* l
boot_image_type_patches_, linker_patches);
EmitPcRelativeLinkerPatches<linker::LinkerPatch::RelativeStringPatch>(
boot_image_string_patches_, linker_patches);
+ EmitPcRelativeLinkerPatches<NoDexFileAdapter<linker::LinkerPatch::IntrinsicReferencePatch>>(
+ boot_image_intrinsic_patches_, linker_patches);
} else {
- EmitPcRelativeLinkerPatches<DataBimgRelRoPatchAdapter>(
+ EmitPcRelativeLinkerPatches<NoDexFileAdapter<linker::LinkerPatch::DataBimgRelRoPatch>>(
boot_image_method_patches_, linker_patches);
DCHECK(boot_image_type_patches_.empty());
DCHECK(boot_image_string_patches_.empty());
+ DCHECK(boot_image_intrinsic_patches_.empty());
}
EmitPcRelativeLinkerPatches<linker::LinkerPatch::MethodBssEntryPatch>(
method_bss_entry_patches_, linker_patches);
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index d5b739bd7c..fc8cf98173 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -428,7 +428,6 @@ class InstructionCodeGeneratorARMVIXL : public InstructionCodeGenerator {
class CodeGeneratorARMVIXL : public CodeGenerator {
public:
CodeGeneratorARMVIXL(HGraph* graph,
- const ArmInstructionSetFeatures& isa_features,
const CompilerOptions& compiler_options,
OptimizingCompilerStats* stats = nullptr);
virtual ~CodeGeneratorARMVIXL() {}
@@ -475,6 +474,9 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
ParallelMoveResolver* GetMoveResolver() OVERRIDE { return &move_resolver_; }
InstructionSet GetInstructionSet() const OVERRIDE { return InstructionSet::kThumb2; }
+
+ const ArmInstructionSetFeatures& GetInstructionSetFeatures() const;
+
// Helper method to move a 32-bit value between two locations.
void Move32(Location destination, Location source);
@@ -523,8 +525,6 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
void Finalize(CodeAllocator* allocator) OVERRIDE;
- const ArmInstructionSetFeatures& GetInstructionSetFeatures() const { return isa_features_; }
-
bool NeedsTwoRegisters(DataType::Type type) const OVERRIDE {
return type == DataType::Type::kFloat64 || type == DataType::Type::kInt64;
}
@@ -578,6 +578,7 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
vixl::aarch32::Label add_pc_label;
};
+ PcRelativePatchInfo* NewBootImageIntrinsicPatch(uint32_t intrinsic_data);
PcRelativePatchInfo* NewBootImageRelRoPatch(uint32_t boot_image_offset);
PcRelativePatchInfo* NewBootImageMethodPatch(MethodReference target_method);
PcRelativePatchInfo* NewMethodBssEntryPatch(MethodReference target_method);
@@ -600,6 +601,9 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
dex::TypeIndex type_index,
Handle<mirror::Class> handle);
+ void LoadBootImageAddress(vixl::aarch32::Register reg, uint32_t boot_image_reference);
+ void AllocateInstanceForIntrinsic(HInvokeStaticOrDirect* invoke, uint32_t boot_image_offset);
+
void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) OVERRIDE;
bool NeedsThunkCode(const linker::LinkerPatch& patch) const OVERRIDE;
void EmitThunkCode(const linker::LinkerPatch& patch,
@@ -886,7 +890,6 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
ParallelMoveResolverARMVIXL move_resolver_;
ArmVIXLAssembler assembler_;
- const ArmInstructionSetFeatures& isa_features_;
// Deduplication map for 32-bit literals, used for non-patchable boot image addresses.
Uint32ToLiteralMap uint32_literals_;
@@ -903,6 +906,8 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
ArenaDeque<PcRelativePatchInfo> boot_image_string_patches_;
// PC-relative String patch info for kBssEntry.
ArenaDeque<PcRelativePatchInfo> string_bss_entry_patches_;
+ // PC-relative patch info for IntrinsicObjects.
+ ArenaDeque<PcRelativePatchInfo> boot_image_intrinsic_patches_;
// Baker read barrier patch info.
ArenaDeque<BakerReadBarrierPatchInfo> baker_read_barrier_patches_;
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 8be84a15bd..f0ef30ee37 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -26,6 +26,7 @@
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "gc/accounting/card_table.h"
+#include "gc/space/image_space.h"
#include "heap_poisoning.h"
#include "intrinsics.h"
#include "intrinsics_mips.h"
@@ -996,7 +997,6 @@ class ReadBarrierForRootSlowPathMIPS : public SlowPathCodeMIPS {
};
CodeGeneratorMIPS::CodeGeneratorMIPS(HGraph* graph,
- const MipsInstructionSetFeatures& isa_features,
const CompilerOptions& compiler_options,
OptimizingCompilerStats* stats)
: CodeGenerator(graph,
@@ -1013,8 +1013,8 @@ CodeGeneratorMIPS::CodeGeneratorMIPS(HGraph* graph,
location_builder_(graph, this),
instruction_visitor_(graph, this),
move_resolver_(graph->GetAllocator(), this),
- assembler_(graph->GetAllocator(), &isa_features),
- isa_features_(isa_features),
+ assembler_(graph->GetAllocator(),
+ compiler_options.GetInstructionSetFeatures()->AsMipsInstructionSetFeatures()),
uint32_literals_(std::less<uint32_t>(),
graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
boot_image_method_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
@@ -1023,6 +1023,7 @@ CodeGeneratorMIPS::CodeGeneratorMIPS(HGraph* graph,
type_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
boot_image_string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
string_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ boot_image_intrinsic_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
jit_string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
jit_class_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
clobbered_ra_(false) {
@@ -1596,12 +1597,13 @@ inline void CodeGeneratorMIPS::EmitPcRelativeLinkerPatches(
}
}
-linker::LinkerPatch DataBimgRelRoPatchAdapter(size_t literal_offset,
- const DexFile* target_dex_file,
- uint32_t pc_insn_offset,
- uint32_t boot_image_offset) {
- DCHECK(target_dex_file == nullptr); // Unused for DataBimgRelRoPatch(), should be null.
- return linker::LinkerPatch::DataBimgRelRoPatch(literal_offset, pc_insn_offset, boot_image_offset);
+template <linker::LinkerPatch (*Factory)(size_t, uint32_t, uint32_t)>
+linker::LinkerPatch NoDexFileAdapter(size_t literal_offset,
+ const DexFile* target_dex_file,
+ uint32_t pc_insn_offset,
+ uint32_t boot_image_offset) {
+ DCHECK(target_dex_file == nullptr); // Unused for these patches, should be null.
+ return Factory(literal_offset, pc_insn_offset, boot_image_offset);
}
void CodeGeneratorMIPS::EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) {
@@ -1612,7 +1614,8 @@ void CodeGeneratorMIPS::EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* link
boot_image_type_patches_.size() +
type_bss_entry_patches_.size() +
boot_image_string_patches_.size() +
- string_bss_entry_patches_.size();
+ string_bss_entry_patches_.size() +
+ boot_image_intrinsic_patches_.size();
linker_patches->reserve(size);
if (GetCompilerOptions().IsBootImage()) {
EmitPcRelativeLinkerPatches<linker::LinkerPatch::RelativeMethodPatch>(
@@ -1621,11 +1624,14 @@ void CodeGeneratorMIPS::EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* link
boot_image_type_patches_, linker_patches);
EmitPcRelativeLinkerPatches<linker::LinkerPatch::RelativeStringPatch>(
boot_image_string_patches_, linker_patches);
+ EmitPcRelativeLinkerPatches<NoDexFileAdapter<linker::LinkerPatch::IntrinsicReferencePatch>>(
+ boot_image_intrinsic_patches_, linker_patches);
} else {
- EmitPcRelativeLinkerPatches<DataBimgRelRoPatchAdapter>(
+ EmitPcRelativeLinkerPatches<NoDexFileAdapter<linker::LinkerPatch::DataBimgRelRoPatch>>(
boot_image_method_patches_, linker_patches);
DCHECK(boot_image_type_patches_.empty());
DCHECK(boot_image_string_patches_.empty());
+ DCHECK(boot_image_intrinsic_patches_.empty());
}
EmitPcRelativeLinkerPatches<linker::LinkerPatch::MethodBssEntryPatch>(
method_bss_entry_patches_, linker_patches);
@@ -1636,6 +1642,13 @@ void CodeGeneratorMIPS::EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* link
DCHECK_EQ(size, linker_patches->size());
}
+CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewBootImageIntrinsicPatch(
+ uint32_t intrinsic_data,
+ const PcRelativePatchInfo* info_high) {
+ return NewPcRelativePatch(
+ /* dex_file */ nullptr, intrinsic_data, info_high, &boot_image_intrinsic_patches_);
+}
+
CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewBootImageRelRoPatch(
uint32_t boot_image_offset,
const PcRelativePatchInfo* info_high) {
@@ -1739,6 +1752,48 @@ void CodeGeneratorMIPS::EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchInfo
// offset to `out` (e.g. lw, jialc, addiu).
}
+void CodeGeneratorMIPS::LoadBootImageAddress(Register reg, uint32_t boot_image_reference) {
+ if (GetCompilerOptions().IsBootImage()) {
+ PcRelativePatchInfo* info_high = NewBootImageIntrinsicPatch(boot_image_reference);
+ PcRelativePatchInfo* info_low = NewBootImageIntrinsicPatch(boot_image_reference, info_high);
+ EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, /* base */ ZERO);
+ __ Addiu(reg, TMP, /* placeholder */ 0x5678, &info_low->label);
+ } else if (GetCompilerOptions().GetCompilePic()) {
+ DCHECK(Runtime::Current()->IsAotCompiler());
+ PcRelativePatchInfo* info_high = NewBootImageRelRoPatch(boot_image_reference);
+ PcRelativePatchInfo* info_low = NewBootImageRelRoPatch(boot_image_reference, info_high);
+ EmitPcRelativeAddressPlaceholderHigh(info_high, reg, /* base */ ZERO);
+ __ Lw(reg, reg, /* placeholder */ 0x5678, &info_low->label);
+ } else {
+ gc::Heap* heap = Runtime::Current()->GetHeap();
+ DCHECK(!heap->GetBootImageSpaces().empty());
+ const uint8_t* address = heap->GetBootImageSpaces()[0]->Begin() + boot_image_reference;
+ __ LoadConst32(reg, dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(address)));
+ }
+}
+
+void CodeGeneratorMIPS::AllocateInstanceForIntrinsic(HInvokeStaticOrDirect* invoke,
+ uint32_t boot_image_offset) {
+ DCHECK(invoke->IsStatic());
+ InvokeRuntimeCallingConvention calling_convention;
+ Register argument = calling_convention.GetRegisterAt(0);
+ if (GetCompilerOptions().IsBootImage()) {
+ DCHECK_EQ(boot_image_offset, IntrinsicVisitor::IntegerValueOfInfo::kInvalidReference);
+ // Load the class the same way as for HLoadClass::LoadKind::kBootImageLinkTimePcRelative.
+ MethodReference target_method = invoke->GetTargetMethod();
+ dex::TypeIndex type_idx = target_method.dex_file->GetMethodId(target_method.index).class_idx_;
+ PcRelativePatchInfo* info_high = NewBootImageTypePatch(*target_method.dex_file, type_idx);
+ PcRelativePatchInfo* info_low =
+ NewBootImageTypePatch(*target_method.dex_file, type_idx, info_high);
+ EmitPcRelativeAddressPlaceholderHigh(info_high, argument, /* base */ ZERO);
+ __ Addiu(argument, argument, /* placeholder */ 0x5678, &info_low->label);
+ } else {
+ LoadBootImageAddress(argument, boot_image_offset);
+ }
+ InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
+}
+
CodeGeneratorMIPS::JitPatchInfo* CodeGeneratorMIPS::NewJitRootStringPatch(
const DexFile& dex_file,
dex::StringIndex string_index,
@@ -1895,6 +1950,10 @@ void CodeGeneratorMIPS::DumpFloatingPointRegister(std::ostream& stream, int reg)
stream << FRegister(reg);
}
+const MipsInstructionSetFeatures& CodeGeneratorMIPS::GetInstructionSetFeatures() const {
+ return *GetCompilerOptions().GetInstructionSetFeatures()->AsMipsInstructionSetFeatures();
+}
+
constexpr size_t kMipsDirectEntrypointRuntimeOffset = 16;
void CodeGeneratorMIPS::InvokeRuntime(QuickEntrypointEnum entrypoint,
@@ -7795,6 +7854,14 @@ void InstructionCodeGeneratorMIPS::VisitInvokePolymorphic(HInvokePolymorphic* in
codegen_->GenerateInvokePolymorphicCall(invoke);
}
+void LocationsBuilderMIPS::VisitInvokeCustom(HInvokeCustom* invoke) {
+ HandleInvoke(invoke);
+}
+
+void InstructionCodeGeneratorMIPS::VisitInvokeCustom(HInvokeCustom* invoke) {
+ codegen_->GenerateInvokeCustomCall(invoke);
+}
+
static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorMIPS* codegen) {
if (invoke->GetLocations()->Intrinsified()) {
IntrinsicCodeGeneratorMIPS intrinsic(codegen);
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index 2e7c736dbd..4830ac9bc6 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -370,7 +370,6 @@ class InstructionCodeGeneratorMIPS : public InstructionCodeGenerator {
class CodeGeneratorMIPS : public CodeGenerator {
public:
CodeGeneratorMIPS(HGraph* graph,
- const MipsInstructionSetFeatures& isa_features,
const CompilerOptions& compiler_options,
OptimizingCompilerStats* stats = nullptr);
virtual ~CodeGeneratorMIPS() {}
@@ -509,9 +508,7 @@ class CodeGeneratorMIPS : public CodeGenerator {
InstructionSet GetInstructionSet() const OVERRIDE { return InstructionSet::kMips; }
- const MipsInstructionSetFeatures& GetInstructionSetFeatures() const {
- return isa_features_;
- }
+ const MipsInstructionSetFeatures& GetInstructionSetFeatures() const;
MipsLabel* GetLabelOf(HBasicBlock* block) const {
return CommonGetLabelOf<MipsLabel>(block_labels_, block);
@@ -621,6 +618,8 @@ class CodeGeneratorMIPS : public CodeGenerator {
DISALLOW_COPY_AND_ASSIGN(PcRelativePatchInfo);
};
+ PcRelativePatchInfo* NewBootImageIntrinsicPatch(uint32_t intrinsic_data,
+ const PcRelativePatchInfo* info_high = nullptr);
PcRelativePatchInfo* NewBootImageRelRoPatch(uint32_t boot_image_offset,
const PcRelativePatchInfo* info_high = nullptr);
PcRelativePatchInfo* NewBootImageMethodPatch(MethodReference target_method,
@@ -645,6 +644,9 @@ class CodeGeneratorMIPS : public CodeGenerator {
Register out,
Register base);
+ void LoadBootImageAddress(Register reg, uint32_t boot_image_reference);
+ void AllocateInstanceForIntrinsic(HInvokeStaticOrDirect* invoke, uint32_t boot_image_offset);
+
// The JitPatchInfo is used for JIT string and class loads.
struct JitPatchInfo {
JitPatchInfo(const DexFile& dex_file, uint64_t idx)
@@ -693,7 +695,6 @@ class CodeGeneratorMIPS : public CodeGenerator {
InstructionCodeGeneratorMIPS instruction_visitor_;
ParallelMoveResolverMIPS move_resolver_;
MipsAssembler assembler_;
- const MipsInstructionSetFeatures& isa_features_;
// Deduplication map for 32-bit literals, used for non-patchable boot image addresses.
Uint32ToLiteralMap uint32_literals_;
@@ -710,6 +711,8 @@ class CodeGeneratorMIPS : public CodeGenerator {
ArenaDeque<PcRelativePatchInfo> boot_image_string_patches_;
// PC-relative String patch info for kBssEntry.
ArenaDeque<PcRelativePatchInfo> string_bss_entry_patches_;
+ // PC-relative patch info for IntrinsicObjects.
+ ArenaDeque<PcRelativePatchInfo> boot_image_intrinsic_patches_;
// Patches for string root accesses in JIT compiled code.
ArenaDeque<JitPatchInfo> jit_string_patches_;
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index cd9e0e521e..6e72727f59 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -24,6 +24,7 @@
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "gc/accounting/card_table.h"
+#include "gc/space/image_space.h"
#include "heap_poisoning.h"
#include "intrinsics.h"
#include "intrinsics_mips64.h"
@@ -939,7 +940,6 @@ class ReadBarrierForRootSlowPathMIPS64 : public SlowPathCodeMIPS64 {
};
CodeGeneratorMIPS64::CodeGeneratorMIPS64(HGraph* graph,
- const Mips64InstructionSetFeatures& isa_features,
const CompilerOptions& compiler_options,
OptimizingCompilerStats* stats)
: CodeGenerator(graph,
@@ -956,8 +956,8 @@ CodeGeneratorMIPS64::CodeGeneratorMIPS64(HGraph* graph,
location_builder_(graph, this),
instruction_visitor_(graph, this),
move_resolver_(graph->GetAllocator(), this),
- assembler_(graph->GetAllocator(), &isa_features),
- isa_features_(isa_features),
+ assembler_(graph->GetAllocator(),
+ compiler_options.GetInstructionSetFeatures()->AsMips64InstructionSetFeatures()),
uint32_literals_(std::less<uint32_t>(),
graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
uint64_literals_(std::less<uint64_t>(),
@@ -968,6 +968,7 @@ CodeGeneratorMIPS64::CodeGeneratorMIPS64(HGraph* graph,
type_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
boot_image_string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
string_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ boot_image_intrinsic_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
jit_string_patches_(StringReferenceValueComparator(),
graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
jit_class_patches_(TypeReferenceValueComparator(),
@@ -1508,12 +1509,13 @@ inline void CodeGeneratorMIPS64::EmitPcRelativeLinkerPatches(
}
}
-linker::LinkerPatch DataBimgRelRoPatchAdapter(size_t literal_offset,
- const DexFile* target_dex_file,
- uint32_t pc_insn_offset,
- uint32_t boot_image_offset) {
- DCHECK(target_dex_file == nullptr); // Unused for DataBimgRelRoPatch(), should be null.
- return linker::LinkerPatch::DataBimgRelRoPatch(literal_offset, pc_insn_offset, boot_image_offset);
+template <linker::LinkerPatch (*Factory)(size_t, uint32_t, uint32_t)>
+linker::LinkerPatch NoDexFileAdapter(size_t literal_offset,
+ const DexFile* target_dex_file,
+ uint32_t pc_insn_offset,
+ uint32_t boot_image_offset) {
+ DCHECK(target_dex_file == nullptr); // Unused for these patches, should be null.
+ return Factory(literal_offset, pc_insn_offset, boot_image_offset);
}
void CodeGeneratorMIPS64::EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) {
@@ -1524,7 +1526,8 @@ void CodeGeneratorMIPS64::EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* li
boot_image_type_patches_.size() +
type_bss_entry_patches_.size() +
boot_image_string_patches_.size() +
- string_bss_entry_patches_.size();
+ string_bss_entry_patches_.size() +
+ boot_image_intrinsic_patches_.size();
linker_patches->reserve(size);
if (GetCompilerOptions().IsBootImage()) {
EmitPcRelativeLinkerPatches<linker::LinkerPatch::RelativeMethodPatch>(
@@ -1533,11 +1536,14 @@ void CodeGeneratorMIPS64::EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* li
boot_image_type_patches_, linker_patches);
EmitPcRelativeLinkerPatches<linker::LinkerPatch::RelativeStringPatch>(
boot_image_string_patches_, linker_patches);
+ EmitPcRelativeLinkerPatches<NoDexFileAdapter<linker::LinkerPatch::IntrinsicReferencePatch>>(
+ boot_image_intrinsic_patches_, linker_patches);
} else {
- EmitPcRelativeLinkerPatches<DataBimgRelRoPatchAdapter>(
+ EmitPcRelativeLinkerPatches<NoDexFileAdapter<linker::LinkerPatch::DataBimgRelRoPatch>>(
boot_image_method_patches_, linker_patches);
DCHECK(boot_image_type_patches_.empty());
DCHECK(boot_image_string_patches_.empty());
+ DCHECK(boot_image_intrinsic_patches_.empty());
}
EmitPcRelativeLinkerPatches<linker::LinkerPatch::MethodBssEntryPatch>(
method_bss_entry_patches_, linker_patches);
@@ -1548,6 +1554,13 @@ void CodeGeneratorMIPS64::EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* li
DCHECK_EQ(size, linker_patches->size());
}
+CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewBootImageIntrinsicPatch(
+ uint32_t intrinsic_data,
+ const PcRelativePatchInfo* info_high) {
+ return NewPcRelativePatch(
+ /* dex_file */ nullptr, intrinsic_data, info_high, &boot_image_intrinsic_patches_);
+}
+
CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewBootImageRelRoPatch(
uint32_t boot_image_offset,
const PcRelativePatchInfo* info_high) {
@@ -1638,6 +1651,50 @@ void CodeGeneratorMIPS64::EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchIn
}
}
+void CodeGeneratorMIPS64::LoadBootImageAddress(GpuRegister reg, uint32_t boot_image_reference) {
+ if (GetCompilerOptions().IsBootImage()) {
+ PcRelativePatchInfo* info_high = NewBootImageIntrinsicPatch(boot_image_reference);
+ PcRelativePatchInfo* info_low = NewBootImageIntrinsicPatch(boot_image_reference, info_high);
+ EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
+ __ Daddiu(reg, AT, /* placeholder */ 0x5678);
+ } else if (GetCompilerOptions().GetCompilePic()) {
+ DCHECK(Runtime::Current()->IsAotCompiler());
+ PcRelativePatchInfo* info_high = NewBootImageRelRoPatch(boot_image_reference);
+ PcRelativePatchInfo* info_low = NewBootImageRelRoPatch(boot_image_reference, info_high);
+ EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
+ // Note: Boot image is in the low 4GiB and the entry is 32-bit, so emit a 32-bit load.
+ __ Lwu(reg, AT, /* placeholder */ 0x5678);
+ } else {
+ gc::Heap* heap = Runtime::Current()->GetHeap();
+ DCHECK(!heap->GetBootImageSpaces().empty());
+ uintptr_t address =
+ reinterpret_cast<uintptr_t>(heap->GetBootImageSpaces()[0]->Begin() + boot_image_reference);
+ __ LoadLiteral(reg, kLoadDoubleword, DeduplicateBootImageAddressLiteral(address));
+ }
+}
+
+void CodeGeneratorMIPS64::AllocateInstanceForIntrinsic(HInvokeStaticOrDirect* invoke,
+ uint32_t boot_image_offset) {
+ DCHECK(invoke->IsStatic());
+ InvokeRuntimeCallingConvention calling_convention;
+ GpuRegister argument = calling_convention.GetRegisterAt(0);
+ if (GetCompilerOptions().IsBootImage()) {
+ DCHECK_EQ(boot_image_offset, IntrinsicVisitor::IntegerValueOfInfo::kInvalidReference);
+ // Load the class the same way as for HLoadClass::LoadKind::kBootImageLinkTimePcRelative.
+ MethodReference target_method = invoke->GetTargetMethod();
+ dex::TypeIndex type_idx = target_method.dex_file->GetMethodId(target_method.index).class_idx_;
+ PcRelativePatchInfo* info_high = NewBootImageTypePatch(*target_method.dex_file, type_idx);
+ PcRelativePatchInfo* info_low =
+ NewBootImageTypePatch(*target_method.dex_file, type_idx, info_high);
+ EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
+ __ Daddiu(argument, AT, /* placeholder */ 0x5678);
+ } else {
+ LoadBootImageAddress(argument, boot_image_offset);
+ }
+ InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
+}
+
Literal* CodeGeneratorMIPS64::DeduplicateJitStringLiteral(const DexFile& dex_file,
dex::StringIndex string_index,
Handle<mirror::String> handle) {
@@ -1753,6 +1810,10 @@ void CodeGeneratorMIPS64::DumpFloatingPointRegister(std::ostream& stream, int re
stream << FpuRegister(reg);
}
+const Mips64InstructionSetFeatures& CodeGeneratorMIPS64::GetInstructionSetFeatures() const {
+ return *GetCompilerOptions().GetInstructionSetFeatures()->AsMips64InstructionSetFeatures();
+}
+
void CodeGeneratorMIPS64::InvokeRuntime(QuickEntrypointEnum entrypoint,
HInstruction* instruction,
uint32_t dex_pc,
@@ -5908,6 +5969,14 @@ void InstructionCodeGeneratorMIPS64::VisitInvokePolymorphic(HInvokePolymorphic*
codegen_->GenerateInvokePolymorphicCall(invoke);
}
+void LocationsBuilderMIPS64::VisitInvokeCustom(HInvokeCustom* invoke) {
+ HandleInvoke(invoke);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitInvokeCustom(HInvokeCustom* invoke) {
+ codegen_->GenerateInvokeCustomCall(invoke);
+}
+
static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorMIPS64* codegen) {
if (invoke->GetLocations()->Intrinsified()) {
IntrinsicCodeGeneratorMIPS64 intrinsic(codegen);
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index 6e69e4611a..fc0908b2cb 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -352,7 +352,6 @@ class InstructionCodeGeneratorMIPS64 : public InstructionCodeGenerator {
class CodeGeneratorMIPS64 : public CodeGenerator {
public:
CodeGeneratorMIPS64(HGraph* graph,
- const Mips64InstructionSetFeatures& isa_features,
const CompilerOptions& compiler_options,
OptimizingCompilerStats* stats = nullptr);
virtual ~CodeGeneratorMIPS64() {}
@@ -484,9 +483,7 @@ class CodeGeneratorMIPS64 : public CodeGenerator {
InstructionSet GetInstructionSet() const OVERRIDE { return InstructionSet::kMips64; }
- const Mips64InstructionSetFeatures& GetInstructionSetFeatures() const {
- return isa_features_;
- }
+ const Mips64InstructionSetFeatures& GetInstructionSetFeatures() const;
Mips64Label* GetLabelOf(HBasicBlock* block) const {
return CommonGetLabelOf<Mips64Label>(block_labels_, block);
@@ -591,6 +588,8 @@ class CodeGeneratorMIPS64 : public CodeGenerator {
DISALLOW_COPY_AND_ASSIGN(PcRelativePatchInfo);
};
+ PcRelativePatchInfo* NewBootImageIntrinsicPatch(uint32_t intrinsic_data,
+ const PcRelativePatchInfo* info_high = nullptr);
PcRelativePatchInfo* NewBootImageRelRoPatch(uint32_t boot_image_offset,
const PcRelativePatchInfo* info_high = nullptr);
PcRelativePatchInfo* NewBootImageMethodPatch(MethodReference target_method,
@@ -615,6 +614,9 @@ class CodeGeneratorMIPS64 : public CodeGenerator {
GpuRegister out,
PcRelativePatchInfo* info_low = nullptr);
+ void LoadBootImageAddress(GpuRegister reg, uint32_t boot_image_reference);
+ void AllocateInstanceForIntrinsic(HInvokeStaticOrDirect* invoke, uint32_t boot_image_offset);
+
void PatchJitRootUse(uint8_t* code,
const uint8_t* roots_data,
const Literal* literal,
@@ -655,7 +657,6 @@ class CodeGeneratorMIPS64 : public CodeGenerator {
InstructionCodeGeneratorMIPS64 instruction_visitor_;
ParallelMoveResolverMIPS64 move_resolver_;
Mips64Assembler assembler_;
- const Mips64InstructionSetFeatures& isa_features_;
// Deduplication map for 32-bit literals, used for non-patchable boot image addresses.
Uint32ToLiteralMap uint32_literals_;
@@ -675,6 +676,8 @@ class CodeGeneratorMIPS64 : public CodeGenerator {
ArenaDeque<PcRelativePatchInfo> boot_image_string_patches_;
// PC-relative type patch info for kBssEntry.
ArenaDeque<PcRelativePatchInfo> string_bss_entry_patches_;
+ // PC-relative patch info for IntrinsicObjects.
+ ArenaDeque<PcRelativePatchInfo> boot_image_intrinsic_patches_;
// Patches for string root accesses in JIT compiled code.
StringToLiteralMap jit_string_patches_;
diff --git a/compiler/optimizing/code_generator_vector_arm64.cc b/compiler/optimizing/code_generator_vector_arm64.cc
index 6b0ec253e9..6d135a9bfb 100644
--- a/compiler/optimizing/code_generator_vector_arm64.cc
+++ b/compiler/optimizing/code_generator_vector_arm64.cc
@@ -29,7 +29,7 @@ using helpers::Arm64CanEncodeConstantAsImmediate;
using helpers::DRegisterFrom;
using helpers::HeapOperand;
using helpers::InputRegisterAt;
-using helpers::Int64ConstantFrom;
+using helpers::Int64FromLocation;
using helpers::OutputRegister;
using helpers::VRegisterFrom;
using helpers::WRegisterFrom;
@@ -78,7 +78,7 @@ void InstructionCodeGeneratorARM64::VisitVecReplicateScalar(HVecReplicateScalar*
case DataType::Type::kInt8:
DCHECK_EQ(16u, instruction->GetVectorLength());
if (src_loc.IsConstant()) {
- __ Movi(dst.V16B(), Int64ConstantFrom(src_loc));
+ __ Movi(dst.V16B(), Int64FromLocation(src_loc));
} else {
__ Dup(dst.V16B(), InputRegisterAt(instruction, 0));
}
@@ -87,7 +87,7 @@ void InstructionCodeGeneratorARM64::VisitVecReplicateScalar(HVecReplicateScalar*
case DataType::Type::kInt16:
DCHECK_EQ(8u, instruction->GetVectorLength());
if (src_loc.IsConstant()) {
- __ Movi(dst.V8H(), Int64ConstantFrom(src_loc));
+ __ Movi(dst.V8H(), Int64FromLocation(src_loc));
} else {
__ Dup(dst.V8H(), InputRegisterAt(instruction, 0));
}
@@ -95,7 +95,7 @@ void InstructionCodeGeneratorARM64::VisitVecReplicateScalar(HVecReplicateScalar*
case DataType::Type::kInt32:
DCHECK_EQ(4u, instruction->GetVectorLength());
if (src_loc.IsConstant()) {
- __ Movi(dst.V4S(), Int64ConstantFrom(src_loc));
+ __ Movi(dst.V4S(), Int64FromLocation(src_loc));
} else {
__ Dup(dst.V4S(), InputRegisterAt(instruction, 0));
}
@@ -103,7 +103,7 @@ void InstructionCodeGeneratorARM64::VisitVecReplicateScalar(HVecReplicateScalar*
case DataType::Type::kInt64:
DCHECK_EQ(2u, instruction->GetVectorLength());
if (src_loc.IsConstant()) {
- __ Movi(dst.V2D(), Int64ConstantFrom(src_loc));
+ __ Movi(dst.V2D(), Int64FromLocation(src_loc));
} else {
__ Dup(dst.V2D(), XRegisterFrom(src_loc));
}
@@ -1333,7 +1333,7 @@ MemOperand InstructionCodeGeneratorARM64::VecAddress(
DCHECK(!instruction->InputAt(0)->IsIntermediateAddress());
if (index.IsConstant()) {
- offset += Int64ConstantFrom(index) << shift;
+ offset += Int64FromLocation(index) << shift;
return HeapOperand(base, offset);
} else {
*scratch = temps_scope->AcquireSameSizeAs(base);
diff --git a/compiler/optimizing/code_generator_vector_x86.cc b/compiler/optimizing/code_generator_vector_x86.cc
index 086ae07a06..58808769e2 100644
--- a/compiler/optimizing/code_generator_vector_x86.cc
+++ b/compiler/optimizing/code_generator_vector_x86.cc
@@ -1125,13 +1125,59 @@ static void CreateVecAccumLocations(ArenaAllocator* allocator, HVecOperation* in
}
}
-void LocationsBuilderX86::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) {
- CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction);
+void LocationsBuilderX86::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instr) {
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instr);
+ switch (instr->GetPackedType()) {
+ case DataType::Type::kFloat32:
+ case DataType::Type::kFloat64:
+ locations->SetInAt(
+ HVecMultiplyAccumulate::kInputAccumulatorIndex, Location::RequiresFpuRegister());
+ locations->SetInAt(
+ HVecMultiplyAccumulate::kInputMulLeftIndex, Location::RequiresFpuRegister());
+ locations->SetInAt(
+ HVecMultiplyAccumulate::kInputMulRightIndex, Location::RequiresFpuRegister());
+ DCHECK_EQ(HVecMultiplyAccumulate::kInputAccumulatorIndex, 0);
+ locations->SetOut(Location::SameAsFirstInput());
+ break;
+ default:
+ // VecMultiplyAccumulate is supported only for single and
+ // double precision floating points. Hence integral types
+ // are still not converted.
+ LOG(FATAL) << "Unsupported SIMD Type";
+ }
}
-void InstructionCodeGeneratorX86::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) {
- // TODO: pmaddwd?
- LOG(FATAL) << "No SIMD for " << instruction->GetId();
+void InstructionCodeGeneratorX86::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instr) {
+ LocationSummary* locations = instr->GetLocations();
+ DCHECK(locations->InAt(0).Equals(locations->Out()));
+ XmmRegister accumulator = locations->InAt(
+ HVecMultiplyAccumulate::kInputAccumulatorIndex).AsFpuRegister<XmmRegister>();
+ XmmRegister mul_left = locations->InAt(
+ HVecMultiplyAccumulate::kInputMulLeftIndex).AsFpuRegister<XmmRegister>();
+ XmmRegister mul_right = locations->InAt(
+ HVecMultiplyAccumulate::kInputMulRightIndex).AsFpuRegister<XmmRegister>();
+ switch (instr->GetPackedType()) {
+ case DataType::Type::kFloat32:
+ DCHECK_EQ(4u, instr->GetVectorLength());
+ if (instr->GetOpKind() == HInstruction::InstructionKind::kAdd)
+ __ vfmadd231ps(accumulator, mul_left, mul_right);
+ else
+ __ vfmsub231ps(accumulator, mul_left, mul_right);
+ break;
+ case DataType::Type::kFloat64:
+ DCHECK_EQ(2u, instr->GetVectorLength());
+ if (instr->GetOpKind() == HInstruction::InstructionKind::kAdd)
+ __ vfmadd231pd(accumulator, mul_left, mul_right);
+ else
+ __ vfmsub231pd(accumulator, mul_left, mul_right);
+ break;
+ default:
+
+ // VecMultiplyAccumulate is supported only for single and
+ // double precision floating points. Hence integral types
+ // are still not converted.
+ LOG(FATAL) << "Unsupported SIMD Type";
+ }
}
void LocationsBuilderX86::VisitVecSADAccumulate(HVecSADAccumulate* instruction) {
diff --git a/compiler/optimizing/code_generator_vector_x86_64.cc b/compiler/optimizing/code_generator_vector_x86_64.cc
index 4d31ab68d1..4795e86933 100644
--- a/compiler/optimizing/code_generator_vector_x86_64.cc
+++ b/compiler/optimizing/code_generator_vector_x86_64.cc
@@ -1098,13 +1098,61 @@ static void CreateVecAccumLocations(ArenaAllocator* allocator, HVecOperation* in
}
}
-void LocationsBuilderX86_64::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) {
- CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction);
+void LocationsBuilderX86_64::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instr) {
+ LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instr);
+ switch (instr->GetPackedType()) {
+ case DataType::Type::kFloat32:
+ case DataType::Type::kFloat64:
+ locations->SetInAt(
+ HVecMultiplyAccumulate::kInputAccumulatorIndex, Location::RequiresFpuRegister());
+ locations->SetInAt(
+ HVecMultiplyAccumulate::kInputMulLeftIndex, Location::RequiresFpuRegister());
+ locations->SetInAt(
+ HVecMultiplyAccumulate::kInputMulRightIndex, Location::RequiresFpuRegister());
+ DCHECK_EQ(HVecMultiplyAccumulate::kInputAccumulatorIndex, 0);
+ locations->SetOut(Location::SameAsFirstInput());
+ break;
+ default:
+ // VecMultiplyAccumulate is supported only for single and
+ // double precision floating points. Hence integral types
+ // are still not converted.
+ LOG(FATAL) << "Unsupported SIMD type";
+ }
}
-void InstructionCodeGeneratorX86_64::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) {
- // TODO: pmaddwd?
- LOG(FATAL) << "No SIMD for " << instruction->GetId();
+
+void InstructionCodeGeneratorX86_64::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instr) {
+ LocationSummary* locations = instr->GetLocations();
+ DCHECK(locations->InAt(0).Equals(locations->Out()));
+ XmmRegister accumulator = locations->InAt(
+ HVecMultiplyAccumulate::kInputAccumulatorIndex).AsFpuRegister<XmmRegister>();
+ XmmRegister mul_left = locations->InAt(
+ HVecMultiplyAccumulate::kInputMulLeftIndex).AsFpuRegister<XmmRegister>();
+ XmmRegister mul_right = locations->InAt(
+ HVecMultiplyAccumulate::kInputMulRightIndex).AsFpuRegister<XmmRegister>();
+
+ switch (instr->GetPackedType()) {
+ case DataType::Type::kFloat32:
+ DCHECK_EQ(4u, instr->GetVectorLength());
+ if (instr->GetOpKind() == HInstruction::InstructionKind::kAdd)
+ __ vfmadd231ps(accumulator, mul_left, mul_right);
+ else
+ __ vfmsub231ps(accumulator, mul_left, mul_right);
+ break;
+ case DataType::Type::kFloat64:
+ DCHECK_EQ(2u, instr->GetVectorLength());
+ if (instr->GetOpKind() == HInstruction::InstructionKind::kAdd)
+ __ vfmadd231pd(accumulator, mul_left, mul_right);
+ else
+ __ vfmsub231pd(accumulator, mul_left, mul_right);
+ break;
+ default:
+
+ // VecMultiplyAccumulate is supported only for single and
+ // double precision floating points. Hence integral types
+ // are still not converted.
+ LOG(FATAL) << "Unsupported SIMD Type";
+ }
}
void LocationsBuilderX86_64::VisitVecSADAccumulate(HVecSADAccumulate* instruction) {
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 9e315381b1..d189476a48 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -23,6 +23,7 @@
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "gc/accounting/card_table.h"
+#include "gc/space/image_space.h"
#include "heap_poisoning.h"
#include "intrinsics.h"
#include "intrinsics_x86.h"
@@ -957,6 +958,10 @@ void CodeGeneratorX86::DumpFloatingPointRegister(std::ostream& stream, int reg)
stream << XmmRegister(reg);
}
+const X86InstructionSetFeatures& CodeGeneratorX86::GetInstructionSetFeatures() const {
+ return *GetCompilerOptions().GetInstructionSetFeatures()->AsX86InstructionSetFeatures();
+}
+
size_t CodeGeneratorX86::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
__ movl(Address(ESP, stack_index), static_cast<Register>(reg_id));
return kX86WordSize;
@@ -1008,7 +1013,6 @@ void CodeGeneratorX86::GenerateInvokeRuntime(int32_t entry_point_offset) {
}
CodeGeneratorX86::CodeGeneratorX86(HGraph* graph,
- const X86InstructionSetFeatures& isa_features,
const CompilerOptions& compiler_options,
OptimizingCompilerStats* stats)
: CodeGenerator(graph,
@@ -1026,13 +1030,13 @@ CodeGeneratorX86::CodeGeneratorX86(HGraph* graph,
instruction_visitor_(graph, this),
move_resolver_(graph->GetAllocator(), this),
assembler_(graph->GetAllocator()),
- isa_features_(isa_features),
boot_image_method_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
method_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
boot_image_type_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
type_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
boot_image_string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
string_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ boot_image_intrinsic_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
jit_string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
jit_class_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
constant_area_start_(-1),
@@ -2188,7 +2192,9 @@ void LocationsBuilderX86::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invok
IntrinsicLocationsBuilderX86 intrinsic(codegen_);
if (intrinsic.TryDispatch(invoke)) {
- if (invoke->GetLocations()->CanCall() && invoke->HasPcRelativeMethodLoadKind()) {
+ if (invoke->GetLocations()->CanCall() &&
+ invoke->HasPcRelativeMethodLoadKind() &&
+ invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex()).IsInvalid()) {
invoke->GetLocations()->SetInAt(invoke->GetSpecialInputIndex(), Location::Any());
}
return;
@@ -2311,6 +2317,14 @@ void InstructionCodeGeneratorX86::VisitInvokePolymorphic(HInvokePolymorphic* inv
codegen_->GenerateInvokePolymorphicCall(invoke);
}
+void LocationsBuilderX86::VisitInvokeCustom(HInvokeCustom* invoke) {
+ HandleInvoke(invoke);
+}
+
+void InstructionCodeGeneratorX86::VisitInvokeCustom(HInvokeCustom* invoke) {
+ codegen_->GenerateInvokeCustomCall(invoke);
+}
+
void LocationsBuilderX86::VisitNeg(HNeg* neg) {
LocationSummary* locations =
new (GetGraph()->GetAllocator()) LocationSummary(neg, LocationSummary::kNoCall);
@@ -4902,6 +4916,13 @@ void CodeGeneratorX86::GenerateVirtualCall(
RecordPcInfo(invoke, invoke->GetDexPc(), slow_path);
}
+void CodeGeneratorX86::RecordBootImageIntrinsicPatch(HX86ComputeBaseMethodAddress* method_address,
+ uint32_t intrinsic_data) {
+ boot_image_intrinsic_patches_.emplace_back(
+ method_address, /* target_dex_file */ nullptr, intrinsic_data);
+ __ Bind(&boot_image_intrinsic_patches_.back().label);
+}
+
void CodeGeneratorX86::RecordBootImageRelRoPatch(HX86ComputeBaseMethodAddress* method_address,
uint32_t boot_image_offset) {
boot_image_method_patches_.emplace_back(
@@ -4961,6 +4982,62 @@ Label* CodeGeneratorX86::NewStringBssEntryPatch(HLoadString* load_string) {
return &string_bss_entry_patches_.back().label;
}
+void CodeGeneratorX86::LoadBootImageAddress(Register reg,
+ uint32_t boot_image_reference,
+ HInvokeStaticOrDirect* invoke) {
+ if (GetCompilerOptions().IsBootImage()) {
+ DCHECK_EQ(invoke->InputCount(), invoke->GetNumberOfArguments() + 1u);
+ HX86ComputeBaseMethodAddress* method_address =
+ invoke->InputAt(invoke->GetSpecialInputIndex())->AsX86ComputeBaseMethodAddress();
+ DCHECK(method_address != nullptr);
+ Register method_address_reg =
+ invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex()).AsRegister<Register>();
+ __ leal(reg, Address(method_address_reg, CodeGeneratorX86::kDummy32BitOffset));
+ RecordBootImageIntrinsicPatch(method_address, boot_image_reference);
+ } else if (GetCompilerOptions().GetCompilePic()) {
+ DCHECK(Runtime::Current()->IsAotCompiler());
+ DCHECK_EQ(invoke->InputCount(), invoke->GetNumberOfArguments() + 1u);
+ HX86ComputeBaseMethodAddress* method_address =
+ invoke->InputAt(invoke->GetSpecialInputIndex())->AsX86ComputeBaseMethodAddress();
+ DCHECK(method_address != nullptr);
+ Register method_address_reg =
+ invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex()).AsRegister<Register>();
+ __ movl(reg, Address(method_address_reg, CodeGeneratorX86::kDummy32BitOffset));
+ RecordBootImageRelRoPatch(method_address, boot_image_reference);
+ } else {
+ gc::Heap* heap = Runtime::Current()->GetHeap();
+ DCHECK(!heap->GetBootImageSpaces().empty());
+ const uint8_t* address = heap->GetBootImageSpaces()[0]->Begin() + boot_image_reference;
+ __ movl(reg, Immediate(dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(address))));
+ }
+}
+
+void CodeGeneratorX86::AllocateInstanceForIntrinsic(HInvokeStaticOrDirect* invoke,
+ uint32_t boot_image_offset) {
+ DCHECK(invoke->IsStatic());
+ InvokeRuntimeCallingConvention calling_convention;
+ Register argument = calling_convention.GetRegisterAt(0);
+ if (GetCompilerOptions().IsBootImage()) {
+ DCHECK_EQ(boot_image_offset, IntrinsicVisitor::IntegerValueOfInfo::kInvalidReference);
+ // Load the class the same way as for HLoadClass::LoadKind::kBootImageLinkTimePcRelative.
+ DCHECK_EQ(invoke->InputCount(), invoke->GetNumberOfArguments() + 1u);
+ HX86ComputeBaseMethodAddress* method_address =
+ invoke->InputAt(invoke->GetSpecialInputIndex())->AsX86ComputeBaseMethodAddress();
+ DCHECK(method_address != nullptr);
+ Register method_address_reg =
+ invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex()).AsRegister<Register>();
+ __ leal(argument, Address(method_address_reg, CodeGeneratorX86::kDummy32BitOffset));
+ MethodReference target_method = invoke->GetTargetMethod();
+ dex::TypeIndex type_idx = target_method.dex_file->GetMethodId(target_method.index).class_idx_;
+ boot_image_type_patches_.emplace_back(method_address, target_method.dex_file, type_idx.index_);
+ __ Bind(&boot_image_type_patches_.back().label);
+ } else {
+ LoadBootImageAddress(argument, boot_image_offset, invoke);
+ }
+ InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
+}
+
// The label points to the end of the "movl" or another instruction but the literal offset
// for method patch needs to point to the embedded constant which occupies the last 4 bytes.
constexpr uint32_t kLabelPositionToLiteralOffsetAdjustment = 4u;
@@ -4978,12 +5055,13 @@ inline void CodeGeneratorX86::EmitPcRelativeLinkerPatches(
}
}
-linker::LinkerPatch DataBimgRelRoPatchAdapter(size_t literal_offset,
- const DexFile* target_dex_file,
- uint32_t pc_insn_offset,
- uint32_t boot_image_offset) {
- DCHECK(target_dex_file == nullptr); // Unused for DataBimgRelRoPatch(), should be null.
- return linker::LinkerPatch::DataBimgRelRoPatch(literal_offset, pc_insn_offset, boot_image_offset);
+template <linker::LinkerPatch (*Factory)(size_t, uint32_t, uint32_t)>
+linker::LinkerPatch NoDexFileAdapter(size_t literal_offset,
+ const DexFile* target_dex_file,
+ uint32_t pc_insn_offset,
+ uint32_t boot_image_offset) {
+ DCHECK(target_dex_file == nullptr); // Unused for these patches, should be null.
+ return Factory(literal_offset, pc_insn_offset, boot_image_offset);
}
void CodeGeneratorX86::EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) {
@@ -4994,7 +5072,8 @@ void CodeGeneratorX86::EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linke
boot_image_type_patches_.size() +
type_bss_entry_patches_.size() +
boot_image_string_patches_.size() +
- string_bss_entry_patches_.size();
+ string_bss_entry_patches_.size() +
+ boot_image_intrinsic_patches_.size();
linker_patches->reserve(size);
if (GetCompilerOptions().IsBootImage()) {
EmitPcRelativeLinkerPatches<linker::LinkerPatch::RelativeMethodPatch>(
@@ -5003,11 +5082,14 @@ void CodeGeneratorX86::EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linke
boot_image_type_patches_, linker_patches);
EmitPcRelativeLinkerPatches<linker::LinkerPatch::RelativeStringPatch>(
boot_image_string_patches_, linker_patches);
+ EmitPcRelativeLinkerPatches<NoDexFileAdapter<linker::LinkerPatch::IntrinsicReferencePatch>>(
+ boot_image_intrinsic_patches_, linker_patches);
} else {
- EmitPcRelativeLinkerPatches<DataBimgRelRoPatchAdapter>(
+ EmitPcRelativeLinkerPatches<NoDexFileAdapter<linker::LinkerPatch::DataBimgRelRoPatch>>(
boot_image_method_patches_, linker_patches);
DCHECK(boot_image_type_patches_.empty());
DCHECK(boot_image_string_patches_.empty());
+ DCHECK(boot_image_intrinsic_patches_.empty());
}
EmitPcRelativeLinkerPatches<linker::LinkerPatch::MethodBssEntryPatch>(
method_bss_entry_patches_, linker_patches);
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 6c76e27d35..cb58e920ea 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -316,7 +316,6 @@ class JumpTableRIPFixup;
class CodeGeneratorX86 : public CodeGenerator {
public:
CodeGeneratorX86(HGraph* graph,
- const X86InstructionSetFeatures& isa_features,
const CompilerOptions& compiler_options,
OptimizingCompilerStats* stats = nullptr);
virtual ~CodeGeneratorX86() {}
@@ -390,6 +389,8 @@ class CodeGeneratorX86 : public CodeGenerator {
return InstructionSet::kX86;
}
+ const X86InstructionSetFeatures& GetInstructionSetFeatures() const;
+
// Helper method to move a 32bits value between two locations.
void Move32(Location destination, Location source);
// Helper method to move a 64bits value between two locations.
@@ -418,6 +419,8 @@ class CodeGeneratorX86 : public CodeGenerator {
void GenerateVirtualCall(
HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+ void RecordBootImageIntrinsicPatch(HX86ComputeBaseMethodAddress* method_address,
+ uint32_t intrinsic_data);
void RecordBootImageRelRoPatch(HX86ComputeBaseMethodAddress* method_address,
uint32_t boot_image_offset);
void RecordBootImageMethodPatch(HInvokeStaticOrDirect* invoke);
@@ -426,6 +429,12 @@ class CodeGeneratorX86 : public CodeGenerator {
Label* NewTypeBssEntryPatch(HLoadClass* load_class);
void RecordBootImageStringPatch(HLoadString* load_string);
Label* NewStringBssEntryPatch(HLoadString* load_string);
+
+ void LoadBootImageAddress(Register reg,
+ uint32_t boot_image_reference,
+ HInvokeStaticOrDirect* invoke);
+ void AllocateInstanceForIntrinsic(HInvokeStaticOrDirect* invoke, uint32_t boot_image_offset);
+
Label* NewJitRootStringPatch(const DexFile& dex_file,
dex::StringIndex string_index,
Handle<mirror::String> handle);
@@ -469,10 +478,6 @@ class CodeGeneratorX86 : public CodeGenerator {
Label* GetFrameEntryLabel() { return &frame_entry_label_; }
- const X86InstructionSetFeatures& GetInstructionSetFeatures() const {
- return isa_features_;
- }
-
void AddMethodAddressOffset(HX86ComputeBaseMethodAddress* method_base, int32_t offset) {
method_address_offset_.Put(method_base->GetId(), offset);
}
@@ -635,7 +640,6 @@ class CodeGeneratorX86 : public CodeGenerator {
InstructionCodeGeneratorX86 instruction_visitor_;
ParallelMoveResolverX86 move_resolver_;
X86Assembler assembler_;
- const X86InstructionSetFeatures& isa_features_;
// PC-relative method patch info for kBootImageLinkTimePcRelative/kBootImageRelRo.
// Also used for type/string patches for kBootImageRelRo (same linker patch as for methods).
@@ -650,6 +654,8 @@ class CodeGeneratorX86 : public CodeGenerator {
ArenaDeque<X86PcRelativePatchInfo> boot_image_string_patches_;
// PC-relative String patch info for kBssEntry.
ArenaDeque<X86PcRelativePatchInfo> string_bss_entry_patches_;
+ // PC-relative patch info for IntrinsicObjects.
+ ArenaDeque<X86PcRelativePatchInfo> boot_image_intrinsic_patches_;
// Patches for string root accesses in JIT compiled code.
ArenaDeque<PatchInfo<Label>> jit_string_patches_;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index f7397046d7..bea3da070a 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -22,6 +22,7 @@
#include "compiled_method.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "gc/accounting/card_table.h"
+#include "gc/space/image_space.h"
#include "heap_poisoning.h"
#include "intrinsics.h"
#include "intrinsics_x86_64.h"
@@ -1066,6 +1067,11 @@ void CodeGeneratorX86_64::GenerateVirtualCall(
RecordPcInfo(invoke, invoke->GetDexPc(), slow_path);
}
+void CodeGeneratorX86_64::RecordBootImageIntrinsicPatch(uint32_t intrinsic_data) {
+ boot_image_intrinsic_patches_.emplace_back(/* target_dex_file */ nullptr, intrinsic_data);
+ __ Bind(&boot_image_intrinsic_patches_.back().label);
+}
+
void CodeGeneratorX86_64::RecordBootImageRelRoPatch(uint32_t boot_image_offset) {
boot_image_method_patches_.emplace_back(/* target_dex_file */ nullptr, boot_image_offset);
__ Bind(&boot_image_method_patches_.back().label);
@@ -1107,6 +1113,43 @@ Label* CodeGeneratorX86_64::NewStringBssEntryPatch(HLoadString* load_string) {
return &string_bss_entry_patches_.back().label;
}
+void CodeGeneratorX86_64::LoadBootImageAddress(CpuRegister reg, uint32_t boot_image_reference) {
+ if (GetCompilerOptions().IsBootImage()) {
+ __ leal(reg, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
+ RecordBootImageIntrinsicPatch(boot_image_reference);
+ } else if (GetCompilerOptions().GetCompilePic()) {
+ DCHECK(Runtime::Current()->IsAotCompiler());
+ __ movl(reg, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
+ RecordBootImageRelRoPatch(boot_image_reference);
+ } else {
+ gc::Heap* heap = Runtime::Current()->GetHeap();
+ DCHECK(!heap->GetBootImageSpaces().empty());
+ const uint8_t* address = heap->GetBootImageSpaces()[0]->Begin() + boot_image_reference;
+ __ movl(reg, Immediate(dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(address))));
+ }
+}
+
+void CodeGeneratorX86_64::AllocateInstanceForIntrinsic(HInvokeStaticOrDirect* invoke,
+ uint32_t boot_image_offset) {
+ DCHECK(invoke->IsStatic());
+ InvokeRuntimeCallingConvention calling_convention;
+ CpuRegister argument = CpuRegister(calling_convention.GetRegisterAt(0));
+ if (GetCompilerOptions().IsBootImage()) {
+ DCHECK_EQ(boot_image_offset, IntrinsicVisitor::IntegerValueOfInfo::kInvalidReference);
+ // Load the class the same way as for HLoadClass::LoadKind::kBootImageLinkTimePcRelative.
+ __ leal(argument,
+ Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
+ MethodReference target_method = invoke->GetTargetMethod();
+ dex::TypeIndex type_idx = target_method.dex_file->GetMethodId(target_method.index).class_idx_;
+ boot_image_type_patches_.emplace_back(target_method.dex_file, type_idx.index_);
+ __ Bind(&boot_image_type_patches_.back().label);
+ } else {
+ LoadBootImageAddress(argument, boot_image_offset);
+ }
+ InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
+ CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
+}
+
// The label points to the end of the "movl" or another instruction but the literal offset
// for method patch needs to point to the embedded constant which occupies the last 4 bytes.
constexpr uint32_t kLabelPositionToLiteralOffsetAdjustment = 4u;
@@ -1122,12 +1165,13 @@ inline void CodeGeneratorX86_64::EmitPcRelativeLinkerPatches(
}
}
-linker::LinkerPatch DataBimgRelRoPatchAdapter(size_t literal_offset,
- const DexFile* target_dex_file,
- uint32_t pc_insn_offset,
- uint32_t boot_image_offset) {
- DCHECK(target_dex_file == nullptr); // Unused for DataBimgRelRoPatch(), should be null.
- return linker::LinkerPatch::DataBimgRelRoPatch(literal_offset, pc_insn_offset, boot_image_offset);
+template <linker::LinkerPatch (*Factory)(size_t, uint32_t, uint32_t)>
+linker::LinkerPatch NoDexFileAdapter(size_t literal_offset,
+ const DexFile* target_dex_file,
+ uint32_t pc_insn_offset,
+ uint32_t boot_image_offset) {
+ DCHECK(target_dex_file == nullptr); // Unused for these patches, should be null.
+ return Factory(literal_offset, pc_insn_offset, boot_image_offset);
}
void CodeGeneratorX86_64::EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) {
@@ -1138,7 +1182,8 @@ void CodeGeneratorX86_64::EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* li
boot_image_type_patches_.size() +
type_bss_entry_patches_.size() +
boot_image_string_patches_.size() +
- string_bss_entry_patches_.size();
+ string_bss_entry_patches_.size() +
+ boot_image_intrinsic_patches_.size();
linker_patches->reserve(size);
if (GetCompilerOptions().IsBootImage()) {
EmitPcRelativeLinkerPatches<linker::LinkerPatch::RelativeMethodPatch>(
@@ -1147,11 +1192,14 @@ void CodeGeneratorX86_64::EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* li
boot_image_type_patches_, linker_patches);
EmitPcRelativeLinkerPatches<linker::LinkerPatch::RelativeStringPatch>(
boot_image_string_patches_, linker_patches);
+ EmitPcRelativeLinkerPatches<NoDexFileAdapter<linker::LinkerPatch::IntrinsicReferencePatch>>(
+ boot_image_intrinsic_patches_, linker_patches);
} else {
- EmitPcRelativeLinkerPatches<DataBimgRelRoPatchAdapter>(
+ EmitPcRelativeLinkerPatches<NoDexFileAdapter<linker::LinkerPatch::DataBimgRelRoPatch>>(
boot_image_method_patches_, linker_patches);
DCHECK(boot_image_type_patches_.empty());
DCHECK(boot_image_string_patches_.empty());
+ DCHECK(boot_image_intrinsic_patches_.empty());
}
EmitPcRelativeLinkerPatches<linker::LinkerPatch::MethodBssEntryPatch>(
method_bss_entry_patches_, linker_patches);
@@ -1170,6 +1218,10 @@ void CodeGeneratorX86_64::DumpFloatingPointRegister(std::ostream& stream, int re
stream << FloatRegister(reg);
}
+const X86_64InstructionSetFeatures& CodeGeneratorX86_64::GetInstructionSetFeatures() const {
+ return *GetCompilerOptions().GetInstructionSetFeatures()->AsX86_64InstructionSetFeatures();
+}
+
size_t CodeGeneratorX86_64::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
__ movq(Address(CpuRegister(RSP), stack_index), CpuRegister(reg_id));
return kX86_64WordSize;
@@ -1224,7 +1276,6 @@ static constexpr int kNumberOfCpuRegisterPairs = 0;
// Use a fake return address register to mimic Quick.
static constexpr Register kFakeReturnRegister = Register(kLastCpuRegister + 1);
CodeGeneratorX86_64::CodeGeneratorX86_64(HGraph* graph,
- const X86_64InstructionSetFeatures& isa_features,
const CompilerOptions& compiler_options,
OptimizingCompilerStats* stats)
: CodeGenerator(graph,
@@ -1243,7 +1294,6 @@ CodeGeneratorX86_64::CodeGeneratorX86_64(HGraph* graph,
instruction_visitor_(graph, this),
move_resolver_(graph->GetAllocator(), this),
assembler_(graph->GetAllocator()),
- isa_features_(isa_features),
constant_area_start_(0),
boot_image_method_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
method_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
@@ -1251,6 +1301,7 @@ CodeGeneratorX86_64::CodeGeneratorX86_64(HGraph* graph,
type_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
boot_image_string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
string_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+ boot_image_intrinsic_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
jit_string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
jit_class_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
fixups_to_jump_tables_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)) {
@@ -2501,6 +2552,14 @@ void InstructionCodeGeneratorX86_64::VisitInvokePolymorphic(HInvokePolymorphic*
codegen_->GenerateInvokePolymorphicCall(invoke);
}
+void LocationsBuilderX86_64::VisitInvokeCustom(HInvokeCustom* invoke) {
+ HandleInvoke(invoke);
+}
+
+void InstructionCodeGeneratorX86_64::VisitInvokeCustom(HInvokeCustom* invoke) {
+ codegen_->GenerateInvokeCustomCall(invoke);
+}
+
void LocationsBuilderX86_64::VisitNeg(HNeg* neg) {
LocationSummary* locations =
new (GetGraph()->GetAllocator()) LocationSummary(neg, LocationSummary::kNoCall);
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 9a4c53b524..5ba7f9cb71 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -296,7 +296,6 @@ class JumpTableRIPFixup;
class CodeGeneratorX86_64 : public CodeGenerator {
public:
CodeGeneratorX86_64(HGraph* graph,
- const X86_64InstructionSetFeatures& isa_features,
const CompilerOptions& compiler_options,
OptimizingCompilerStats* stats = nullptr);
virtual ~CodeGeneratorX86_64() {}
@@ -370,6 +369,8 @@ class CodeGeneratorX86_64 : public CodeGenerator {
return InstructionSet::kX86_64;
}
+ const X86_64InstructionSetFeatures& GetInstructionSetFeatures() const;
+
// Emit a write barrier.
void MarkGCCard(CpuRegister temp,
CpuRegister card,
@@ -415,6 +416,7 @@ class CodeGeneratorX86_64 : public CodeGenerator {
void GenerateVirtualCall(
HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) OVERRIDE;
+ void RecordBootImageIntrinsicPatch(uint32_t intrinsic_data);
void RecordBootImageRelRoPatch(uint32_t boot_image_offset);
void RecordBootImageMethodPatch(HInvokeStaticOrDirect* invoke);
void RecordMethodBssEntryPatch(HInvokeStaticOrDirect* invoke);
@@ -429,7 +431,8 @@ class CodeGeneratorX86_64 : public CodeGenerator {
dex::TypeIndex type_index,
Handle<mirror::Class> handle);
- void MoveFromReturnRegister(Location trg, DataType::Type type) OVERRIDE;
+ void LoadBootImageAddress(CpuRegister reg, uint32_t boot_image_reference);
+ void AllocateInstanceForIntrinsic(HInvokeStaticOrDirect* invoke, uint32_t boot_image_offset);
void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) OVERRIDE;
@@ -440,10 +443,6 @@ class CodeGeneratorX86_64 : public CodeGenerator {
void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE;
- const X86_64InstructionSetFeatures& GetInstructionSetFeatures() const {
- return isa_features_;
- }
-
// Fast path implementation of ReadBarrier::Barrier for a heap
// reference field load when Baker's read barriers are used.
void GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
@@ -566,6 +565,8 @@ class CodeGeneratorX86_64 : public CodeGenerator {
// Store a 64 bit value into a DoubleStackSlot in the most efficient manner.
void Store64BitValueToStack(Location dest, int64_t value);
+ void MoveFromReturnRegister(Location trg, DataType::Type type) OVERRIDE;
+
// Assign a 64 bit constant to an address.
void MoveInt64ToAddress(const Address& addr_low,
const Address& addr_high,
@@ -604,7 +605,6 @@ class CodeGeneratorX86_64 : public CodeGenerator {
InstructionCodeGeneratorX86_64 instruction_visitor_;
ParallelMoveResolverX86_64 move_resolver_;
X86_64Assembler assembler_;
- const X86_64InstructionSetFeatures& isa_features_;
// Offset to the start of the constant area in the assembled code.
// Used for fixups to the constant area.
@@ -623,6 +623,8 @@ class CodeGeneratorX86_64 : public CodeGenerator {
ArenaDeque<PatchInfo<Label>> boot_image_string_patches_;
// PC-relative String patch info for kBssEntry.
ArenaDeque<PatchInfo<Label>> string_bss_entry_patches_;
+ // PC-relative patch info for IntrinsicObjects.
+ ArenaDeque<PatchInfo<Label>> boot_image_intrinsic_patches_;
// Patches for string literals in JIT compiled code.
ArenaDeque<PatchInfo<Label>> jit_string_patches_;
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index a0fd5ffcb1..86687e60a9 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -89,7 +89,8 @@ void CodegenTest::TestCode(const std::vector<uint16_t>& data, bool has_result, i
HGraph* graph = CreateCFG(data);
// Remove suspend checks, they cannot be executed in this context.
RemoveSuspendChecks(graph);
- RunCode(target_config, graph, [](HGraph*) {}, has_result, expected);
+ OverrideInstructionSetFeatures(target_config.GetInstructionSet(), "default");
+ RunCode(target_config, *compiler_options_, graph, [](HGraph*) {}, has_result, expected);
}
}
@@ -100,7 +101,8 @@ void CodegenTest::TestCodeLong(const std::vector<uint16_t>& data,
HGraph* graph = CreateCFG(data, DataType::Type::kInt64);
// Remove suspend checks, they cannot be executed in this context.
RemoveSuspendChecks(graph);
- RunCode(target_config, graph, [](HGraph*) {}, has_result, expected);
+ OverrideInstructionSetFeatures(target_config.GetInstructionSet(), "default");
+ RunCode(target_config, *compiler_options_, graph, [](HGraph*) {}, has_result, expected);
}
}
@@ -460,7 +462,8 @@ TEST_F(CodegenTest, NonMaterializedCondition) {
block->InsertInstructionBefore(move, block->GetLastInstruction());
};
- RunCode(target_config, graph, hook_before_codegen, true, 0);
+ OverrideInstructionSetFeatures(target_config.GetInstructionSet(), "default");
+ RunCode(target_config, *compiler_options_, graph, hook_before_codegen, true, 0);
}
}
@@ -506,7 +509,8 @@ TEST_F(CodegenTest, MaterializedCondition1) {
new (graph_in->GetAllocator()) HParallelMove(graph_in->GetAllocator());
block->InsertInstructionBefore(move, block->GetLastInstruction());
};
- RunCode(target_config, graph, hook_before_codegen, true, lhs[i] < rhs[i]);
+ OverrideInstructionSetFeatures(target_config.GetInstructionSet(), "default");
+ RunCode(target_config, *compiler_options_, graph, hook_before_codegen, true, lhs[i] < rhs[i]);
}
}
}
@@ -573,7 +577,8 @@ TEST_F(CodegenTest, MaterializedCondition2) {
new (graph_in->GetAllocator()) HParallelMove(graph_in->GetAllocator());
block->InsertInstructionBefore(move, block->GetLastInstruction());
};
- RunCode(target_config, graph, hook_before_codegen, true, lhs[i] < rhs[i]);
+ OverrideInstructionSetFeatures(target_config.GetInstructionSet(), "default");
+ RunCode(target_config, *compiler_options_, graph, hook_before_codegen, true, lhs[i] < rhs[i]);
}
}
}
@@ -682,7 +687,8 @@ void CodegenTest::TestComparison(IfCondition condition,
block->AddInstruction(new (GetAllocator()) HReturn(comparison));
graph->BuildDominatorTree();
- RunCode(target_config, graph, [](HGraph*) {}, true, expected_result);
+ OverrideInstructionSetFeatures(target_config.GetInstructionSet(), "default");
+ RunCode(target_config, *compiler_options_, graph, [](HGraph*) {}, true, expected_result);
}
TEST_F(CodegenTest, ComparisonsInt) {
@@ -713,10 +719,9 @@ TEST_F(CodegenTest, ComparisonsLong) {
#ifdef ART_ENABLE_CODEGEN_arm
TEST_F(CodegenTest, ARMVIXLParallelMoveResolver) {
- std::unique_ptr<const ArmInstructionSetFeatures> features(
- ArmInstructionSetFeatures::FromCppDefines());
+ OverrideInstructionSetFeatures(InstructionSet::kThumb2, "default");
HGraph* graph = CreateGraph();
- arm::CodeGeneratorARMVIXL codegen(graph, *features.get(), CompilerOptions());
+ arm::CodeGeneratorARMVIXL codegen(graph, *compiler_options_);
codegen.Initialize();
@@ -737,10 +742,9 @@ TEST_F(CodegenTest, ARMVIXLParallelMoveResolver) {
#ifdef ART_ENABLE_CODEGEN_arm64
// Regression test for b/34760542.
TEST_F(CodegenTest, ARM64ParallelMoveResolverB34760542) {
- std::unique_ptr<const Arm64InstructionSetFeatures> features(
- Arm64InstructionSetFeatures::FromCppDefines());
+ OverrideInstructionSetFeatures(InstructionSet::kArm64, "default");
HGraph* graph = CreateGraph();
- arm64::CodeGeneratorARM64 codegen(graph, *features.get(), CompilerOptions());
+ arm64::CodeGeneratorARM64 codegen(graph, *compiler_options_);
codegen.Initialize();
@@ -787,10 +791,9 @@ TEST_F(CodegenTest, ARM64ParallelMoveResolverB34760542) {
// Check that ParallelMoveResolver works fine for ARM64 for both cases when SIMD is on and off.
TEST_F(CodegenTest, ARM64ParallelMoveResolverSIMD) {
- std::unique_ptr<const Arm64InstructionSetFeatures> features(
- Arm64InstructionSetFeatures::FromCppDefines());
+ OverrideInstructionSetFeatures(InstructionSet::kArm64, "default");
HGraph* graph = CreateGraph();
- arm64::CodeGeneratorARM64 codegen(graph, *features.get(), CompilerOptions());
+ arm64::CodeGeneratorARM64 codegen(graph, *compiler_options_);
codegen.Initialize();
@@ -824,9 +827,9 @@ TEST_F(CodegenTest, ARM64ParallelMoveResolverSIMD) {
#ifdef ART_ENABLE_CODEGEN_mips
TEST_F(CodegenTest, MipsClobberRA) {
- std::unique_ptr<const MipsInstructionSetFeatures> features_mips(
- MipsInstructionSetFeatures::FromCppDefines());
- if (!CanExecute(InstructionSet::kMips) || features_mips->IsR6()) {
+ OverrideInstructionSetFeatures(InstructionSet::kMips, "mips32r");
+ CHECK(!instruction_set_features_->AsMipsInstructionSetFeatures()->IsR6());
+ if (!CanExecute(InstructionSet::kMips)) {
// HMipsComputeBaseMethodAddress and the NAL instruction behind it
// should only be generated on non-R6.
return;
@@ -860,7 +863,7 @@ TEST_F(CodegenTest, MipsClobberRA) {
graph->BuildDominatorTree();
- mips::CodeGeneratorMIPS codegenMIPS(graph, *features_mips.get(), CompilerOptions());
+ mips::CodeGeneratorMIPS codegenMIPS(graph, *compiler_options_);
// Since there isn't HLoadClass or HLoadString, we need to manually indicate
// that RA is clobbered and the method entry code should generate a stack frame
// and preserve RA in it. And this is what we're testing here.
diff --git a/compiler/optimizing/codegen_test_utils.h b/compiler/optimizing/codegen_test_utils.h
index 792cfb539a..91811262de 100644
--- a/compiler/optimizing/codegen_test_utils.h
+++ b/compiler/optimizing/codegen_test_utils.h
@@ -17,17 +17,11 @@
#ifndef ART_COMPILER_OPTIMIZING_CODEGEN_TEST_UTILS_H_
#define ART_COMPILER_OPTIMIZING_CODEGEN_TEST_UTILS_H_
-#include "arch/arm/instruction_set_features_arm.h"
#include "arch/arm/registers_arm.h"
-#include "arch/arm64/instruction_set_features_arm64.h"
#include "arch/instruction_set.h"
-#include "arch/mips/instruction_set_features_mips.h"
#include "arch/mips/registers_mips.h"
-#include "arch/mips64/instruction_set_features_mips64.h"
#include "arch/mips64/registers_mips64.h"
-#include "arch/x86/instruction_set_features_x86.h"
#include "arch/x86/registers_x86.h"
-#include "arch/x86_64/instruction_set_features_x86_64.h"
#include "code_simulator.h"
#include "code_simulator_container.h"
#include "common_compiler_test.h"
@@ -101,10 +95,8 @@ class CodegenTargetConfig {
// to just overwrite the code generator.
class TestCodeGeneratorARMVIXL : public arm::CodeGeneratorARMVIXL {
public:
- TestCodeGeneratorARMVIXL(HGraph* graph,
- const ArmInstructionSetFeatures& isa_features,
- const CompilerOptions& compiler_options)
- : arm::CodeGeneratorARMVIXL(graph, isa_features, compiler_options) {
+ TestCodeGeneratorARMVIXL(HGraph* graph, const CompilerOptions& compiler_options)
+ : arm::CodeGeneratorARMVIXL(graph, compiler_options) {
AddAllocatedRegister(Location::RegisterLocation(arm::R6));
AddAllocatedRegister(Location::RegisterLocation(arm::R7));
}
@@ -145,10 +137,8 @@ class TestCodeGeneratorARMVIXL : public arm::CodeGeneratorARMVIXL {
// function.
class TestCodeGeneratorARM64 : public arm64::CodeGeneratorARM64 {
public:
- TestCodeGeneratorARM64(HGraph* graph,
- const Arm64InstructionSetFeatures& isa_features,
- const CompilerOptions& compiler_options)
- : arm64::CodeGeneratorARM64(graph, isa_features, compiler_options) {}
+ TestCodeGeneratorARM64(HGraph* graph, const CompilerOptions& compiler_options)
+ : arm64::CodeGeneratorARM64(graph, compiler_options) {}
void MaybeGenerateMarkingRegisterCheck(int codem ATTRIBUTE_UNUSED,
Location temp_loc ATTRIBUTE_UNUSED) OVERRIDE {
@@ -165,10 +155,8 @@ class TestCodeGeneratorARM64 : public arm64::CodeGeneratorARM64 {
#ifdef ART_ENABLE_CODEGEN_x86
class TestCodeGeneratorX86 : public x86::CodeGeneratorX86 {
public:
- TestCodeGeneratorX86(HGraph* graph,
- const X86InstructionSetFeatures& isa_features,
- const CompilerOptions& compiler_options)
- : x86::CodeGeneratorX86(graph, isa_features, compiler_options) {
+ TestCodeGeneratorX86(HGraph* graph, const CompilerOptions& compiler_options)
+ : x86::CodeGeneratorX86(graph, compiler_options) {
// Save edi, we need it for getting enough registers for long multiplication.
AddAllocatedRegister(Location::RegisterLocation(x86::EDI));
}
@@ -324,11 +312,11 @@ static void RunCode(CodeGenerator* codegen,
template <typename Expected>
static void RunCode(CodegenTargetConfig target_config,
+ const CompilerOptions& compiler_options,
HGraph* graph,
std::function<void(HGraph*)> hook_before_codegen,
bool has_result,
Expected expected) {
- CompilerOptions compiler_options;
std::unique_ptr<CodeGenerator> codegen(target_config.CreateCodeGenerator(graph,
compiler_options));
RunCode(codegen.get(), graph, hook_before_codegen, has_result, expected);
@@ -336,55 +324,37 @@ static void RunCode(CodegenTargetConfig target_config,
#ifdef ART_ENABLE_CODEGEN_arm
CodeGenerator* create_codegen_arm_vixl32(HGraph* graph, const CompilerOptions& compiler_options) {
- std::unique_ptr<const ArmInstructionSetFeatures> features_arm(
- ArmInstructionSetFeatures::FromCppDefines());
- return new (graph->GetAllocator())
- TestCodeGeneratorARMVIXL(graph, *features_arm.get(), compiler_options);
+ return new (graph->GetAllocator()) TestCodeGeneratorARMVIXL(graph, compiler_options);
}
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
CodeGenerator* create_codegen_arm64(HGraph* graph, const CompilerOptions& compiler_options) {
- std::unique_ptr<const Arm64InstructionSetFeatures> features_arm64(
- Arm64InstructionSetFeatures::FromCppDefines());
- return new (graph->GetAllocator())
- TestCodeGeneratorARM64(graph, *features_arm64.get(), compiler_options);
+ return new (graph->GetAllocator()) TestCodeGeneratorARM64(graph, compiler_options);
}
#endif
#ifdef ART_ENABLE_CODEGEN_x86
CodeGenerator* create_codegen_x86(HGraph* graph, const CompilerOptions& compiler_options) {
- std::unique_ptr<const X86InstructionSetFeatures> features_x86(
- X86InstructionSetFeatures::FromCppDefines());
- return new (graph->GetAllocator()) TestCodeGeneratorX86(
- graph, *features_x86.get(), compiler_options);
+ return new (graph->GetAllocator()) TestCodeGeneratorX86(graph, compiler_options);
}
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
CodeGenerator* create_codegen_x86_64(HGraph* graph, const CompilerOptions& compiler_options) {
- std::unique_ptr<const X86_64InstructionSetFeatures> features_x86_64(
- X86_64InstructionSetFeatures::FromCppDefines());
- return new (graph->GetAllocator())
- x86_64::CodeGeneratorX86_64(graph, *features_x86_64.get(), compiler_options);
+ return new (graph->GetAllocator()) x86_64::CodeGeneratorX86_64(graph, compiler_options);
}
#endif
#ifdef ART_ENABLE_CODEGEN_mips
CodeGenerator* create_codegen_mips(HGraph* graph, const CompilerOptions& compiler_options) {
- std::unique_ptr<const MipsInstructionSetFeatures> features_mips(
- MipsInstructionSetFeatures::FromCppDefines());
- return new (graph->GetAllocator())
- mips::CodeGeneratorMIPS(graph, *features_mips.get(), compiler_options);
+ return new (graph->GetAllocator()) mips::CodeGeneratorMIPS(graph, compiler_options);
}
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
CodeGenerator* create_codegen_mips64(HGraph* graph, const CompilerOptions& compiler_options) {
- std::unique_ptr<const Mips64InstructionSetFeatures> features_mips64(
- Mips64InstructionSetFeatures::FromCppDefines());
- return new (graph->GetAllocator())
- mips64::CodeGeneratorMIPS64(graph, *features_mips64.get(), compiler_options);
+ return new (graph->GetAllocator()) mips64::CodeGeneratorMIPS64(graph, compiler_options);
}
#endif
diff --git a/compiler/optimizing/common_arm64.h b/compiler/optimizing/common_arm64.h
index ed2f8e995d..5556f16740 100644
--- a/compiler/optimizing/common_arm64.h
+++ b/compiler/optimizing/common_arm64.h
@@ -151,23 +151,15 @@ inline vixl::aarch64::CPURegister InputCPURegisterOrZeroRegAt(HInstruction* inst
return InputCPURegisterAt(instr, index);
}
-inline int64_t Int64ConstantFrom(Location location) {
- HConstant* instr = location.GetConstant();
- if (instr->IsIntConstant()) {
- return instr->AsIntConstant()->GetValue();
- } else if (instr->IsNullConstant()) {
- return 0;
- } else {
- DCHECK(instr->IsLongConstant()) << instr->DebugName();
- return instr->AsLongConstant()->GetValue();
- }
+inline int64_t Int64FromLocation(Location location) {
+ return Int64FromConstant(location.GetConstant());
}
inline vixl::aarch64::Operand OperandFrom(Location location, DataType::Type type) {
if (location.IsRegister()) {
return vixl::aarch64::Operand(RegisterFrom(location, type));
} else {
- return vixl::aarch64::Operand(Int64ConstantFrom(location));
+ return vixl::aarch64::Operand(Int64FromLocation(location));
}
}
@@ -234,6 +226,13 @@ inline vixl::aarch64::Operand OperandFromMemOperand(
}
}
+inline bool AddSubCanEncodeAsImmediate(int64_t value) {
+ // If `value` does not fit but `-value` does, VIXL will automatically use
+ // the 'opposite' instruction.
+ return vixl::aarch64::Assembler::IsImmAddSub(value)
+ || vixl::aarch64::Assembler::IsImmAddSub(-value);
+}
+
inline bool Arm64CanEncodeConstantAsImmediate(HConstant* constant, HInstruction* instr) {
int64_t value = CodeGenerator::GetInt64ValueOf(constant);
@@ -249,6 +248,20 @@ inline bool Arm64CanEncodeConstantAsImmediate(HConstant* constant, HInstruction*
return IsUint<8>(value);
}
+ // Code generation for Min/Max:
+ // Cmp left_op, right_op
+ // Csel dst, left_op, right_op, cond
+ if (instr->IsMin() || instr->IsMax()) {
+ if (constant->GetUses().HasExactlyOneElement()) {
+ // If value can be encoded as immediate for the Cmp, then let VIXL handle
+ // the constant generation for the Csel.
+ return AddSubCanEncodeAsImmediate(value);
+ }
+ // These values are encodable as immediates for Cmp and VIXL will use csinc and csinv
+ // with the zr register as right_op, hence no constant generation is required.
+ return constant->IsZeroBitPattern() || constant->IsOne() || constant->IsMinusOne();
+ }
+
// For single uses we let VIXL handle the constant generation since it will
// use registers that are not managed by the register allocator (wip0, wip1).
if (constant->GetUses().HasExactlyOneElement()) {
@@ -275,10 +288,7 @@ inline bool Arm64CanEncodeConstantAsImmediate(HConstant* constant, HInstruction*
instr->IsSub())
<< instr->DebugName();
// Uses aliases of ADD/SUB instructions.
- // If `value` does not fit but `-value` does, VIXL will automatically use
- // the 'opposite' instruction.
- return vixl::aarch64::Assembler::IsImmAddSub(value)
- || vixl::aarch64::Assembler::IsImmAddSub(-value);
+ return AddSubCanEncodeAsImmediate(value);
}
}
diff --git a/compiler/optimizing/constant_folding_test.cc b/compiler/optimizing/constant_folding_test.cc
index d27104752b..b1436f863c 100644
--- a/compiler/optimizing/constant_folding_test.cc
+++ b/compiler/optimizing/constant_folding_test.cc
@@ -16,8 +16,6 @@
#include <functional>
-#include "arch/x86/instruction_set_features_x86.h"
-#include "code_generator_x86.h"
#include "constant_folding.h"
#include "dead_code_elimination.h"
#include "driver/compiler_options.h"
@@ -60,9 +58,6 @@ class ConstantFoldingTest : public OptimizingUnitTest {
std::string actual_before = printer_before.str();
EXPECT_EQ(expected_before, actual_before);
- std::unique_ptr<const X86InstructionSetFeatures> features_x86(
- X86InstructionSetFeatures::FromCppDefines());
- x86::CodeGeneratorX86 codegenX86(graph_, *features_x86.get(), CompilerOptions());
HConstantFolding(graph_, "constant_folding").Run();
GraphChecker graph_checker_cf(graph_);
graph_checker_cf.Run();
diff --git a/compiler/optimizing/constructor_fence_redundancy_elimination.cc b/compiler/optimizing/constructor_fence_redundancy_elimination.cc
index 1a7f9266e9..54bff22e98 100644
--- a/compiler/optimizing/constructor_fence_redundancy_elimination.cc
+++ b/compiler/optimizing/constructor_fence_redundancy_elimination.cc
@@ -47,7 +47,7 @@ class CFREVisitor : public HGraphVisitor {
candidate_fences_.push_back(constructor_fence);
for (size_t input_idx = 0; input_idx < constructor_fence->InputCount(); ++input_idx) {
- candidate_fence_targets_.Insert(constructor_fence->InputAt(input_idx));
+ candidate_fence_targets_.insert(constructor_fence->InputAt(input_idx));
}
}
@@ -208,13 +208,13 @@ class CFREVisitor : public HGraphVisitor {
// there is no benefit to this extra complexity unless we also reordered
// the stores to come later.
candidate_fences_.clear();
- candidate_fence_targets_.Clear();
+ candidate_fence_targets_.clear();
}
// A publishing 'store' is only interesting if the value being stored
// is one of the fence `targets` in `candidate_fences`.
bool IsInterestingPublishTarget(HInstruction* store_input) const {
- return candidate_fence_targets_.Find(store_input) != candidate_fence_targets_.end();
+ return candidate_fence_targets_.find(store_input) != candidate_fence_targets_.end();
}
void MaybeMerge(HConstructorFence* target, HConstructorFence* src) {
diff --git a/compiler/optimizing/dead_code_elimination_test.cc b/compiler/optimizing/dead_code_elimination_test.cc
index adb6ce1187..277453545a 100644
--- a/compiler/optimizing/dead_code_elimination_test.cc
+++ b/compiler/optimizing/dead_code_elimination_test.cc
@@ -16,8 +16,6 @@
#include "dead_code_elimination.h"
-#include "arch/x86/instruction_set_features_x86.h"
-#include "code_generator_x86.h"
#include "driver/compiler_options.h"
#include "graph_checker.h"
#include "optimizing_unit_test.h"
@@ -45,9 +43,6 @@ void DeadCodeEliminationTest::TestCode(const std::vector<uint16_t>& data,
std::string actual_before = printer_before.str();
ASSERT_EQ(actual_before, expected_before);
- std::unique_ptr<const X86InstructionSetFeatures> features_x86(
- X86InstructionSetFeatures::FromCppDefines());
- x86::CodeGeneratorX86 codegenX86(graph, *features_x86.get(), CompilerOptions());
HDeadCodeElimination(graph, nullptr /* stats */, "dead_code_elimination").Run();
GraphChecker graph_checker(graph);
graph_checker.Run();
diff --git a/compiler/optimizing/emit_swap_mips_test.cc b/compiler/optimizing/emit_swap_mips_test.cc
index b63914faf7..293c1ab3f3 100644
--- a/compiler/optimizing/emit_swap_mips_test.cc
+++ b/compiler/optimizing/emit_swap_mips_test.cc
@@ -28,11 +28,12 @@ namespace art {
class EmitSwapMipsTest : public OptimizingUnitTest {
public:
void SetUp() OVERRIDE {
+ instruction_set_ = InstructionSet::kMips;
+ instruction_set_features_ = MipsInstructionSetFeatures::FromCppDefines();
+ OptimizingUnitTest::SetUp();
graph_ = CreateGraph();
- isa_features_ = MipsInstructionSetFeatures::FromCppDefines();
- codegen_ = new (graph_->GetAllocator()) mips::CodeGeneratorMIPS(graph_,
- *isa_features_.get(),
- CompilerOptions());
+ codegen_.reset(
+ new (graph_->GetAllocator()) mips::CodeGeneratorMIPS(graph_, *compiler_options_));
moves_ = new (GetAllocator()) HParallelMove(GetAllocator());
test_helper_.reset(
new AssemblerTestInfrastructure(GetArchitectureString(),
@@ -47,8 +48,10 @@ class EmitSwapMipsTest : public OptimizingUnitTest {
void TearDown() OVERRIDE {
test_helper_.reset();
- isa_features_.reset();
+ codegen_.reset();
+ graph_ = nullptr;
ResetPoolAndAllocator();
+ OptimizingUnitTest::TearDown();
}
// Get the typically used name for this architecture.
@@ -106,10 +109,9 @@ class EmitSwapMipsTest : public OptimizingUnitTest {
protected:
HGraph* graph_;
HParallelMove* moves_;
- mips::CodeGeneratorMIPS* codegen_;
+ std::unique_ptr<mips::CodeGeneratorMIPS> codegen_;
mips::MipsAssembler* assembler_;
std::unique_ptr<AssemblerTestInfrastructure> test_helper_;
- std::unique_ptr<const MipsInstructionSetFeatures> isa_features_;
};
TEST_F(EmitSwapMipsTest, TwoRegisters) {
diff --git a/compiler/optimizing/gvn.cc b/compiler/optimizing/gvn.cc
index 4863718518..e6b6326726 100644
--- a/compiler/optimizing/gvn.cc
+++ b/compiler/optimizing/gvn.cc
@@ -479,7 +479,10 @@ void GlobalValueNumberer::VisitBasicBlock(HBasicBlock* block) {
HInstruction* next = current->GetNext();
// Do not kill the set with the side effects of the instruction just now: if
// the instruction is GVN'ed, we don't need to kill.
- if (current->CanBeMoved()) {
+ //
+ // BoundType is a special case example of an instruction which shouldn't be moved but can be
+ // GVN'ed.
+ if (current->CanBeMoved() || current->IsBoundType()) {
if (current->IsBinaryOperation() && current->AsBinaryOperation()->IsCommutative()) {
// For commutative ops, (x op y) will be treated the same as (y op x)
// after fixed ordering.
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 6900cd883a..3ba741472e 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -126,7 +126,7 @@ void HInliner::UpdateInliningBudget() {
}
bool HInliner::Run() {
- if (compiler_driver_->GetCompilerOptions().GetInlineMaxCodeUnits() == 0) {
+ if (codegen_->GetCompilerOptions().GetInlineMaxCodeUnits() == 0) {
// Inlining effectively disabled.
return false;
} else if (graph_->IsDebuggable()) {
@@ -460,9 +460,10 @@ static bool AlwaysThrows(CompilerDriver* const compiler_driver, ArtMethod* metho
bool HInliner::TryInline(HInvoke* invoke_instruction) {
if (invoke_instruction->IsInvokeUnresolved() ||
- invoke_instruction->IsInvokePolymorphic()) {
- return false; // Don't bother to move further if we know the method is unresolved or an
- // invoke-polymorphic.
+ invoke_instruction->IsInvokePolymorphic() ||
+ invoke_instruction->IsInvokeCustom()) {
+ return false; // Don't bother to move further if we know the method is unresolved or the
+ // invocation is polymorphic (invoke-{polymorphic,custom}).
}
ScopedObjectAccess soa(Thread::Current());
@@ -730,7 +731,7 @@ HInliner::InlineCacheType HInliner::ExtractClassesFromOfflineProfile(
offline_profile.dex_references.size());
for (size_t i = 0; i < offline_profile.dex_references.size(); i++) {
bool found = false;
- for (const DexFile* dex_file : compiler_driver_->GetDexFilesForOatFile()) {
+ for (const DexFile* dex_file : codegen_->GetCompilerOptions().GetDexFilesForOatFile()) {
if (offline_profile.dex_references[i].MatchesDex(dex_file)) {
dex_profile_index_to_dex_cache[i] =
caller_compilation_unit_.GetClassLinker()->FindDexCache(self, *dex_file);
@@ -948,7 +949,7 @@ HInstruction* HInliner::AddTypeGuard(HInstruction* receiver,
invoke_instruction->GetDexPc(),
/* needs_access_check */ false);
HLoadClass::LoadKind kind = HSharpening::ComputeLoadClassKind(
- load_class, codegen_, compiler_driver_, caller_compilation_unit_);
+ load_class, codegen_, caller_compilation_unit_);
DCHECK(kind != HLoadClass::LoadKind::kInvalid)
<< "We should always be able to reference a class for inline caches";
// Load kind must be set before inserting the instruction into the graph.
@@ -1417,6 +1418,22 @@ size_t HInliner::CountRecursiveCallsOf(ArtMethod* method) const {
return count;
}
+static inline bool MayInline(const CompilerOptions& compiler_options,
+ const DexFile& inlined_from,
+ const DexFile& inlined_into) {
+ if (kIsTargetBuild) {
+ return true;
+ }
+
+ // We're not allowed to inline across dex files if we're the no-inline-from dex file.
+ if (!IsSameDexFile(inlined_from, inlined_into) &&
+ ContainsElement(compiler_options.GetNoInlineFromDexFile(), &inlined_from)) {
+ return false;
+ }
+
+ return true;
+}
+
bool HInliner::TryBuildAndInline(HInvoke* invoke_instruction,
ArtMethod* method,
ReferenceTypeInfo receiver_type,
@@ -1438,8 +1455,9 @@ bool HInliner::TryBuildAndInline(HInvoke* invoke_instruction,
// Check whether we're allowed to inline. The outermost compilation unit is the relevant
// dex file here (though the transitivity of an inline chain would allow checking the calller).
- if (!compiler_driver_->MayInline(method->GetDexFile(),
- outer_compilation_unit_.GetDexFile())) {
+ if (!MayInline(codegen_->GetCompilerOptions(),
+ *method->GetDexFile(),
+ *outer_compilation_unit_.GetDexFile())) {
if (TryPatternSubstitution(invoke_instruction, method, return_replacement)) {
LOG_SUCCESS() << "Successfully replaced pattern of invoke "
<< method->PrettyMethod();
@@ -1464,7 +1482,7 @@ bool HInliner::TryBuildAndInline(HInvoke* invoke_instruction,
return false;
}
- size_t inline_max_code_units = compiler_driver_->GetCompilerOptions().GetInlineMaxCodeUnits();
+ size_t inline_max_code_units = codegen_->GetCompilerOptions().GetInlineMaxCodeUnits();
if (accessor.InsnsSizeInCodeUnits() > inline_max_code_units) {
LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedCodeItem)
<< "Method " << method->PrettyMethod()
@@ -1765,7 +1783,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
graph_->GetArenaStack(),
callee_dex_file,
method_index,
- compiler_driver_->GetInstructionSet(),
+ codegen_->GetCompilerOptions().GetInstructionSet(),
invoke_type,
graph_->IsDebuggable(),
/* osr */ false,
@@ -1802,8 +1820,8 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
return false;
}
- if (!RegisterAllocator::CanAllocateRegistersFor(*callee_graph,
- compiler_driver_->GetInstructionSet())) {
+ if (!RegisterAllocator::CanAllocateRegistersFor(
+ *callee_graph, codegen_->GetCompilerOptions().GetInstructionSet())) {
LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedRegisterAllocator)
<< "Method " << callee_dex_file.PrettyMethod(method_index)
<< " cannot be inlined because of the register allocator";
@@ -2004,8 +2022,8 @@ void HInliner::RunOptimizations(HGraph* callee_graph,
// optimization that could lead to a HDeoptimize. The following optimizations do not.
HDeadCodeElimination dce(callee_graph, inline_stats_, "dead_code_elimination$inliner");
HConstantFolding fold(callee_graph, "constant_folding$inliner");
- HSharpening sharpening(callee_graph, codegen_, compiler_driver_);
- InstructionSimplifier simplify(callee_graph, codegen_, compiler_driver_, inline_stats_);
+ HSharpening sharpening(callee_graph, codegen_);
+ InstructionSimplifier simplify(callee_graph, codegen_, inline_stats_);
IntrinsicsRecognizer intrinsics(callee_graph, inline_stats_);
HOptimization* optimizations[] = {
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index 24dc2ee9b4..731accd692 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -449,11 +449,7 @@ void HInstructionBuilder::BuildIntrinsic(ArtMethod* method) {
target_method,
HInvokeStaticOrDirect::ClinitCheckRequirement::kNone);
RangeInstructionOperands operands(graph_->GetNumberOfVRegs() - in_vregs, in_vregs);
- HandleInvoke(invoke,
- operands,
- dex_file_->GetMethodShorty(method_idx),
- /* clinit_check */ nullptr,
- /* is_unresolved */ false);
+ HandleInvoke(invoke, operands, dex_file_->GetMethodShorty(method_idx), /* is_unresolved */ false);
// Add the return instruction.
if (return_type_ == DataType::Type::kVoid) {
@@ -916,11 +912,11 @@ bool HInstructionBuilder::BuildInvoke(const Instruction& instruction,
uint32_t method_idx,
const InstructionOperands& operands) {
InvokeType invoke_type = GetInvokeTypeFromOpCode(instruction.Opcode());
- const char* descriptor = dex_file_->GetMethodShorty(method_idx);
- DataType::Type return_type = DataType::FromShorty(descriptor[0]);
+ const char* shorty = dex_file_->GetMethodShorty(method_idx);
+ DataType::Type return_type = DataType::FromShorty(shorty[0]);
// Remove the return type from the 'proto'.
- size_t number_of_arguments = strlen(descriptor) - 1;
+ size_t number_of_arguments = strlen(shorty) - 1;
if (invoke_type != kStatic) { // instance call
// One extra argument for 'this'.
number_of_arguments++;
@@ -937,11 +933,7 @@ bool HInstructionBuilder::BuildInvoke(const Instruction& instruction,
dex_pc,
method_idx,
invoke_type);
- return HandleInvoke(invoke,
- operands,
- descriptor,
- nullptr /* clinit_check */,
- true /* is_unresolved */);
+ return HandleInvoke(invoke, operands, shorty, /* is_unresolved */ true);
}
// Replace calls to String.<init> with StringFactory.
@@ -968,7 +960,7 @@ bool HInstructionBuilder::BuildInvoke(const Instruction& instruction,
invoke_type,
target_method,
HInvokeStaticOrDirect::ClinitCheckRequirement::kImplicit);
- return HandleStringInit(invoke, operands, descriptor);
+ return HandleStringInit(invoke, operands, shorty);
}
// Potential class initialization check, in the case of a static method call.
@@ -1028,29 +1020,39 @@ bool HInstructionBuilder::BuildInvoke(const Instruction& instruction,
resolved_method,
ImTable::GetImtIndex(resolved_method));
}
-
- return HandleInvoke(invoke, operands, descriptor, clinit_check, false /* is_unresolved */);
+ return HandleInvoke(invoke, operands, shorty, /* is_unresolved */ false, clinit_check);
}
-bool HInstructionBuilder::BuildInvokePolymorphic(const Instruction& instruction ATTRIBUTE_UNUSED,
- uint32_t dex_pc,
+bool HInstructionBuilder::BuildInvokePolymorphic(uint32_t dex_pc,
uint32_t method_idx,
dex::ProtoIndex proto_idx,
const InstructionOperands& operands) {
- const char* descriptor = dex_file_->GetShorty(proto_idx);
- DCHECK_EQ(1 + ArtMethod::NumArgRegisters(descriptor), operands.GetNumberOfOperands());
- DataType::Type return_type = DataType::FromShorty(descriptor[0]);
- size_t number_of_arguments = strlen(descriptor);
+ const char* shorty = dex_file_->GetShorty(proto_idx);
+ DCHECK_EQ(1 + ArtMethod::NumArgRegisters(shorty), operands.GetNumberOfOperands());
+ DataType::Type return_type = DataType::FromShorty(shorty[0]);
+ size_t number_of_arguments = strlen(shorty);
HInvoke* invoke = new (allocator_) HInvokePolymorphic(allocator_,
number_of_arguments,
return_type,
dex_pc,
method_idx);
- return HandleInvoke(invoke,
- operands,
- descriptor,
- nullptr /* clinit_check */,
- false /* is_unresolved */);
+ return HandleInvoke(invoke, operands, shorty, /* is_unresolved */ false);
+}
+
+
+bool HInstructionBuilder::BuildInvokeCustom(uint32_t dex_pc,
+ uint32_t call_site_idx,
+ const InstructionOperands& operands) {
+ dex::ProtoIndex proto_idx = dex_file_->GetProtoIndexForCallSite(call_site_idx);
+ const char* shorty = dex_file_->GetShorty(proto_idx);
+ DataType::Type return_type = DataType::FromShorty(shorty[0]);
+ size_t number_of_arguments = strlen(shorty) - 1;
+ HInvoke* invoke = new (allocator_) HInvokeCustom(allocator_,
+ number_of_arguments,
+ call_site_idx,
+ return_type,
+ dex_pc);
+ return HandleInvoke(invoke, operands, shorty, /* is_unresolved */ false);
}
HNewInstance* HInstructionBuilder::BuildNewInstance(dex::TypeIndex type_index, uint32_t dex_pc) {
@@ -1197,10 +1199,10 @@ HClinitCheck* HInstructionBuilder::ProcessClinitCheckForInvoke(
bool HInstructionBuilder::SetupInvokeArguments(HInvoke* invoke,
const InstructionOperands& operands,
- const char* descriptor,
+ const char* shorty,
size_t start_index,
size_t* argument_index) {
- uint32_t descriptor_index = 1; // Skip the return type.
+ uint32_t shorty_index = 1; // Skip the return type.
const size_t number_of_operands = operands.GetNumberOfOperands();
for (size_t i = start_index;
// Make sure we don't go over the expected arguments or over the number of
@@ -1208,7 +1210,7 @@ bool HInstructionBuilder::SetupInvokeArguments(HInvoke* invoke,
// it hasn't been properly checked.
(i < number_of_operands) && (*argument_index < invoke->GetNumberOfArguments());
i++, (*argument_index)++) {
- DataType::Type type = DataType::FromShorty(descriptor[descriptor_index++]);
+ DataType::Type type = DataType::FromShorty(shorty[shorty_index++]);
bool is_wide = (type == DataType::Type::kInt64) || (type == DataType::Type::kFloat64);
if (is_wide && ((i + 1 == number_of_operands) ||
(operands.GetOperand(i) + 1 != operands.GetOperand(i + 1)))) {
@@ -1250,9 +1252,9 @@ bool HInstructionBuilder::SetupInvokeArguments(HInvoke* invoke,
bool HInstructionBuilder::HandleInvoke(HInvoke* invoke,
const InstructionOperands& operands,
- const char* descriptor,
- HClinitCheck* clinit_check,
- bool is_unresolved) {
+ const char* shorty,
+ bool is_unresolved,
+ HClinitCheck* clinit_check) {
DCHECK(!invoke->IsInvokeStaticOrDirect() || !invoke->AsInvokeStaticOrDirect()->IsStringInit());
size_t start_index = 0;
@@ -1267,7 +1269,7 @@ bool HInstructionBuilder::HandleInvoke(HInvoke* invoke,
argument_index = 1;
}
- if (!SetupInvokeArguments(invoke, operands, descriptor, start_index, &argument_index)) {
+ if (!SetupInvokeArguments(invoke, operands, shorty, start_index, &argument_index)) {
return false;
}
@@ -1288,13 +1290,13 @@ bool HInstructionBuilder::HandleInvoke(HInvoke* invoke,
bool HInstructionBuilder::HandleStringInit(HInvoke* invoke,
const InstructionOperands& operands,
- const char* descriptor) {
+ const char* shorty) {
DCHECK(invoke->IsInvokeStaticOrDirect());
DCHECK(invoke->AsInvokeStaticOrDirect()->IsStringInit());
size_t start_index = 1;
size_t argument_index = 0;
- if (!SetupInvokeArguments(invoke, operands, descriptor, start_index, &argument_index)) {
+ if (!SetupInvokeArguments(invoke, operands, shorty, start_index, &argument_index)) {
return false;
}
@@ -1306,28 +1308,25 @@ bool HInstructionBuilder::HandleStringInit(HInvoke* invoke,
HInstruction* arg_this = LoadLocal(orig_this_reg, DataType::Type::kReference);
// Replacing the NewInstance might render it redundant. Keep a list of these
- // to be visited once it is clear whether it is has remaining uses.
+ // to be visited once it is clear whether it has remaining uses.
if (arg_this->IsNewInstance()) {
ssa_builder_->AddUninitializedString(arg_this->AsNewInstance());
+ // Walk over all vregs and replace any occurrence of `arg_this` with `invoke`.
+ for (size_t vreg = 0, e = current_locals_->size(); vreg < e; ++vreg) {
+ if ((*current_locals_)[vreg] == arg_this) {
+ (*current_locals_)[vreg] = invoke;
+ }
+ }
} else {
- // The only reason a HPhi can flow in a String.<init> is when there is an
- // irreducible loop, which will create HPhi for all dex registers at loop entry.
DCHECK(arg_this->IsPhi());
- DCHECK(graph_->HasIrreducibleLoops());
- // Don't bother compiling a method in that situation. While we could look at all
- // phis related to the HNewInstance, it's not worth the trouble.
- MaybeRecordStat(compilation_stats_,
- MethodCompilationStat::kNotCompiledIrreducibleAndStringInit);
- return false;
- }
-
- // Walk over all vregs and replace any occurrence of `arg_this` with `invoke`.
- for (size_t vreg = 0, e = current_locals_->size(); vreg < e; ++vreg) {
- if ((*current_locals_)[vreg] == arg_this) {
- (*current_locals_)[vreg] = invoke;
- }
+ // We can get a phi as input of a String.<init> if there is a loop between the
+ // allocation and the String.<init> call. As we don't know which other phis might alias
+ // with `arg_this`, we keep a record of these phis and will analyze their inputs and
+ // uses once the inputs and users are populated (in ssa_builder.cc).
+ // Note: we only do this for phis, as it is a somewhat more expensive operation than
+ // what we're doing above when the input is the `HNewInstance`.
+ ssa_builder_->AddUninitializedStringPhi(arg_this->AsPhi(), invoke);
}
-
return true;
}
@@ -1774,7 +1773,6 @@ void HInstructionBuilder::BuildLoadString(dex::StringIndex string_index, uint32_
new (allocator_) HLoadString(graph_->GetCurrentMethod(), string_index, *dex_file_, dex_pc);
HSharpening::ProcessLoadString(load_string,
code_generator_,
- compiler_driver_,
*dex_compilation_unit_,
handles_);
AppendInstruction(load_string);
@@ -1816,7 +1814,6 @@ HLoadClass* HInstructionBuilder::BuildLoadClass(dex::TypeIndex type_index,
HLoadClass::LoadKind load_kind = HSharpening::ComputeLoadClassKind(load_class,
code_generator_,
- compiler_driver_,
*dex_compilation_unit_);
if (load_kind == HLoadClass::LoadKind::kInvalid) {
@@ -1876,7 +1873,7 @@ void HInstructionBuilder::BuildTypeCheck(const Instruction& instruction,
Handle<mirror::Class> klass = ResolveClass(soa, type_index);
bool needs_access_check = LoadClassNeedsAccessCheck(klass);
TypeCheckKind check_kind = HSharpening::ComputeTypeCheckKind(
- klass.Get(), code_generator_, compiler_driver_, needs_access_check);
+ klass.Get(), code_generator_, needs_access_check);
HInstruction* class_or_null = nullptr;
HIntConstant* bitstring_path_to_root = nullptr;
@@ -2144,14 +2141,28 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction,
uint32_t args[5];
uint32_t number_of_vreg_arguments = instruction.GetVarArgs(args);
VarArgsInstructionOperands operands(args, number_of_vreg_arguments);
- return BuildInvokePolymorphic(instruction, dex_pc, method_idx, proto_idx, operands);
+ return BuildInvokePolymorphic(dex_pc, method_idx, proto_idx, operands);
}
case Instruction::INVOKE_POLYMORPHIC_RANGE: {
uint16_t method_idx = instruction.VRegB_4rcc();
dex::ProtoIndex proto_idx(instruction.VRegH_4rcc());
RangeInstructionOperands operands(instruction.VRegC_4rcc(), instruction.VRegA_4rcc());
- return BuildInvokePolymorphic(instruction, dex_pc, method_idx, proto_idx, operands);
+ return BuildInvokePolymorphic(dex_pc, method_idx, proto_idx, operands);
+ }
+
+ case Instruction::INVOKE_CUSTOM: {
+ uint16_t call_site_idx = instruction.VRegB_35c();
+ uint32_t args[5];
+ uint32_t number_of_vreg_arguments = instruction.GetVarArgs(args);
+ VarArgsInstructionOperands operands(args, number_of_vreg_arguments);
+ return BuildInvokeCustom(dex_pc, call_site_idx, operands);
+ }
+
+ case Instruction::INVOKE_CUSTOM_RANGE: {
+ uint16_t call_site_idx = instruction.VRegB_3rc();
+ RangeInstructionOperands operands(instruction.VRegC_3rc(), instruction.VRegA_3rc());
+ return BuildInvokeCustom(dex_pc, call_site_idx, operands);
}
case Instruction::NEG_INT: {
@@ -2933,7 +2944,21 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction,
break;
}
- default:
+ case Instruction::UNUSED_3E:
+ case Instruction::UNUSED_3F:
+ case Instruction::UNUSED_40:
+ case Instruction::UNUSED_41:
+ case Instruction::UNUSED_42:
+ case Instruction::UNUSED_43:
+ case Instruction::UNUSED_79:
+ case Instruction::UNUSED_7A:
+ case Instruction::UNUSED_F3:
+ case Instruction::UNUSED_F4:
+ case Instruction::UNUSED_F5:
+ case Instruction::UNUSED_F6:
+ case Instruction::UNUSED_F7:
+ case Instruction::UNUSED_F8:
+ case Instruction::UNUSED_F9: {
VLOG(compiler) << "Did not compile "
<< dex_file_->PrettyMethod(dex_compilation_unit_->GetDexMethodIndex())
<< " because of unhandled instruction "
@@ -2941,6 +2966,7 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction,
MaybeRecordStat(compilation_stats_,
MethodCompilationStat::kNotCompiledUnhandledInstruction);
return false;
+ }
}
return true;
} // NOLINT(readability/fn_size)
diff --git a/compiler/optimizing/instruction_builder.h b/compiler/optimizing/instruction_builder.h
index 2218a691ea..af7092a0cf 100644
--- a/compiler/optimizing/instruction_builder.h
+++ b/compiler/optimizing/instruction_builder.h
@@ -173,12 +173,17 @@ class HInstructionBuilder : public ValueObject {
// Builds an invocation node for invoke-polymorphic and returns whether the
// instruction is supported.
- bool BuildInvokePolymorphic(const Instruction& instruction,
- uint32_t dex_pc,
+ bool BuildInvokePolymorphic(uint32_t dex_pc,
uint32_t method_idx,
dex::ProtoIndex proto_idx,
const InstructionOperands& operands);
+ // Builds an invocation node for invoke-custom and returns whether the
+ // instruction is supported.
+ bool BuildInvokeCustom(uint32_t dex_pc,
+ uint32_t call_site_idx,
+ const InstructionOperands& operands);
+
// Builds a new array node and the instructions that fill it.
HNewArray* BuildFilledNewArray(uint32_t dex_pc,
dex::TypeIndex type_index,
@@ -253,19 +258,19 @@ class HInstructionBuilder : public ValueObject {
bool SetupInvokeArguments(HInvoke* invoke,
const InstructionOperands& operands,
- const char* descriptor,
+ const char* shorty,
size_t start_index,
size_t* argument_index);
bool HandleInvoke(HInvoke* invoke,
const InstructionOperands& operands,
- const char* descriptor,
- HClinitCheck* clinit_check,
- bool is_unresolved);
+ const char* shorty,
+ bool is_unresolved,
+ HClinitCheck* clinit_check = nullptr);
bool HandleStringInit(HInvoke* invoke,
const InstructionOperands& operands,
- const char* descriptor);
+ const char* shorty);
void HandleStringInitResult(HInvokeStaticOrDirect* invoke);
HClinitCheck* ProcessClinitCheckForInvoke(
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 63704a470e..70af49f8f0 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -36,11 +36,9 @@ class InstructionSimplifierVisitor : public HGraphDelegateVisitor {
public:
InstructionSimplifierVisitor(HGraph* graph,
CodeGenerator* codegen,
- CompilerDriver* compiler_driver,
OptimizingCompilerStats* stats)
: HGraphDelegateVisitor(graph),
codegen_(codegen),
- compiler_driver_(compiler_driver),
stats_(stats) {}
bool Run();
@@ -117,6 +115,7 @@ class InstructionSimplifierVisitor : public HGraphDelegateVisitor {
void SimplifyFP2Int(HInvoke* invoke);
void SimplifyStringCharAt(HInvoke* invoke);
void SimplifyStringIsEmptyOrLength(HInvoke* invoke);
+ void SimplifyStringIndexOf(HInvoke* invoke);
void SimplifyNPEOnArgN(HInvoke* invoke, size_t);
void SimplifyReturnThis(HInvoke* invoke);
void SimplifyAllocationIntrinsic(HInvoke* invoke);
@@ -126,7 +125,6 @@ class InstructionSimplifierVisitor : public HGraphDelegateVisitor {
void SimplifyAbs(HInvoke* invoke, DataType::Type type);
CodeGenerator* codegen_;
- CompilerDriver* compiler_driver_;
OptimizingCompilerStats* stats_;
bool simplification_occurred_ = false;
int simplifications_at_current_position_ = 0;
@@ -143,7 +141,7 @@ bool InstructionSimplifier::Run() {
visitor.VisitReversePostOrder();
}
- InstructionSimplifierVisitor visitor(graph_, codegen_, compiler_driver_, stats_);
+ InstructionSimplifierVisitor visitor(graph_, codegen_, stats_);
return visitor.Run();
}
@@ -637,8 +635,8 @@ void InstructionSimplifierVisitor::VisitCheckCast(HCheckCast* check_cast) {
return;
}
- // Note: The `outcome` is initialized to please valgrind - the compiler can reorder
- // the return value check with the `outcome` check, b/27651442 .
+ // Historical note: The `outcome` was initialized to please Valgrind - the compiler can reorder
+ // the return value check with the `outcome` check, b/27651442.
bool outcome = false;
if (TypeCheckHasKnownOutcome(check_cast->GetTargetClassRTI(), object, &outcome)) {
if (outcome) {
@@ -683,8 +681,8 @@ void InstructionSimplifierVisitor::VisitInstanceOf(HInstanceOf* instruction) {
return;
}
- // Note: The `outcome` is initialized to please valgrind - the compiler can reorder
- // the return value check with the `outcome` check, b/27651442 .
+ // Historical note: The `outcome` was initialized to please Valgrind - the compiler can reorder
+ // the return value check with the `outcome` check, b/27651442.
bool outcome = false;
if (TypeCheckHasKnownOutcome(instruction->GetTargetClassRTI(), object, &outcome)) {
MaybeRecordStat(stats_, MethodCompilationStat::kRemovedInstanceOf);
@@ -2308,7 +2306,7 @@ void InstructionSimplifierVisitor::SimplifySystemArrayCopy(HInvoke* instruction)
// the invoke, as we would need to look it up in the current dex file, and it
// is unlikely that it exists. The most usual situation for such typed
// arraycopy methods is a direct pointer to the boot image.
- HSharpening::SharpenInvokeStaticOrDirect(invoke, codegen_, compiler_driver_);
+ HSharpening::SharpenInvokeStaticOrDirect(invoke, codegen_);
}
}
}
@@ -2417,6 +2415,43 @@ void InstructionSimplifierVisitor::SimplifyStringIsEmptyOrLength(HInvoke* invoke
invoke->GetBlock()->ReplaceAndRemoveInstructionWith(invoke, replacement);
}
+void InstructionSimplifierVisitor::SimplifyStringIndexOf(HInvoke* invoke) {
+ DCHECK(invoke->GetIntrinsic() == Intrinsics::kStringIndexOf ||
+ invoke->GetIntrinsic() == Intrinsics::kStringIndexOfAfter);
+ if (invoke->InputAt(0)->IsLoadString()) {
+ HLoadString* load_string = invoke->InputAt(0)->AsLoadString();
+ const DexFile& dex_file = load_string->GetDexFile();
+ uint32_t utf16_length;
+ const char* data =
+ dex_file.StringDataAndUtf16LengthByIdx(load_string->GetStringIndex(), &utf16_length);
+ if (utf16_length == 0) {
+ invoke->ReplaceWith(GetGraph()->GetIntConstant(-1));
+ invoke->GetBlock()->RemoveInstruction(invoke);
+ RecordSimplification();
+ return;
+ }
+ if (utf16_length == 1 && invoke->GetIntrinsic() == Intrinsics::kStringIndexOf) {
+ // Simplify to HSelect(HEquals(., load_string.charAt(0)), 0, -1).
+ // If the sought character is supplementary, this gives the correct result, i.e. -1.
+ uint32_t c = GetUtf16FromUtf8(&data);
+ DCHECK_EQ(GetTrailingUtf16Char(c), 0u);
+ DCHECK_EQ(GetLeadingUtf16Char(c), c);
+ uint32_t dex_pc = invoke->GetDexPc();
+ ArenaAllocator* allocator = GetGraph()->GetAllocator();
+ HEqual* equal =
+ new (allocator) HEqual(invoke->InputAt(1), GetGraph()->GetIntConstant(c), dex_pc);
+ invoke->GetBlock()->InsertInstructionBefore(equal, invoke);
+ HSelect* result = new (allocator) HSelect(equal,
+ GetGraph()->GetIntConstant(0),
+ GetGraph()->GetIntConstant(-1),
+ dex_pc);
+ invoke->GetBlock()->ReplaceAndRemoveInstructionWith(invoke, result);
+ RecordSimplification();
+ return;
+ }
+ }
+}
+
// This method should only be used on intrinsics whose sole way of throwing an
// exception is raising a NPE when the nth argument is null. If that argument
// is provably non-null, we can clear the flag.
@@ -2554,6 +2589,10 @@ void InstructionSimplifierVisitor::VisitInvoke(HInvoke* instruction) {
case Intrinsics::kStringLength:
SimplifyStringIsEmptyOrLength(instruction);
break;
+ case Intrinsics::kStringIndexOf:
+ case Intrinsics::kStringIndexOfAfter:
+ SimplifyStringIndexOf(instruction);
+ break;
case Intrinsics::kStringStringIndexOf:
case Intrinsics::kStringStringIndexOfAfter:
SimplifyNPEOnArgN(instruction, 1); // 0th has own NullCheck
diff --git a/compiler/optimizing/instruction_simplifier.h b/compiler/optimizing/instruction_simplifier.h
index f409e873de..2d134e0067 100644
--- a/compiler/optimizing/instruction_simplifier.h
+++ b/compiler/optimizing/instruction_simplifier.h
@@ -24,7 +24,6 @@
namespace art {
class CodeGenerator;
-class CompilerDriver;
/**
* Implements optimizations specific to each instruction.
@@ -40,12 +39,10 @@ class InstructionSimplifier : public HOptimization {
public:
InstructionSimplifier(HGraph* graph,
CodeGenerator* codegen,
- CompilerDriver* compiler_driver,
OptimizingCompilerStats* stats = nullptr,
const char* name = kInstructionSimplifierPassName)
: HOptimization(graph, name, stats),
- codegen_(codegen),
- compiler_driver_(compiler_driver) {}
+ codegen_(codegen) {}
static constexpr const char* kInstructionSimplifierPassName = "instruction_simplifier";
@@ -53,7 +50,6 @@ class InstructionSimplifier : public HOptimization {
private:
CodeGenerator* codegen_;
- CompilerDriver* compiler_driver_;
DISALLOW_COPY_AND_ASSIGN(InstructionSimplifier);
};
diff --git a/compiler/optimizing/instruction_simplifier_x86.cc b/compiler/optimizing/instruction_simplifier_x86.cc
new file mode 100644
index 0000000000..b3f67d6e84
--- /dev/null
+++ b/compiler/optimizing/instruction_simplifier_x86.cc
@@ -0,0 +1,149 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "instruction_simplifier_x86.h"
+#include "arch/x86/instruction_set_features_x86.h"
+#include "mirror/array-inl.h"
+#include "code_generator.h"
+
+
+namespace art {
+
+namespace x86 {
+
+class InstructionSimplifierX86Visitor : public HGraphVisitor {
+ public:
+ InstructionSimplifierX86Visitor(HGraph* graph,
+ CodeGeneratorX86 *codegen,
+ OptimizingCompilerStats* stats)
+ : HGraphVisitor(graph), codegen_(codegen), stats_(stats) {}
+
+ private:
+ void RecordSimplification() {
+ MaybeRecordStat(stats_, MethodCompilationStat::kInstructionSimplificationsArch);
+ }
+
+ bool HasCpuFeatureFlag() {
+ return (codegen_->GetInstructionSetFeatures().HasAVX2());
+ }
+
+ /**
+ * This simplifier uses a special-purpose BB visitor.
+ * (1) No need to visit Phi nodes.
+ * (2) Since statements can be removed in a "forward" fashion,
+ * the visitor should test if each statement is still there.
+ */
+ void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
+ // TODO: fragile iteration, provide more robust iterators?
+ for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
+ HInstruction* instruction = it.Current();
+ if (instruction->IsInBlock()) {
+ instruction->Accept(this);
+ }
+ }
+ }
+
+ bool TryGenerateVecMultiplyAccumulate(HVecMul* mul);
+ void VisitVecMul(HVecMul* instruction) OVERRIDE;
+
+ CodeGeneratorX86* codegen_;
+ OptimizingCompilerStats* stats_;
+};
+
+/* generic expressions for FMA
+a = (b * c) + a
+a = (b * c) – a
+*/
+bool InstructionSimplifierX86Visitor::TryGenerateVecMultiplyAccumulate(HVecMul* mul) {
+ if (!(mul->GetPackedType() == DataType::Type::kFloat32 ||
+ mul->GetPackedType() == DataType::Type::kFloat64)) {
+ return false;
+ }
+ ArenaAllocator* allocator = mul->GetBlock()->GetGraph()->GetAllocator();
+ if (mul->HasOnlyOneNonEnvironmentUse()) {
+ HInstruction* use = mul->GetUses().front().GetUser();
+ if (use->IsVecAdd() || use->IsVecSub()) {
+ // Replace code looking like
+ // VECMUL tmp, x, y
+ // VECADD dst, acc, tmp or VECADD dst, tmp, acc
+ // or
+ // VECSUB dst, tmp, acc
+ // with
+ // VECMULACC dst, acc, x, y
+
+ // Note that we do not want to (unconditionally) perform the merge when the
+ // multiplication has multiple uses and it can be merged in all of them.
+ // Multiple uses could happen on the same control-flow path, and we would
+ // then increase the amount of work. In the future we could try to evaluate
+ // whether all uses are on different control-flow paths (using dominance and
+ // reverse-dominance information) and only perform the merge when they are.
+ HInstruction* accumulator = nullptr;
+ HVecBinaryOperation* binop = use->AsVecBinaryOperation();
+ HInstruction* binop_left = binop->GetLeft();
+ HInstruction* binop_right = binop->GetRight();
+ DCHECK_NE(binop_left, binop_right);
+ if (use->IsVecSub()) {
+ if (binop_left == mul) {
+ accumulator = binop_right;
+ }
+ } else {
+ // VecAdd
+ if (binop_right == mul) {
+ accumulator = binop_left;
+ } else {
+ DCHECK_EQ(binop_left, mul);
+ accumulator = binop_right;
+ }
+ }
+ HInstruction::InstructionKind kind =
+ use->IsVecAdd() ? HInstruction::kAdd : HInstruction::kSub;
+
+ if (accumulator != nullptr) {
+ HVecMultiplyAccumulate* mulacc =
+ new (allocator) HVecMultiplyAccumulate(allocator,
+ kind,
+ accumulator,
+ mul->GetLeft(),
+ mul->GetRight(),
+ binop->GetPackedType(),
+ binop->GetVectorLength(),
+ binop->GetDexPc());
+ binop->GetBlock()->ReplaceAndRemoveInstructionWith(binop, mulacc);
+ DCHECK(!mul->HasUses());
+ mul->GetBlock()->RemoveInstruction(mul);
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+void InstructionSimplifierX86Visitor::VisitVecMul(HVecMul* instruction) {
+ if (HasCpuFeatureFlag()) {
+ if (TryGenerateVecMultiplyAccumulate(instruction)) {
+ RecordSimplification();
+ }
+ }
+}
+
+bool InstructionSimplifierX86::Run() {
+ InstructionSimplifierX86Visitor visitor(graph_, codegen_, stats_);
+ visitor.VisitReversePostOrder();
+ return true;
+}
+
+} // namespace x86
+} // namespace art
diff --git a/compiler/optimizing/instruction_simplifier_x86.h b/compiler/optimizing/instruction_simplifier_x86.h
new file mode 100644
index 0000000000..1fb199f728
--- /dev/null
+++ b/compiler/optimizing/instruction_simplifier_x86.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_INSTRUCTION_SIMPLIFIER_X86_H_
+#define ART_COMPILER_OPTIMIZING_INSTRUCTION_SIMPLIFIER_X86_H_
+
+#include "nodes.h"
+#include "optimization.h"
+#include "code_generator_x86.h"
+
+namespace art {
+namespace x86 {
+
+class InstructionSimplifierX86 : public HOptimization {
+ public:
+ InstructionSimplifierX86(HGraph* graph, CodeGenerator* codegen, OptimizingCompilerStats* stats)
+ : HOptimization(graph, kInstructionSimplifierX86PassName, stats),
+ codegen_(down_cast<CodeGeneratorX86*>(codegen)) {}
+
+ static constexpr const char* kInstructionSimplifierX86PassName = "instruction_simplifier_x86";
+
+ bool Run() OVERRIDE;
+
+ private:
+ CodeGeneratorX86* codegen_;
+};
+
+} // namespace x86
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_INSTRUCTION_SIMPLIFIER_X86_H_
diff --git a/compiler/optimizing/intrinsic_objects.cc b/compiler/optimizing/intrinsic_objects.cc
new file mode 100644
index 0000000000..3c20ad698b
--- /dev/null
+++ b/compiler/optimizing/intrinsic_objects.cc
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "intrinsic_objects.h"
+
+#include "art_field-inl.h"
+#include "base/logging.h"
+#include "class_root.h"
+#include "handle.h"
+#include "obj_ptr-inl.h"
+#include "mirror/object_array-inl.h"
+
+namespace art {
+
+static ObjPtr<mirror::ObjectArray<mirror::Object>> LookupIntegerCache(Thread* self,
+ ClassLinker* class_linker)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ ObjPtr<mirror::Class> integer_cache_class = class_linker->LookupClass(
+ self, "Ljava/lang/Integer$IntegerCache;", /* class_linker */ nullptr);
+ if (integer_cache_class == nullptr || !integer_cache_class->IsInitialized()) {
+ return nullptr;
+ }
+ ArtField* cache_field =
+ integer_cache_class->FindDeclaredStaticField("cache", "[Ljava/lang/Integer;");
+ CHECK(cache_field != nullptr);
+ ObjPtr<mirror::ObjectArray<mirror::Object>> integer_cache =
+ ObjPtr<mirror::ObjectArray<mirror::Object>>::DownCast(
+ cache_field->GetObject(integer_cache_class));
+ CHECK(integer_cache != nullptr);
+ return integer_cache;
+}
+
+ObjPtr<mirror::ObjectArray<mirror::Object>> IntrinsicObjects::AllocateBootImageLiveObjects(
+ Thread* self,
+ ClassLinker* class_linker) REQUIRES_SHARED(Locks::mutator_lock_) {
+ // The objects used for the Integer.valueOf() intrinsic must remain live even if references
+ // to them are removed using reflection. Image roots are not accessible through reflection,
+ // so the array we construct here shall keep them alive.
+ StackHandleScope<1> hs(self);
+ Handle<mirror::ObjectArray<mirror::Object>> integer_cache =
+ hs.NewHandle(LookupIntegerCache(self, class_linker));
+ size_t live_objects_size =
+ (integer_cache != nullptr) ? (/* cache */ 1u + integer_cache->GetLength()) : 0u;
+ ObjPtr<mirror::ObjectArray<mirror::Object>> live_objects =
+ mirror::ObjectArray<mirror::Object>::Alloc(
+ self, GetClassRoot<mirror::ObjectArray<mirror::Object>>(class_linker), live_objects_size);
+ int32_t index = 0;
+ if (integer_cache != nullptr) {
+ live_objects->Set(index++, integer_cache.Get());
+ for (int32_t i = 0, length = integer_cache->GetLength(); i != length; ++i) {
+ live_objects->Set(index++, integer_cache->Get(i));
+ }
+ }
+ CHECK_EQ(index, live_objects->GetLength());
+
+ if (kIsDebugBuild && integer_cache != nullptr) {
+ CHECK_EQ(integer_cache.Get(), GetIntegerValueOfCache(live_objects));
+ for (int32_t i = 0, len = integer_cache->GetLength(); i != len; ++i) {
+ CHECK_EQ(integer_cache->GetWithoutChecks(i), GetIntegerValueOfObject(live_objects, i));
+ }
+ }
+ return live_objects;
+}
+
+ObjPtr<mirror::ObjectArray<mirror::Object>> IntrinsicObjects::GetIntegerValueOfCache(
+ ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_live_objects) {
+ DCHECK(boot_image_live_objects != nullptr);
+ if (boot_image_live_objects->GetLength() == 0u) {
+ return nullptr; // No intrinsic objects.
+ }
+ // No need for read barrier for boot image object or for verifying the value that was just stored.
+ ObjPtr<mirror::Object> result =
+ boot_image_live_objects->GetWithoutChecks<kVerifyNone, kWithoutReadBarrier>(0);
+ DCHECK(result != nullptr);
+ DCHECK(result->IsObjectArray());
+ DCHECK(result->GetClass()->DescriptorEquals("[Ljava/lang/Integer;"));
+ return ObjPtr<mirror::ObjectArray<mirror::Object>>::DownCast(result);
+}
+
+ObjPtr<mirror::Object> IntrinsicObjects::GetIntegerValueOfObject(
+ ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_live_objects,
+ uint32_t index) {
+ DCHECK(boot_image_live_objects != nullptr);
+ DCHECK_NE(boot_image_live_objects->GetLength(), 0);
+ DCHECK_LT(index,
+ static_cast<uint32_t>(GetIntegerValueOfCache(boot_image_live_objects)->GetLength()));
+
+ // No need for read barrier for boot image object or for verifying the value that was just stored.
+ ObjPtr<mirror::Object> result =
+ boot_image_live_objects->GetWithoutChecks<kVerifyNone, kWithoutReadBarrier>(
+ /* skip the IntegerCache.cache */ 1u + index);
+ DCHECK(result != nullptr);
+ DCHECK(result->GetClass()->DescriptorEquals("Ljava/lang/Integer;"));
+ return result;
+}
+
+MemberOffset IntrinsicObjects::GetIntegerValueOfArrayDataOffset(
+ ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_live_objects) {
+ DCHECK_NE(boot_image_live_objects->GetLength(), 0);
+ MemberOffset result = mirror::ObjectArray<mirror::Object>::OffsetOfElement(1u);
+ DCHECK_EQ(GetIntegerValueOfObject(boot_image_live_objects, 0u),
+ (boot_image_live_objects
+ ->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(result)));
+ return result;
+}
+
+} // namespace art
diff --git a/compiler/optimizing/intrinsic_objects.h b/compiler/optimizing/intrinsic_objects.h
new file mode 100644
index 0000000000..863017be38
--- /dev/null
+++ b/compiler/optimizing/intrinsic_objects.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_INTRINSIC_OBJECTS_H_
+#define ART_COMPILER_OPTIMIZING_INTRINSIC_OBJECTS_H_
+
+#include "base/bit_field.h"
+#include "base/bit_utils.h"
+#include "base/mutex.h"
+
+namespace art {
+
+class ClassLinker;
+template <class MirrorType> class ObjPtr;
+class MemberOffset;
+class Thread;
+
+namespace mirror {
+class Object;
+template <class T> class ObjectArray;
+} // namespace mirror
+
+class IntrinsicObjects {
+ public:
+ enum class PatchType {
+ kIntegerValueOfObject,
+ kIntegerValueOfArray,
+
+ kLast = kIntegerValueOfArray
+ };
+
+ static uint32_t EncodePatch(PatchType patch_type, uint32_t index = 0u) {
+ DCHECK(patch_type == PatchType::kIntegerValueOfObject || index == 0u);
+ return PatchTypeField::Encode(static_cast<uint32_t>(patch_type)) | IndexField::Encode(index);
+ }
+
+ static PatchType DecodePatchType(uint32_t intrinsic_data) {
+ return static_cast<PatchType>(PatchTypeField::Decode(intrinsic_data));
+ }
+
+ static uint32_t DecodePatchIndex(uint32_t intrinsic_data) {
+ return IndexField::Decode(intrinsic_data);
+ }
+
+ static ObjPtr<mirror::ObjectArray<mirror::Object>> AllocateBootImageLiveObjects(
+ Thread* self,
+ ClassLinker* class_linker) REQUIRES_SHARED(Locks::mutator_lock_);
+
+ // Functions for retrieving data for Integer.valueOf().
+ static ObjPtr<mirror::ObjectArray<mirror::Object>> GetIntegerValueOfCache(
+ ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_live_objects)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ static ObjPtr<mirror::Object> GetIntegerValueOfObject(
+ ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_live_objects,
+ uint32_t index) REQUIRES_SHARED(Locks::mutator_lock_);
+ static MemberOffset GetIntegerValueOfArrayDataOffset(
+ ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_live_objects)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ private:
+ static constexpr size_t kPatchTypeBits =
+ MinimumBitsToStore(static_cast<uint32_t>(PatchType::kLast));
+ static constexpr size_t kIndexBits = BitSizeOf<uint32_t>() - kPatchTypeBits;
+ using PatchTypeField = BitField<uint32_t, 0u, kPatchTypeBits>;
+ using IndexField = BitField<uint32_t, kPatchTypeBits, kIndexBits>;
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_INTRINSIC_OBJECTS_H_
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index 056f533398..21efe11f31 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -21,10 +21,12 @@
#include "base/utils.h"
#include "class_linker.h"
#include "dex/invoke_type.h"
-#include "driver/compiler_driver.h"
#include "driver/compiler_options.h"
-#include "mirror/dex_cache-inl.h"
+#include "gc/space/image_space.h"
+#include "image-inl.h"
+#include "intrinsic_objects.h"
#include "nodes.h"
+#include "obj_ptr-inl.h"
#include "scoped_thread_state_change-inl.h"
#include "thread-current-inl.h"
@@ -142,6 +144,7 @@ static bool CheckInvokeType(Intrinsics intrinsic, HInvoke* invoke)
case kSuper:
case kInterface:
case kPolymorphic:
+ case kCustom:
return false;
}
LOG(FATAL) << "Unknown intrinsic invoke type: " << intrinsic_type;
@@ -220,112 +223,315 @@ std::ostream& operator<<(std::ostream& os, const Intrinsics& intrinsic) {
return os;
}
+static const char kIntegerCacheDescriptor[] = "Ljava/lang/Integer$IntegerCache;";
+static const char kIntegerDescriptor[] = "Ljava/lang/Integer;";
+static const char kIntegerArrayDescriptor[] = "[Ljava/lang/Integer;";
+static const char kLowFieldName[] = "low";
+static const char kHighFieldName[] = "high";
+static const char kValueFieldName[] = "value";
+
+static ObjPtr<mirror::ObjectArray<mirror::Object>> GetBootImageLiveObjects()
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ gc::Heap* heap = Runtime::Current()->GetHeap();
+ const std::vector<gc::space::ImageSpace*>& boot_image_spaces = heap->GetBootImageSpaces();
+ DCHECK(!boot_image_spaces.empty());
+ const ImageHeader& main_header = boot_image_spaces[0]->GetImageHeader();
+ ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_live_objects =
+ ObjPtr<mirror::ObjectArray<mirror::Object>>::DownCast(
+ main_header.GetImageRoot<kWithoutReadBarrier>(ImageHeader::kBootImageLiveObjects));
+ DCHECK(boot_image_live_objects != nullptr);
+ DCHECK(heap->ObjectIsInBootImageSpace(boot_image_live_objects));
+ return boot_image_live_objects;
+}
+
+static ObjPtr<mirror::Class> LookupInitializedClass(Thread* self,
+ ClassLinker* class_linker,
+ const char* descriptor)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ ObjPtr<mirror::Class> klass =
+ class_linker->LookupClass(self, descriptor, /* class_loader */ nullptr);
+ DCHECK(klass != nullptr);
+ DCHECK(klass->IsInitialized());
+ return klass;
+}
+
+static ObjPtr<mirror::ObjectArray<mirror::Object>> GetIntegerCacheArray(
+ ObjPtr<mirror::Class> cache_class) REQUIRES_SHARED(Locks::mutator_lock_) {
+ ArtField* cache_field = cache_class->FindDeclaredStaticField("cache", kIntegerArrayDescriptor);
+ DCHECK(cache_field != nullptr);
+ return ObjPtr<mirror::ObjectArray<mirror::Object>>::DownCast(cache_field->GetObject(cache_class));
+}
+
+static int32_t GetIntegerCacheField(ObjPtr<mirror::Class> cache_class, const char* field_name)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ ArtField* field = cache_class->FindDeclaredStaticField(field_name, "I");
+ DCHECK(field != nullptr);
+ return field->GetInt(cache_class);
+}
+
+static bool CheckIntegerCache(Thread* self,
+ ClassLinker* class_linker,
+ ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_live_objects,
+ ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_cache)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK(boot_image_cache != nullptr);
+
+ // Since we have a cache in the boot image, both java.lang.Integer and
+ // java.lang.Integer$IntegerCache must be initialized in the boot image.
+ ObjPtr<mirror::Class> cache_class =
+ LookupInitializedClass(self, class_linker, kIntegerCacheDescriptor);
+ ObjPtr<mirror::Class> integer_class =
+ LookupInitializedClass(self, class_linker, kIntegerDescriptor);
+
+ // Check that the current cache is the same as the `boot_image_cache`.
+ ObjPtr<mirror::ObjectArray<mirror::Object>> current_cache = GetIntegerCacheArray(cache_class);
+ if (current_cache != boot_image_cache) {
+ return false; // Messed up IntegerCache.cache.
+ }
+
+ // Check that the range matches the boot image cache length.
+ int32_t low = GetIntegerCacheField(cache_class, kLowFieldName);
+ int32_t high = GetIntegerCacheField(cache_class, kHighFieldName);
+ if (boot_image_cache->GetLength() != high - low + 1) {
+ return false; // Messed up IntegerCache.low or IntegerCache.high.
+ }
+
+ // Check that the elements match the boot image intrinsic objects and check their values as well.
+ ArtField* value_field = integer_class->FindDeclaredInstanceField(kValueFieldName, "I");
+ DCHECK(value_field != nullptr);
+ for (int32_t i = 0, len = boot_image_cache->GetLength(); i != len; ++i) {
+ ObjPtr<mirror::Object> boot_image_object =
+ IntrinsicObjects::GetIntegerValueOfObject(boot_image_live_objects, i);
+ DCHECK(Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boot_image_object));
+ // No need for read barrier for comparison with a boot image object.
+ ObjPtr<mirror::Object> current_object =
+ boot_image_cache->GetWithoutChecks<kVerifyNone, kWithoutReadBarrier>(i);
+ if (boot_image_object != current_object) {
+ return false; // Messed up IntegerCache.cache[i]
+ }
+ if (value_field->GetInt(boot_image_object) != low + i) {
+ return false; // Messed up IntegerCache.cache[i].value.
+ }
+ }
+
+ return true;
+}
+
void IntrinsicVisitor::ComputeIntegerValueOfLocations(HInvoke* invoke,
CodeGenerator* codegen,
Location return_location,
Location first_argument_location) {
- if (Runtime::Current()->IsAotCompiler()) {
- if (codegen->GetCompilerOptions().IsBootImage() ||
- codegen->GetCompilerOptions().GetCompilePic()) {
- // TODO(ngeoffray): Support boot image compilation.
+ // The intrinsic will call if it needs to allocate a j.l.Integer.
+ LocationSummary::CallKind call_kind = LocationSummary::kCallOnMainOnly;
+ const CompilerOptions& compiler_options = codegen->GetCompilerOptions();
+ if (compiler_options.IsBootImage()) {
+ // Piggyback on the method load kind to determine whether we can use PC-relative addressing.
+ // This should cover both the testing config (non-PIC boot image) and codegens that reject
+ // PC-relative load kinds and fall back to the runtime call.
+ if (!invoke->AsInvokeStaticOrDirect()->HasPcRelativeMethodLoadKind()) {
+ return;
+ }
+ if (!compiler_options.IsImageClass(kIntegerCacheDescriptor) ||
+ !compiler_options.IsImageClass(kIntegerDescriptor)) {
+ return;
+ }
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ Thread* self = Thread::Current();
+ ScopedObjectAccess soa(self);
+ ObjPtr<mirror::Class> cache_class = class_linker->LookupClass(
+ self, kIntegerCacheDescriptor, /* class_loader */ nullptr);
+ DCHECK(cache_class != nullptr);
+ if (UNLIKELY(!cache_class->IsInitialized())) {
+ LOG(WARNING) << "Image class " << cache_class->PrettyDescriptor() << " is uninitialized.";
return;
}
+ ObjPtr<mirror::Class> integer_class =
+ class_linker->LookupClass(self, kIntegerDescriptor, /* class_loader */ nullptr);
+ DCHECK(integer_class != nullptr);
+ if (UNLIKELY(!integer_class->IsInitialized())) {
+ LOG(WARNING) << "Image class " << integer_class->PrettyDescriptor() << " is uninitialized.";
+ return;
+ }
+ int32_t low = GetIntegerCacheField(cache_class, kLowFieldName);
+ int32_t high = GetIntegerCacheField(cache_class, kHighFieldName);
+ if (kIsDebugBuild) {
+ ObjPtr<mirror::ObjectArray<mirror::Object>> current_cache = GetIntegerCacheArray(cache_class);
+ CHECK(current_cache != nullptr);
+ CHECK_EQ(current_cache->GetLength(), high - low + 1);
+ ArtField* value_field = integer_class->FindDeclaredInstanceField(kValueFieldName, "I");
+ CHECK(value_field != nullptr);
+ for (int32_t i = 0, len = current_cache->GetLength(); i != len; ++i) {
+ ObjPtr<mirror::Object> current_object = current_cache->GetWithoutChecks(i);
+ CHECK(current_object != nullptr);
+ CHECK_EQ(value_field->GetInt(current_object), low + i);
+ }
+ }
+ if (invoke->InputAt(0)->IsIntConstant()) {
+ int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
+ if (static_cast<uint32_t>(value) - static_cast<uint32_t>(low) <
+ static_cast<uint32_t>(high - low + 1)) {
+ // No call, we shall use direct pointer to the Integer object.
+ call_kind = LocationSummary::kNoCall;
+ }
+ }
+ } else {
+ Runtime* runtime = Runtime::Current();
+ if (runtime->GetHeap()->GetBootImageSpaces().empty()) {
+ return; // Running without boot image, cannot use required boot image objects.
+ }
+ Thread* self = Thread::Current();
+ ScopedObjectAccess soa(self);
+ ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_live_objects = GetBootImageLiveObjects();
+ ObjPtr<mirror::ObjectArray<mirror::Object>> cache =
+ IntrinsicObjects::GetIntegerValueOfCache(boot_image_live_objects);
+ if (cache == nullptr) {
+ return; // No cache in the boot image.
+ }
+ if (runtime->UseJitCompilation()) {
+ if (!CheckIntegerCache(self, runtime->GetClassLinker(), boot_image_live_objects, cache)) {
+ return; // The cache was somehow messed up, probably by using reflection.
+ }
+ } else {
+ DCHECK(runtime->IsAotCompiler());
+ DCHECK(CheckIntegerCache(self, runtime->GetClassLinker(), boot_image_live_objects, cache));
+ if (invoke->InputAt(0)->IsIntConstant()) {
+ int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
+ // Retrieve the `value` from the lowest cached Integer.
+ ObjPtr<mirror::Object> low_integer =
+ IntrinsicObjects::GetIntegerValueOfObject(boot_image_live_objects, 0u);
+ ObjPtr<mirror::Class> integer_class =
+ low_integer->GetClass<kVerifyNone, kWithoutReadBarrier>();
+ ArtField* value_field = integer_class->FindDeclaredInstanceField(kValueFieldName, "I");
+ DCHECK(value_field != nullptr);
+ int32_t low = value_field->GetInt(low_integer);
+ if (static_cast<uint32_t>(value) - static_cast<uint32_t>(low) <
+ static_cast<uint32_t>(cache->GetLength())) {
+ // No call, we shall use direct pointer to the Integer object. Note that we cannot
+ // do this for JIT as the "low" can change through reflection before emitting the code.
+ call_kind = LocationSummary::kNoCall;
+ }
+ }
+ }
}
- IntegerValueOfInfo info = ComputeIntegerValueOfInfo();
-
- // Most common case is that we have found all we needed (classes are initialized
- // and in the boot image). Bail if not.
- if (info.integer_cache == nullptr ||
- info.integer == nullptr ||
- info.cache == nullptr ||
- info.value_offset == 0 ||
- // low and high cannot be 0, per the spec.
- info.low == 0 ||
- info.high == 0) {
- LOG(INFO) << "Integer.valueOf will not be optimized";
- return;
+ ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
+ LocationSummary* locations = new (allocator) LocationSummary(invoke, call_kind, kIntrinsified);
+ if (call_kind == LocationSummary::kCallOnMainOnly) {
+ locations->SetInAt(0, Location::RegisterOrConstant(invoke->InputAt(0)));
+ locations->AddTemp(first_argument_location);
+ locations->SetOut(return_location);
+ } else {
+ locations->SetInAt(0, Location::ConstantLocation(invoke->InputAt(0)->AsConstant()));
+ locations->SetOut(Location::RequiresRegister());
}
+}
- // The intrinsic will call if it needs to allocate a j.l.Integer.
- LocationSummary* locations = new (invoke->GetBlock()->GetGraph()->GetAllocator()) LocationSummary(
- invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
- if (!invoke->InputAt(0)->IsConstant()) {
- locations->SetInAt(0, Location::RequiresRegister());
- }
- locations->AddTemp(first_argument_location);
- locations->SetOut(return_location);
+static int32_t GetIntegerCacheLowFromIntegerCache(Thread* self, ClassLinker* class_linker)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ ObjPtr<mirror::Class> cache_class =
+ LookupInitializedClass(self, class_linker, kIntegerCacheDescriptor);
+ return GetIntegerCacheField(cache_class, kLowFieldName);
+}
+
+static uint32_t CalculateBootImageOffset(ObjPtr<mirror::Object> object)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ gc::Heap* heap = Runtime::Current()->GetHeap();
+ DCHECK(heap->ObjectIsInBootImageSpace(object));
+ return reinterpret_cast<const uint8_t*>(object.Ptr()) - heap->GetBootImageSpaces()[0]->Begin();
}
-IntrinsicVisitor::IntegerValueOfInfo IntrinsicVisitor::ComputeIntegerValueOfInfo() {
+inline IntrinsicVisitor::IntegerValueOfInfo::IntegerValueOfInfo()
+ : value_offset(0),
+ low(0),
+ length(0u),
+ integer_boot_image_offset(kInvalidReference),
+ value_boot_image_reference(kInvalidReference) {}
+
+IntrinsicVisitor::IntegerValueOfInfo IntrinsicVisitor::ComputeIntegerValueOfInfo(
+ HInvoke* invoke, const CompilerOptions& compiler_options) {
// Note that we could cache all of the data looked up here. but there's no good
// location for it. We don't want to add it to WellKnownClasses, to avoid creating global
// jni values. Adding it as state to the compiler singleton seems like wrong
// separation of concerns.
// The need for this data should be pretty rare though.
- // The most common case is that the classes are in the boot image and initialized,
- // which is easy to generate code for. We bail if not.
- Thread* self = Thread::Current();
- ScopedObjectAccess soa(self);
+ // Note that at this point we can no longer abort the code generation. Therefore,
+ // we need to provide data that shall not lead to a crash even if the fields were
+ // modified through reflection since ComputeIntegerValueOfLocations() when JITting.
+
Runtime* runtime = Runtime::Current();
ClassLinker* class_linker = runtime->GetClassLinker();
- gc::Heap* heap = runtime->GetHeap();
- IntegerValueOfInfo info;
- info.integer_cache =
- class_linker->FindSystemClass(self, "Ljava/lang/Integer$IntegerCache;").Ptr();
- if (info.integer_cache == nullptr) {
- self->ClearException();
- return info;
- }
- if (!heap->ObjectIsInBootImageSpace(info.integer_cache) || !info.integer_cache->IsInitialized()) {
- // Optimization only works if the class is initialized and in the boot image.
- return info;
- }
- info.integer = class_linker->FindSystemClass(self, "Ljava/lang/Integer;").Ptr();
- if (info.integer == nullptr) {
- self->ClearException();
- return info;
- }
- if (!heap->ObjectIsInBootImageSpace(info.integer) || !info.integer->IsInitialized()) {
- // Optimization only works if the class is initialized and in the boot image.
- return info;
- }
-
- ArtField* field = info.integer_cache->FindDeclaredStaticField("cache", "[Ljava/lang/Integer;");
- if (field == nullptr) {
- return info;
- }
- info.cache = static_cast<mirror::ObjectArray<mirror::Object>*>(
- field->GetObject(info.integer_cache).Ptr());
- if (info.cache == nullptr) {
- return info;
- }
-
- if (!heap->ObjectIsInBootImageSpace(info.cache)) {
- // Optimization only works if the object is in the boot image.
- return info;
- }
-
- field = info.integer->FindDeclaredInstanceField("value", "I");
- if (field == nullptr) {
- return info;
- }
- info.value_offset = field->GetOffset().Int32Value();
-
- field = info.integer_cache->FindDeclaredStaticField("low", "I");
- if (field == nullptr) {
- return info;
- }
- info.low = field->GetInt(info.integer_cache);
+ Thread* self = Thread::Current();
+ ScopedObjectAccess soa(self);
- field = info.integer_cache->FindDeclaredStaticField("high", "I");
- if (field == nullptr) {
- return info;
+ IntegerValueOfInfo info;
+ if (compiler_options.IsBootImage()) {
+ ObjPtr<mirror::Class> integer_class =
+ LookupInitializedClass(self, class_linker, kIntegerDescriptor);
+ ArtField* value_field = integer_class->FindDeclaredInstanceField(kValueFieldName, "I");
+ DCHECK(value_field != nullptr);
+ info.value_offset = value_field->GetOffset().Uint32Value();
+ ObjPtr<mirror::Class> cache_class =
+ LookupInitializedClass(self, class_linker, kIntegerCacheDescriptor);
+ info.low = GetIntegerCacheField(cache_class, kLowFieldName);
+ int32_t high = GetIntegerCacheField(cache_class, kHighFieldName);
+ info.length = dchecked_integral_cast<uint32_t>(high - info.low + 1);
+
+ info.integer_boot_image_offset = IntegerValueOfInfo::kInvalidReference;
+ if (invoke->InputAt(0)->IsIntConstant()) {
+ int32_t input_value = invoke->InputAt(0)->AsIntConstant()->GetValue();
+ uint32_t index = static_cast<uint32_t>(input_value) - static_cast<uint32_t>(info.low);
+ if (index < static_cast<uint32_t>(info.length)) {
+ info.value_boot_image_reference = IntrinsicObjects::EncodePatch(
+ IntrinsicObjects::PatchType::kIntegerValueOfObject, index);
+ } else {
+ // Not in the cache.
+ info.value_boot_image_reference = IntegerValueOfInfo::kInvalidReference;
+ }
+ } else {
+ info.array_data_boot_image_reference =
+ IntrinsicObjects::EncodePatch(IntrinsicObjects::PatchType::kIntegerValueOfArray);
+ }
+ } else {
+ ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_live_objects = GetBootImageLiveObjects();
+ ObjPtr<mirror::Object> low_integer =
+ IntrinsicObjects::GetIntegerValueOfObject(boot_image_live_objects, 0u);
+ ObjPtr<mirror::Class> integer_class = low_integer->GetClass<kVerifyNone, kWithoutReadBarrier>();
+ ArtField* value_field = integer_class->FindDeclaredInstanceField(kValueFieldName, "I");
+ DCHECK(value_field != nullptr);
+ info.value_offset = value_field->GetOffset().Uint32Value();
+ if (runtime->UseJitCompilation()) {
+ // Use the current `IntegerCache.low` for JIT to avoid truly surprising behavior if the
+ // code messes up the `value` field in the lowest cached Integer using reflection.
+ info.low = GetIntegerCacheLowFromIntegerCache(self, class_linker);
+ } else {
+ // For app AOT, the `low_integer->value` should be the same as `IntegerCache.low`.
+ info.low = value_field->GetInt(low_integer);
+ DCHECK_EQ(info.low, GetIntegerCacheLowFromIntegerCache(self, class_linker));
+ }
+ // Do not look at `IntegerCache.high`, use the immutable length of the cache array instead.
+ info.length = dchecked_integral_cast<uint32_t>(
+ IntrinsicObjects::GetIntegerValueOfCache(boot_image_live_objects)->GetLength());
+
+ info.integer_boot_image_offset = CalculateBootImageOffset(integer_class);
+ if (invoke->InputAt(0)->IsIntConstant()) {
+ int32_t input_value = invoke->InputAt(0)->AsIntConstant()->GetValue();
+ uint32_t index = static_cast<uint32_t>(input_value) - static_cast<uint32_t>(info.low);
+ if (index < static_cast<uint32_t>(info.length)) {
+ ObjPtr<mirror::Object> integer =
+ IntrinsicObjects::GetIntegerValueOfObject(boot_image_live_objects, index);
+ info.value_boot_image_reference = CalculateBootImageOffset(integer);
+ } else {
+ // Not in the cache.
+ info.value_boot_image_reference = IntegerValueOfInfo::kInvalidReference;
+ }
+ } else {
+ info.array_data_boot_image_reference =
+ CalculateBootImageOffset(boot_image_live_objects) +
+ IntrinsicObjects::GetIntegerValueOfArrayDataOffset(boot_image_live_objects).Uint32Value();
+ }
}
- info.high = field->GetInt(info.integer_cache);
- DCHECK_EQ(info.cache->GetLength(), info.high - info.low + 1);
return info;
}
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 30cffac015..993648f765 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -126,33 +126,41 @@ class IntrinsicVisitor : public ValueObject {
Location return_location,
Location first_argument_location);
- // Temporary data structure for holding Integer.valueOf useful data. We only
- // use it if the mirror::Class* are in the boot image, so it is fine to keep raw
- // mirror::Class pointers in this structure.
+ // Temporary data structure for holding Integer.valueOf data for generating code.
+ // We only use it if the boot image contains the IntegerCache objects.
struct IntegerValueOfInfo {
- IntegerValueOfInfo()
- : integer_cache(nullptr),
- integer(nullptr),
- cache(nullptr),
- low(0),
- high(0),
- value_offset(0) {}
-
- // The java.lang.IntegerCache class.
- mirror::Class* integer_cache;
- // The java.lang.Integer class.
- mirror::Class* integer;
- // Value of java.lang.IntegerCache#cache.
- mirror::ObjectArray<mirror::Object>* cache;
- // Value of java.lang.IntegerCache#low.
+ static constexpr uint32_t kInvalidReference = static_cast<uint32_t>(-1);
+
+ IntegerValueOfInfo();
+
+ // Offset of the Integer.value field for initializing a newly allocated instance.
+ uint32_t value_offset;
+ // The low value in the cache.
int32_t low;
- // Value of java.lang.IntegerCache#high.
- int32_t high;
- // The offset of java.lang.Integer.value.
- int32_t value_offset;
+ // The length of the cache array.
+ uint32_t length;
+
+ // Boot image offset of java.lang.Integer for allocating an instance.
+ uint32_t integer_boot_image_offset; // Set to kInvalidReference when compiling the boot image.
+
+ // This union contains references to the boot image. For app AOT or JIT compilation,
+ // these are the boot image offsets of the target. For boot image compilation, the
+ // location shall be known only at link time, so we encode a symbolic reference using
+ // IntrinsicObjects::EncodePatch().
+ union {
+ // The target value for a constant input in the cache range. If the constant input
+ // is out of range (use `low` and `length` to check), this value is bogus (set to
+ // kInvalidReference) and the code must allocate a new Integer.
+ uint32_t value_boot_image_reference;
+
+ // The cache array data used for a non-constant input in the cache range.
+ // If the input is out of range, the code must allocate a new Integer.
+ uint32_t array_data_boot_image_reference;
+ };
};
- static IntegerValueOfInfo ComputeIntegerValueOfInfo();
+ static IntegerValueOfInfo ComputeIntegerValueOfInfo(
+ HInvoke* invoke, const CompilerOptions& compiler_options);
protected:
IntrinsicVisitor() {}
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index c3d643a7d1..4b2bcc8ca8 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -2791,33 +2791,27 @@ void IntrinsicLocationsBuilderARM64::VisitIntegerValueOf(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorARM64::VisitIntegerValueOf(HInvoke* invoke) {
- IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo();
+ IntrinsicVisitor::IntegerValueOfInfo info =
+ IntrinsicVisitor::ComputeIntegerValueOfInfo(invoke, codegen_->GetCompilerOptions());
LocationSummary* locations = invoke->GetLocations();
MacroAssembler* masm = GetVIXLAssembler();
Register out = RegisterFrom(locations->Out(), DataType::Type::kReference);
UseScratchRegisterScope temps(masm);
Register temp = temps.AcquireW();
- InvokeRuntimeCallingConvention calling_convention;
- Register argument = calling_convention.GetRegisterAt(0);
if (invoke->InputAt(0)->IsConstant()) {
int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
- if (value >= info.low && value <= info.high) {
+ if (static_cast<uint32_t>(value - info.low) < info.length) {
// Just embed the j.l.Integer in the code.
- ScopedObjectAccess soa(Thread::Current());
- mirror::Object* boxed = info.cache->Get(value + (-info.low));
- DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed));
- uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed));
- __ Ldr(out.W(), codegen_->DeduplicateBootImageAddressLiteral(address));
+ DCHECK_NE(info.value_boot_image_reference, IntegerValueOfInfo::kInvalidReference);
+ codegen_->LoadBootImageAddress(out, info.value_boot_image_reference);
} else {
+ DCHECK(locations->CanCall());
// Allocate and initialize a new j.l.Integer.
// TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
// JIT object table.
- uint32_t address =
- dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
- __ Ldr(argument.W(), codegen_->DeduplicateBootImageAddressLiteral(address));
- codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
+ codegen_->AllocateInstanceForIntrinsic(invoke->AsInvokeStaticOrDirect(),
+ info.integer_boot_image_offset);
__ Mov(temp.W(), value);
__ Str(temp.W(), HeapOperand(out.W(), info.value_offset));
// `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation
@@ -2825,16 +2819,15 @@ void IntrinsicCodeGeneratorARM64::VisitIntegerValueOf(HInvoke* invoke) {
codegen_->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
}
} else {
+ DCHECK(locations->CanCall());
Register in = RegisterFrom(locations->InAt(0), DataType::Type::kInt32);
// Check bounds of our cache.
__ Add(out.W(), in.W(), -info.low);
- __ Cmp(out.W(), info.high - info.low + 1);
+ __ Cmp(out.W(), info.length);
vixl::aarch64::Label allocate, done;
__ B(&allocate, hs);
// If the value is within the bounds, load the j.l.Integer directly from the array.
- uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
- uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache));
- __ Ldr(temp.W(), codegen_->DeduplicateBootImageAddressLiteral(data_offset + address));
+ codegen_->LoadBootImageAddress(temp, info.array_data_boot_image_reference);
MemOperand source = HeapOperand(
temp, out.X(), LSL, DataType::SizeShift(DataType::Type::kReference));
codegen_->Load(DataType::Type::kReference, out, source);
@@ -2842,10 +2835,8 @@ void IntrinsicCodeGeneratorARM64::VisitIntegerValueOf(HInvoke* invoke) {
__ B(&done);
__ Bind(&allocate);
// Otherwise allocate and initialize a new j.l.Integer.
- address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
- __ Ldr(argument.W(), codegen_->DeduplicateBootImageAddressLiteral(address));
- codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
+ codegen_->AllocateInstanceForIntrinsic(invoke->AsInvokeStaticOrDirect(),
+ info.integer_boot_image_offset);
__ Str(in.W(), HeapOperand(out.W(), info.value_offset));
// `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation
// one.
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index fecf1ccbfa..f11e5a1989 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -2940,33 +2940,27 @@ void IntrinsicLocationsBuilderARMVIXL::VisitIntegerValueOf(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorARMVIXL::VisitIntegerValueOf(HInvoke* invoke) {
- IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo();
+ IntrinsicVisitor::IntegerValueOfInfo info =
+ IntrinsicVisitor::ComputeIntegerValueOfInfo(invoke, codegen_->GetCompilerOptions());
LocationSummary* locations = invoke->GetLocations();
ArmVIXLAssembler* const assembler = GetAssembler();
vixl32::Register out = RegisterFrom(locations->Out());
UseScratchRegisterScope temps(assembler->GetVIXLAssembler());
vixl32::Register temp = temps.Acquire();
- InvokeRuntimeCallingConventionARMVIXL calling_convention;
- vixl32::Register argument = calling_convention.GetRegisterAt(0);
if (invoke->InputAt(0)->IsConstant()) {
int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
- if (value >= info.low && value <= info.high) {
+ if (static_cast<uint32_t>(value - info.low) < info.length) {
// Just embed the j.l.Integer in the code.
- ScopedObjectAccess soa(Thread::Current());
- mirror::Object* boxed = info.cache->Get(value + (-info.low));
- DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed));
- uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed));
- __ Ldr(out, codegen_->DeduplicateBootImageAddressLiteral(address));
+ DCHECK_NE(info.value_boot_image_reference, IntegerValueOfInfo::kInvalidReference);
+ codegen_->LoadBootImageAddress(out, info.value_boot_image_reference);
} else {
+ DCHECK(locations->CanCall());
// Allocate and initialize a new j.l.Integer.
// TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
// JIT object table.
- uint32_t address =
- dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
- __ Ldr(argument, codegen_->DeduplicateBootImageAddressLiteral(address));
- codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
+ codegen_->AllocateInstanceForIntrinsic(invoke->AsInvokeStaticOrDirect(),
+ info.integer_boot_image_offset);
__ Mov(temp, value);
assembler->StoreToOffset(kStoreWord, temp, out, info.value_offset);
// `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation
@@ -2974,25 +2968,22 @@ void IntrinsicCodeGeneratorARMVIXL::VisitIntegerValueOf(HInvoke* invoke) {
codegen_->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
}
} else {
+ DCHECK(locations->CanCall());
vixl32::Register in = RegisterFrom(locations->InAt(0));
// Check bounds of our cache.
__ Add(out, in, -info.low);
- __ Cmp(out, info.high - info.low + 1);
+ __ Cmp(out, info.length);
vixl32::Label allocate, done;
__ B(hs, &allocate, /* is_far_target */ false);
// If the value is within the bounds, load the j.l.Integer directly from the array.
- uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
- uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache));
- __ Ldr(temp, codegen_->DeduplicateBootImageAddressLiteral(data_offset + address));
+ codegen_->LoadBootImageAddress(temp, info.array_data_boot_image_reference);
codegen_->LoadFromShiftedRegOffset(DataType::Type::kReference, locations->Out(), temp, out);
assembler->MaybeUnpoisonHeapReference(out);
__ B(&done);
__ Bind(&allocate);
// Otherwise allocate and initialize a new j.l.Integer.
- address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
- __ Ldr(argument, codegen_->DeduplicateBootImageAddressLiteral(address));
- codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
+ codegen_->AllocateInstanceForIntrinsic(invoke->AsInvokeStaticOrDirect(),
+ info.integer_boot_image_offset);
assembler->StoreToOffset(kStoreWord, in, out, info.value_offset);
// `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation
// one.
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index ae248a3e5c..01d9f962f2 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -2601,59 +2601,50 @@ void IntrinsicLocationsBuilderMIPS::VisitIntegerValueOf(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorMIPS::VisitIntegerValueOf(HInvoke* invoke) {
- IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo();
+ IntrinsicVisitor::IntegerValueOfInfo info =
+ IntrinsicVisitor::ComputeIntegerValueOfInfo(invoke, codegen_->GetCompilerOptions());
LocationSummary* locations = invoke->GetLocations();
MipsAssembler* assembler = GetAssembler();
InstructionCodeGeneratorMIPS* icodegen =
down_cast<InstructionCodeGeneratorMIPS*>(codegen_->GetInstructionVisitor());
Register out = locations->Out().AsRegister<Register>();
- InvokeRuntimeCallingConvention calling_convention;
if (invoke->InputAt(0)->IsConstant()) {
int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
- if (value >= info.low && value <= info.high) {
+ if (static_cast<uint32_t>(value - info.low) < info.length) {
// Just embed the j.l.Integer in the code.
- ScopedObjectAccess soa(Thread::Current());
- mirror::Object* boxed = info.cache->Get(value + (-info.low));
- DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed));
- uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed));
- __ LoadConst32(out, address);
+ DCHECK_NE(info.value_boot_image_reference, IntegerValueOfInfo::kInvalidReference);
+ codegen_->LoadBootImageAddress(out, info.value_boot_image_reference);
} else {
+ DCHECK(locations->CanCall());
// Allocate and initialize a new j.l.Integer.
// TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
// JIT object table.
- uint32_t address =
- dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
- __ LoadConst32(calling_convention.GetRegisterAt(0), address);
- codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
+ codegen_->AllocateInstanceForIntrinsic(invoke->AsInvokeStaticOrDirect(),
+ info.integer_boot_image_offset);
__ StoreConstToOffset(kStoreWord, value, out, info.value_offset, TMP);
// `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation
// one.
icodegen->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
}
} else {
+ DCHECK(locations->CanCall());
Register in = locations->InAt(0).AsRegister<Register>();
MipsLabel allocate, done;
- int32_t count = static_cast<uint32_t>(info.high) - info.low + 1;
- // Is (info.low <= in) && (in <= info.high)?
__ Addiu32(out, in, -info.low);
- // As unsigned quantities is out < (info.high - info.low + 1)?
- if (IsInt<16>(count)) {
- __ Sltiu(AT, out, count);
+ // As unsigned quantities is out < info.length ?
+ if (IsUint<15>(info.length)) {
+ __ Sltiu(AT, out, info.length);
} else {
- __ LoadConst32(AT, count);
+ __ LoadConst32(AT, info.length);
__ Sltu(AT, out, AT);
}
- // Branch if out >= (info.high - info.low + 1).
- // This means that "in" is outside of the range [info.low, info.high].
+ // Branch if out >= info.length. This means that "in" is outside of the valid range.
__ Beqz(AT, &allocate);
// If the value is within the bounds, load the j.l.Integer directly from the array.
- uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
- uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache));
- __ LoadConst32(TMP, data_offset + address);
+ codegen_->LoadBootImageAddress(TMP, info.array_data_boot_image_reference);
__ ShiftAndAdd(out, out, TMP, TIMES_4);
__ Lw(out, out, 0);
__ MaybeUnpoisonHeapReference(out);
@@ -2661,10 +2652,8 @@ void IntrinsicCodeGeneratorMIPS::VisitIntegerValueOf(HInvoke* invoke) {
__ Bind(&allocate);
// Otherwise allocate and initialize a new j.l.Integer.
- address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
- __ LoadConst32(calling_convention.GetRegisterAt(0), address);
- codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
+ codegen_->AllocateInstanceForIntrinsic(invoke->AsInvokeStaticOrDirect(),
+ info.integer_boot_image_offset);
__ StoreToOffset(kStoreWord, in, out, info.value_offset);
// `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation
// one.
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 9a9ae714bc..0bd69c6ec8 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -2267,54 +2267,45 @@ void IntrinsicLocationsBuilderMIPS64::VisitIntegerValueOf(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorMIPS64::VisitIntegerValueOf(HInvoke* invoke) {
- IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo();
+ IntrinsicVisitor::IntegerValueOfInfo info =
+ IntrinsicVisitor::ComputeIntegerValueOfInfo(invoke, codegen_->GetCompilerOptions());
LocationSummary* locations = invoke->GetLocations();
Mips64Assembler* assembler = GetAssembler();
InstructionCodeGeneratorMIPS64* icodegen =
down_cast<InstructionCodeGeneratorMIPS64*>(codegen_->GetInstructionVisitor());
GpuRegister out = locations->Out().AsRegister<GpuRegister>();
- InvokeRuntimeCallingConvention calling_convention;
if (invoke->InputAt(0)->IsConstant()) {
int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
- if (value >= info.low && value <= info.high) {
+ if (static_cast<uint32_t>(value - info.low) < info.length) {
// Just embed the j.l.Integer in the code.
- ScopedObjectAccess soa(Thread::Current());
- mirror::Object* boxed = info.cache->Get(value + (-info.low));
- DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed));
- uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed));
- __ LoadConst64(out, address);
+ DCHECK_NE(info.value_boot_image_reference, IntegerValueOfInfo::kInvalidReference);
+ codegen_->LoadBootImageAddress(out, info.value_boot_image_reference);
} else {
+ DCHECK(locations->CanCall());
// Allocate and initialize a new j.l.Integer.
// TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
// JIT object table.
- uint32_t address =
- dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
- __ LoadConst64(calling_convention.GetRegisterAt(0), address);
- codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
+ codegen_->AllocateInstanceForIntrinsic(invoke->AsInvokeStaticOrDirect(),
+ info.integer_boot_image_offset);
__ StoreConstToOffset(kStoreWord, value, out, info.value_offset, TMP);
// `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation
// one.
icodegen->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
}
} else {
+ DCHECK(locations->CanCall());
GpuRegister in = locations->InAt(0).AsRegister<GpuRegister>();
Mips64Label allocate, done;
- int32_t count = static_cast<uint32_t>(info.high) - info.low + 1;
- // Is (info.low <= in) && (in <= info.high)?
__ Addiu32(out, in, -info.low);
- // As unsigned quantities is out < (info.high - info.low + 1)?
- __ LoadConst32(AT, count);
- // Branch if out >= (info.high - info.low + 1).
- // This means that "in" is outside of the range [info.low, info.high].
+ // As unsigned quantities is out < info.length ?
+ __ LoadConst32(AT, info.length);
+ // Branch if out >= info.length . This means that "in" is outside of the valid range.
__ Bgeuc(out, AT, &allocate);
// If the value is within the bounds, load the j.l.Integer directly from the array.
- uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
- uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache));
- __ LoadConst64(TMP, data_offset + address);
+ codegen_->LoadBootImageAddress(TMP, info.array_data_boot_image_reference);
__ Dlsa(out, out, TMP, TIMES_4);
__ Lwu(out, out, 0);
__ MaybeUnpoisonHeapReference(out);
@@ -2322,10 +2313,8 @@ void IntrinsicCodeGeneratorMIPS64::VisitIntegerValueOf(HInvoke* invoke) {
__ Bind(&allocate);
// Otherwise allocate and initialize a new j.l.Integer.
- address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
- __ LoadConst64(calling_convention.GetRegisterAt(0), address);
- codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
+ codegen_->AllocateInstanceForIntrinsic(invoke->AsInvokeStaticOrDirect(),
+ info.integer_boot_image_offset);
__ StoreToOffset(kStoreWord, in, out, info.value_offset);
// `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation
// one.
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index f84a33bb8e..98cea35af1 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -2851,16 +2851,30 @@ void IntrinsicCodeGeneratorX86::VisitSystemArrayCopy(HInvoke* invoke) {
}
void IntrinsicLocationsBuilderX86::VisitIntegerValueOf(HInvoke* invoke) {
+ DCHECK(invoke->IsInvokeStaticOrDirect());
InvokeRuntimeCallingConvention calling_convention;
IntrinsicVisitor::ComputeIntegerValueOfLocations(
invoke,
codegen_,
Location::RegisterLocation(EAX),
Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+
+ LocationSummary* locations = invoke->GetLocations();
+ if (locations != nullptr) {
+ HInvokeStaticOrDirect* invoke_static_or_direct = invoke->AsInvokeStaticOrDirect();
+ if (invoke_static_or_direct->HasSpecialInput() &&
+ invoke->InputAt(invoke_static_or_direct->GetSpecialInputIndex())
+ ->IsX86ComputeBaseMethodAddress()) {
+ locations->SetInAt(invoke_static_or_direct->GetSpecialInputIndex(),
+ Location::RequiresRegister());
+ }
+ }
}
void IntrinsicCodeGeneratorX86::VisitIntegerValueOf(HInvoke* invoke) {
- IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo();
+ DCHECK(invoke->IsInvokeStaticOrDirect());
+ IntrinsicVisitor::IntegerValueOfInfo info =
+ IntrinsicVisitor::ComputeIntegerValueOfInfo(invoke, codegen_->GetCompilerOptions());
LocationSummary* locations = invoke->GetLocations();
X86Assembler* assembler = GetAssembler();
@@ -2868,42 +2882,58 @@ void IntrinsicCodeGeneratorX86::VisitIntegerValueOf(HInvoke* invoke) {
InvokeRuntimeCallingConvention calling_convention;
if (invoke->InputAt(0)->IsConstant()) {
int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
- if (value >= info.low && value <= info.high) {
+ if (static_cast<uint32_t>(value - info.low) < info.length) {
// Just embed the j.l.Integer in the code.
- ScopedObjectAccess soa(Thread::Current());
- mirror::Object* boxed = info.cache->Get(value + (-info.low));
- DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed));
- uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed));
- __ movl(out, Immediate(address));
+ DCHECK_NE(info.value_boot_image_reference, IntegerValueOfInfo::kInvalidReference);
+ codegen_->LoadBootImageAddress(
+ out, info.value_boot_image_reference, invoke->AsInvokeStaticOrDirect());
} else {
+ DCHECK(locations->CanCall());
// Allocate and initialize a new j.l.Integer.
// TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
// JIT object table.
- uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
- __ movl(calling_convention.GetRegisterAt(0), Immediate(address));
- codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
+ codegen_->AllocateInstanceForIntrinsic(invoke->AsInvokeStaticOrDirect(),
+ info.integer_boot_image_offset);
__ movl(Address(out, info.value_offset), Immediate(value));
}
} else {
+ DCHECK(locations->CanCall());
Register in = locations->InAt(0).AsRegister<Register>();
// Check bounds of our cache.
__ leal(out, Address(in, -info.low));
- __ cmpl(out, Immediate(info.high - info.low + 1));
+ __ cmpl(out, Immediate(info.length));
NearLabel allocate, done;
__ j(kAboveEqual, &allocate);
// If the value is within the bounds, load the j.l.Integer directly from the array.
- uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
- uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache));
- __ movl(out, Address(out, TIMES_4, data_offset + address));
+ constexpr size_t kElementSize = sizeof(mirror::HeapReference<mirror::Object>);
+ static_assert((1u << TIMES_4) == sizeof(mirror::HeapReference<mirror::Object>),
+ "Check heap reference size.");
+ if (codegen_->GetCompilerOptions().IsBootImage()) {
+ DCHECK_EQ(invoke->InputCount(), invoke->GetNumberOfArguments() + 1u);
+ size_t method_address_index = invoke->AsInvokeStaticOrDirect()->GetSpecialInputIndex();
+ HX86ComputeBaseMethodAddress* method_address =
+ invoke->InputAt(method_address_index)->AsX86ComputeBaseMethodAddress();
+ DCHECK(method_address != nullptr);
+ Register method_address_reg =
+ invoke->GetLocations()->InAt(method_address_index).AsRegister<Register>();
+ __ movl(out, Address(method_address_reg, out, TIMES_4, CodeGeneratorX86::kDummy32BitOffset));
+ codegen_->RecordBootImageIntrinsicPatch(method_address, info.array_data_boot_image_reference);
+ } else {
+ // Note: We're about to clobber the index in `out`, so we need to use `in` and
+ // adjust the offset accordingly.
+ uint32_t mid_array_boot_image_offset =
+ info.array_data_boot_image_reference - info.low * kElementSize;
+ codegen_->LoadBootImageAddress(
+ out, mid_array_boot_image_offset, invoke->AsInvokeStaticOrDirect());
+ DCHECK_NE(out, in);
+ __ movl(out, Address(out, in, TIMES_4, 0));
+ }
__ MaybeUnpoisonHeapReference(out);
__ jmp(&done);
__ Bind(&allocate);
// Otherwise allocate and initialize a new j.l.Integer.
- address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
- __ movl(calling_convention.GetRegisterAt(0), Immediate(address));
- codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
+ codegen_->AllocateInstanceForIntrinsic(invoke->AsInvokeStaticOrDirect(),
+ info.integer_boot_image_offset);
__ movl(Address(out, info.value_offset), in);
__ Bind(&done);
}
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 7627dc9490..ac6eab0834 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -2660,58 +2660,49 @@ void IntrinsicLocationsBuilderX86_64::VisitIntegerValueOf(HInvoke* invoke) {
}
void IntrinsicCodeGeneratorX86_64::VisitIntegerValueOf(HInvoke* invoke) {
- IntrinsicVisitor::IntegerValueOfInfo info = IntrinsicVisitor::ComputeIntegerValueOfInfo();
+ IntrinsicVisitor::IntegerValueOfInfo info =
+ IntrinsicVisitor::ComputeIntegerValueOfInfo(invoke, codegen_->GetCompilerOptions());
LocationSummary* locations = invoke->GetLocations();
X86_64Assembler* assembler = GetAssembler();
CpuRegister out = locations->Out().AsRegister<CpuRegister>();
InvokeRuntimeCallingConvention calling_convention;
- if (invoke->InputAt(0)->IsConstant()) {
+ CpuRegister argument = CpuRegister(calling_convention.GetRegisterAt(0));
+ if (invoke->InputAt(0)->IsIntConstant()) {
int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
- if (value >= info.low && value <= info.high) {
+ if (static_cast<uint32_t>(value - info.low) < info.length) {
// Just embed the j.l.Integer in the code.
- ScopedObjectAccess soa(Thread::Current());
- mirror::Object* boxed = info.cache->Get(value + (-info.low));
- DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed));
- uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed));
- __ movl(out, Immediate(static_cast<int32_t>(address)));
+ DCHECK_NE(info.value_boot_image_reference, IntegerValueOfInfo::kInvalidReference);
+ codegen_->LoadBootImageAddress(out, info.value_boot_image_reference);
} else {
+ DCHECK(locations->CanCall());
// Allocate and initialize a new j.l.Integer.
// TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
// JIT object table.
- CpuRegister argument = CpuRegister(calling_convention.GetRegisterAt(0));
- uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
- __ movl(argument, Immediate(static_cast<int32_t>(address)));
- codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
+ codegen_->AllocateInstanceForIntrinsic(invoke->AsInvokeStaticOrDirect(),
+ info.integer_boot_image_offset);
__ movl(Address(out, info.value_offset), Immediate(value));
}
} else {
+ DCHECK(locations->CanCall());
CpuRegister in = locations->InAt(0).AsRegister<CpuRegister>();
// Check bounds of our cache.
__ leal(out, Address(in, -info.low));
- __ cmpl(out, Immediate(info.high - info.low + 1));
+ __ cmpl(out, Immediate(info.length));
NearLabel allocate, done;
__ j(kAboveEqual, &allocate);
// If the value is within the bounds, load the j.l.Integer directly from the array.
- uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
- uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache));
- if (data_offset + address <= std::numeric_limits<int32_t>::max()) {
- __ movl(out, Address(out, TIMES_4, data_offset + address));
- } else {
- CpuRegister temp = CpuRegister(calling_convention.GetRegisterAt(0));
- __ movl(temp, Immediate(static_cast<int32_t>(data_offset + address)));
- __ movl(out, Address(temp, out, TIMES_4, 0));
- }
+ DCHECK_NE(out.AsRegister(), argument.AsRegister());
+ codegen_->LoadBootImageAddress(argument, info.array_data_boot_image_reference);
+ static_assert((1u << TIMES_4) == sizeof(mirror::HeapReference<mirror::Object>),
+ "Check heap reference size.");
+ __ movl(out, Address(argument, out, TIMES_4, 0));
__ MaybeUnpoisonHeapReference(out);
__ jmp(&done);
__ Bind(&allocate);
// Otherwise allocate and initialize a new j.l.Integer.
- CpuRegister argument = CpuRegister(calling_convention.GetRegisterAt(0));
- address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer));
- __ movl(argument, Immediate(static_cast<int32_t>(address)));
- codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
- CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
+ codegen_->AllocateInstanceForIntrinsic(invoke->AsInvokeStaticOrDirect(),
+ info.integer_boot_image_offset);
__ movl(Address(out, info.value_offset), in);
__ Bind(&done);
}
diff --git a/compiler/optimizing/linearize_test.cc b/compiler/optimizing/linearize_test.cc
index 9fa5b74c62..50bfe843b5 100644
--- a/compiler/optimizing/linearize_test.cc
+++ b/compiler/optimizing/linearize_test.cc
@@ -16,11 +16,9 @@
#include <fstream>
-#include "arch/x86/instruction_set_features_x86.h"
#include "base/arena_allocator.h"
#include "builder.h"
#include "code_generator.h"
-#include "code_generator_x86.h"
#include "dex/dex_file.h"
#include "dex/dex_instruction.h"
#include "driver/compiler_options.h"
@@ -43,10 +41,8 @@ template <size_t number_of_blocks>
void LinearizeTest::TestCode(const std::vector<uint16_t>& data,
const uint32_t (&expected_order)[number_of_blocks]) {
HGraph* graph = CreateCFG(data);
- std::unique_ptr<const X86InstructionSetFeatures> features_x86(
- X86InstructionSetFeatures::FromCppDefines());
- x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
+ std::unique_ptr<CodeGenerator> codegen = CodeGenerator::Create(graph, *compiler_options_);
+ SsaLivenessAnalysis liveness(graph, codegen.get(), GetScopedAllocator());
liveness.Analyze();
ASSERT_EQ(graph->GetLinearOrder().size(), number_of_blocks);
diff --git a/compiler/optimizing/live_ranges_test.cc b/compiler/optimizing/live_ranges_test.cc
index 66660662e4..0fb90fb370 100644
--- a/compiler/optimizing/live_ranges_test.cc
+++ b/compiler/optimizing/live_ranges_test.cc
@@ -14,11 +14,9 @@
* limitations under the License.
*/
-#include "arch/x86/instruction_set_features_x86.h"
#include "base/arena_allocator.h"
#include "builder.h"
#include "code_generator.h"
-#include "code_generator_x86.h"
#include "dex/dex_file.h"
#include "dex/dex_instruction.h"
#include "driver/compiler_options.h"
@@ -63,10 +61,8 @@ TEST_F(LiveRangesTest, CFG1) {
HGraph* graph = BuildGraph(data);
- std::unique_ptr<const X86InstructionSetFeatures> features_x86(
- X86InstructionSetFeatures::FromCppDefines());
- x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
+ std::unique_ptr<CodeGenerator> codegen = CodeGenerator::Create(graph, *compiler_options_);
+ SsaLivenessAnalysis liveness(graph, codegen.get(), GetScopedAllocator());
liveness.Analyze();
LiveInterval* interval = liveness.GetInstructionFromSsaIndex(0)->GetLiveInterval();
@@ -109,10 +105,8 @@ TEST_F(LiveRangesTest, CFG2) {
Instruction::RETURN | 0 << 8);
HGraph* graph = BuildGraph(data);
- std::unique_ptr<const X86InstructionSetFeatures> features_x86(
- X86InstructionSetFeatures::FromCppDefines());
- x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
+ std::unique_ptr<CodeGenerator> codegen = CodeGenerator::Create(graph, *compiler_options_);
+ SsaLivenessAnalysis liveness(graph, codegen.get(), GetScopedAllocator());
liveness.Analyze();
LiveInterval* interval = liveness.GetInstructionFromSsaIndex(0)->GetLiveInterval();
@@ -158,10 +152,8 @@ TEST_F(LiveRangesTest, CFG3) {
Instruction::RETURN | 0 << 8);
HGraph* graph = BuildGraph(data);
- std::unique_ptr<const X86InstructionSetFeatures> features_x86(
- X86InstructionSetFeatures::FromCppDefines());
- x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
+ std::unique_ptr<CodeGenerator> codegen = CodeGenerator::Create(graph, *compiler_options_);
+ SsaLivenessAnalysis liveness(graph, codegen.get(), GetScopedAllocator());
liveness.Analyze();
// Test for the 4 constant.
@@ -235,10 +227,8 @@ TEST_F(LiveRangesTest, Loop1) {
HGraph* graph = BuildGraph(data);
RemoveSuspendChecks(graph);
- std::unique_ptr<const X86InstructionSetFeatures> features_x86(
- X86InstructionSetFeatures::FromCppDefines());
- x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
+ std::unique_ptr<CodeGenerator> codegen = CodeGenerator::Create(graph, *compiler_options_);
+ SsaLivenessAnalysis liveness(graph, codegen.get(), GetScopedAllocator());
liveness.Analyze();
// Test for the 0 constant.
@@ -312,10 +302,8 @@ TEST_F(LiveRangesTest, Loop2) {
Instruction::RETURN | 0 << 8);
HGraph* graph = BuildGraph(data);
- std::unique_ptr<const X86InstructionSetFeatures> features_x86(
- X86InstructionSetFeatures::FromCppDefines());
- x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
+ std::unique_ptr<CodeGenerator> codegen = CodeGenerator::Create(graph, *compiler_options_);
+ SsaLivenessAnalysis liveness(graph, codegen.get(), GetScopedAllocator());
liveness.Analyze();
// Test for the 0 constant.
@@ -388,10 +376,8 @@ TEST_F(LiveRangesTest, CFG4) {
Instruction::RETURN);
HGraph* graph = BuildGraph(data);
- std::unique_ptr<const X86InstructionSetFeatures> features_x86(
- X86InstructionSetFeatures::FromCppDefines());
- x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
+ std::unique_ptr<CodeGenerator> codegen = CodeGenerator::Create(graph, *compiler_options_);
+ SsaLivenessAnalysis liveness(graph, codegen.get(), GetScopedAllocator());
liveness.Analyze();
// Test for the 0 constant.
diff --git a/compiler/optimizing/liveness_test.cc b/compiler/optimizing/liveness_test.cc
index 6621a03568..72f995e773 100644
--- a/compiler/optimizing/liveness_test.cc
+++ b/compiler/optimizing/liveness_test.cc
@@ -14,11 +14,9 @@
* limitations under the License.
*/
-#include "arch/x86/instruction_set_features_x86.h"
#include "base/arena_allocator.h"
#include "builder.h"
#include "code_generator.h"
-#include "code_generator_x86.h"
#include "dex/dex_file.h"
#include "dex/dex_instruction.h"
#include "driver/compiler_options.h"
@@ -50,10 +48,8 @@ void LivenessTest::TestCode(const std::vector<uint16_t>& data, const char* expec
HGraph* graph = CreateCFG(data);
// `Inline` conditions into ifs.
PrepareForRegisterAllocation(graph).Run();
- std::unique_ptr<const X86InstructionSetFeatures> features_x86(
- X86InstructionSetFeatures::FromCppDefines());
- x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
- SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
+ std::unique_ptr<CodeGenerator> codegen = CodeGenerator::Create(graph, *compiler_options_);
+ SsaLivenessAnalysis liveness(graph, codegen.get(), GetScopedAllocator());
liveness.Analyze();
std::ostringstream buffer;
diff --git a/compiler/optimizing/loop_analysis.cc b/compiler/optimizing/loop_analysis.cc
index a2124455e2..efb23e7d3e 100644
--- a/compiler/optimizing/loop_analysis.cc
+++ b/compiler/optimizing/loop_analysis.cc
@@ -17,19 +17,34 @@
#include "loop_analysis.h"
#include "base/bit_vector-inl.h"
+#include "induction_var_range.h"
namespace art {
void LoopAnalysis::CalculateLoopBasicProperties(HLoopInformation* loop_info,
- LoopAnalysisInfo* analysis_results) {
+ LoopAnalysisInfo* analysis_results,
+ int64_t trip_count) {
+ analysis_results->trip_count_ = trip_count;
+
for (HBlocksInLoopIterator block_it(*loop_info);
!block_it.Done();
block_it.Advance()) {
HBasicBlock* block = block_it.Current();
+ // Check whether one of the successor is loop exit.
for (HBasicBlock* successor : block->GetSuccessors()) {
if (!loop_info->Contains(*successor)) {
analysis_results->exits_num_++;
+
+ // We track number of invariant loop exits which correspond to HIf instruction and
+ // can be eliminated by loop peeling; other control flow instruction are ignored and will
+ // not cause loop peeling to happen as they either cannot be inside a loop, or by
+ // definition cannot be loop exits (unconditional instructions), or are not beneficial for
+ // the optimization.
+ HIf* hif = block->GetLastInstruction()->AsIf();
+ if (hif != nullptr && !loop_info->Contains(*hif->InputAt(0)->GetBlock())) {
+ analysis_results->invariant_exits_num_++;
+ }
}
}
@@ -48,20 +63,13 @@ void LoopAnalysis::CalculateLoopBasicProperties(HLoopInformation* loop_info,
}
}
-bool LoopAnalysis::HasLoopAtLeastOneInvariantExit(HLoopInformation* loop_info) {
- HGraph* graph = loop_info->GetHeader()->GetGraph();
- for (uint32_t block_id : loop_info->GetBlocks().Indexes()) {
- HBasicBlock* block = graph->GetBlocks()[block_id];
- DCHECK(block != nullptr);
- if (block->EndsWithIf()) {
- HIf* hif = block->GetLastInstruction()->AsIf();
- HInstruction* input = hif->InputAt(0);
- if (IsLoopExit(loop_info, hif) && !loop_info->Contains(*input->GetBlock())) {
- return true;
- }
- }
+int64_t LoopAnalysis::GetLoopTripCount(HLoopInformation* loop_info,
+ const InductionVarRange* induction_range) {
+ int64_t trip_count;
+ if (!induction_range->HasKnownTripCount(loop_info, &trip_count)) {
+ trip_count = LoopAnalysisInfo::kUnknownTripCount;
}
- return false;
+ return trip_count;
}
// Default implementation of loop helper; used for all targets unless a custom implementation
@@ -77,18 +85,22 @@ class ArchDefaultLoopHelper : public ArchNoOptsLoopHelper {
// Loop's maximum basic block count. Loops with higher count will not be peeled/unrolled.
static constexpr uint32_t kScalarHeuristicMaxBodySizeBlocks = 6;
- bool IsLoopNonBeneficialForScalarOpts(LoopAnalysisInfo* loop_analysis_info) const OVERRIDE {
- return loop_analysis_info->HasLongTypeInstructions() ||
- IsLoopTooBig(loop_analysis_info,
+ bool IsLoopNonBeneficialForScalarOpts(LoopAnalysisInfo* analysis_info) const OVERRIDE {
+ return analysis_info->HasLongTypeInstructions() ||
+ IsLoopTooBig(analysis_info,
kScalarHeuristicMaxBodySizeInstr,
kScalarHeuristicMaxBodySizeBlocks);
}
- uint32_t GetScalarUnrollingFactor(HLoopInformation* loop_info ATTRIBUTE_UNUSED,
- uint64_t trip_count) const OVERRIDE {
+ uint32_t GetScalarUnrollingFactor(const LoopAnalysisInfo* analysis_info) const OVERRIDE {
+ int64_t trip_count = analysis_info->GetTripCount();
+ // Unroll only loops with known trip count.
+ if (trip_count == LoopAnalysisInfo::kUnknownTripCount) {
+ return LoopAnalysisInfo::kNoUnrollingFactor;
+ }
uint32_t desired_unrolling_factor = kScalarMaxUnrollFactor;
if (trip_count < desired_unrolling_factor || trip_count % desired_unrolling_factor != 0) {
- return kNoUnrollingFactor;
+ return LoopAnalysisInfo::kNoUnrollingFactor;
}
return desired_unrolling_factor;
@@ -136,12 +148,12 @@ class Arm64LoopHelper : public ArchDefaultLoopHelper {
// TODO: Unroll loops with unknown trip count.
DCHECK_NE(vector_length, 0u);
if (trip_count < (2 * vector_length + max_peel)) {
- return kNoUnrollingFactor;
+ return LoopAnalysisInfo::kNoUnrollingFactor;
}
// Don't unroll for large loop body size.
uint32_t instruction_count = block->GetInstructions().CountSize();
if (instruction_count >= kArm64SimdHeuristicMaxBodySizeInstr) {
- return kNoUnrollingFactor;
+ return LoopAnalysisInfo::kNoUnrollingFactor;
}
// Find a beneficial unroll factor with the following restrictions:
// - At least one iteration of the transformed loop should be executed.
diff --git a/compiler/optimizing/loop_analysis.h b/compiler/optimizing/loop_analysis.h
index c09d3ff00f..bcb7b70494 100644
--- a/compiler/optimizing/loop_analysis.h
+++ b/compiler/optimizing/loop_analysis.h
@@ -21,26 +21,33 @@
namespace art {
+class InductionVarRange;
class LoopAnalysis;
-// No loop unrolling factor (just one copy of the loop-body).
-static constexpr uint32_t kNoUnrollingFactor = 1;
-
// Class to hold cached information on properties of the loop.
class LoopAnalysisInfo : public ValueObject {
public:
+ // No loop unrolling factor (just one copy of the loop-body).
+ static constexpr uint32_t kNoUnrollingFactor = 1;
+ // Used for unknown and non-constant trip counts (see InductionVarRange::HasKnownTripCount).
+ static constexpr int64_t kUnknownTripCount = -1;
+
explicit LoopAnalysisInfo(HLoopInformation* loop_info)
- : bb_num_(0),
+ : trip_count_(kUnknownTripCount),
+ bb_num_(0),
instr_num_(0),
exits_num_(0),
+ invariant_exits_num_(0),
has_instructions_preventing_scalar_peeling_(false),
has_instructions_preventing_scalar_unrolling_(false),
has_long_type_instructions_(false),
loop_info_(loop_info) {}
+ int64_t GetTripCount() const { return trip_count_; }
size_t GetNumberOfBasicBlocks() const { return bb_num_; }
size_t GetNumberOfInstructions() const { return instr_num_; }
size_t GetNumberOfExits() const { return exits_num_; }
+ size_t GetNumberOfInvariantExits() const { return invariant_exits_num_; }
bool HasInstructionsPreventingScalarPeeling() const {
return has_instructions_preventing_scalar_peeling_;
@@ -50,19 +57,27 @@ class LoopAnalysisInfo : public ValueObject {
return has_instructions_preventing_scalar_unrolling_;
}
+ bool HasInstructionsPreventingScalarOpts() const {
+ return HasInstructionsPreventingScalarPeeling() || HasInstructionsPreventingScalarUnrolling();
+ }
+
bool HasLongTypeInstructions() const {
return has_long_type_instructions_;
}
- const HLoopInformation* GetLoopInfo() const { return loop_info_; }
+ HLoopInformation* GetLoopInfo() const { return loop_info_; }
private:
+ // Trip count of the loop if known, kUnknownTripCount otherwise.
+ int64_t trip_count_;
// Number of basic blocks in the loop body.
size_t bb_num_;
// Number of instructions in the loop body.
size_t instr_num_;
// Number of loop's exits.
size_t exits_num_;
+ // Number of "if" loop exits (with HIf instruction) whose condition is loop-invariant.
+ size_t invariant_exits_num_;
// Whether the loop has instructions which make scalar loop peeling non-beneficial.
bool has_instructions_preventing_scalar_peeling_;
// Whether the loop has instructions which make scalar loop unrolling non-beneficial.
@@ -72,7 +87,7 @@ class LoopAnalysisInfo : public ValueObject {
bool has_long_type_instructions_;
// Corresponding HLoopInformation.
- const HLoopInformation* loop_info_;
+ HLoopInformation* loop_info_;
friend class LoopAnalysis;
};
@@ -84,20 +99,12 @@ class LoopAnalysis : public ValueObject {
// Calculates loops basic properties like body size, exits number, etc. and fills
// 'analysis_results' with this information.
static void CalculateLoopBasicProperties(HLoopInformation* loop_info,
- LoopAnalysisInfo* analysis_results);
+ LoopAnalysisInfo* analysis_results,
+ int64_t trip_count);
- // Returns whether the loop has at least one loop invariant exit.
- static bool HasLoopAtLeastOneInvariantExit(HLoopInformation* loop_info);
-
- // Returns whether HIf's true or false successor is outside the specified loop.
- //
- // Prerequisite: HIf must be in the specified loop.
- static bool IsLoopExit(HLoopInformation* loop_info, const HIf* hif) {
- DCHECK(loop_info->Contains(*hif->GetBlock()));
- HBasicBlock* true_succ = hif->IfTrueSuccessor();
- HBasicBlock* false_succ = hif->IfFalseSuccessor();
- return (!loop_info->Contains(*true_succ) || !loop_info->Contains(*false_succ));
- }
+ // Returns the trip count of the loop if it is known and kUnknownTripCount otherwise.
+ static int64_t GetLoopTripCount(HLoopInformation* loop_info,
+ const InductionVarRange* induction_range);
private:
// Returns whether an instruction makes scalar loop peeling/unrolling non-beneficial.
@@ -113,9 +120,7 @@ class LoopAnalysis : public ValueObject {
instruction->IsUnresolvedStaticFieldGet() ||
instruction->IsUnresolvedStaticFieldSet() ||
// TODO: Support loops with intrinsified invokes.
- instruction->IsInvoke() ||
- // TODO: Support loops with ClinitChecks.
- instruction->IsClinitCheck());
+ instruction->IsInvoke());
}
};
@@ -145,9 +150,9 @@ class ArchNoOptsLoopHelper : public ArenaObject<kArenaAllocOptimization> {
// Returns optimal scalar unrolling factor for the loop.
//
// Returns kNoUnrollingFactor by default, should be overridden by particular target loop helper.
- virtual uint32_t GetScalarUnrollingFactor(HLoopInformation* loop_info ATTRIBUTE_UNUSED,
- uint64_t trip_count ATTRIBUTE_UNUSED) const {
- return kNoUnrollingFactor;
+ virtual uint32_t GetScalarUnrollingFactor(
+ const LoopAnalysisInfo* analysis_info ATTRIBUTE_UNUSED) const {
+ return LoopAnalysisInfo::kNoUnrollingFactor;
}
// Returns whether scalar loop peeling is enabled,
@@ -162,7 +167,7 @@ class ArchNoOptsLoopHelper : public ArenaObject<kArenaAllocOptimization> {
int64_t trip_count ATTRIBUTE_UNUSED,
uint32_t max_peel ATTRIBUTE_UNUSED,
uint32_t vector_length ATTRIBUTE_UNUSED) const {
- return kNoUnrollingFactor;
+ return LoopAnalysisInfo::kNoUnrollingFactor;
}
};
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index eda6bd1e86..440cd3351e 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -23,7 +23,7 @@
#include "arch/mips64/instruction_set_features_mips64.h"
#include "arch/x86/instruction_set_features_x86.h"
#include "arch/x86_64/instruction_set_features_x86_64.h"
-#include "driver/compiler_driver.h"
+#include "driver/compiler_options.h"
#include "linear_order.h"
#include "mirror/array-inl.h"
#include "mirror/string.h"
@@ -427,12 +427,12 @@ static void TryToEvaluateIfCondition(HIf* instruction, HGraph* graph) {
//
HLoopOptimization::HLoopOptimization(HGraph* graph,
- CompilerDriver* compiler_driver,
+ const CompilerOptions* compiler_options,
HInductionVarAnalysis* induction_analysis,
OptimizingCompilerStats* stats,
const char* name)
: HOptimization(graph, name, stats),
- compiler_driver_(compiler_driver),
+ compiler_options_(compiler_options),
induction_range_(induction_analysis),
loop_allocator_(nullptr),
global_allocator_(graph_->GetAllocator()),
@@ -454,8 +454,8 @@ HLoopOptimization::HLoopOptimization(HGraph* graph,
vector_header_(nullptr),
vector_body_(nullptr),
vector_index_(nullptr),
- arch_loop_helper_(ArchNoOptsLoopHelper::Create(compiler_driver_ != nullptr
- ? compiler_driver_->GetInstructionSet()
+ arch_loop_helper_(ArchNoOptsLoopHelper::Create(compiler_options_ != nullptr
+ ? compiler_options_->GetInstructionSet()
: InstructionSet::kNone,
global_allocator_)) {
}
@@ -744,100 +744,102 @@ bool HLoopOptimization::TryOptimizeInnerLoopFinite(LoopNode* node) {
}
bool HLoopOptimization::OptimizeInnerLoop(LoopNode* node) {
- return TryOptimizeInnerLoopFinite(node) ||
- TryPeelingForLoopInvariantExitsElimination(node) ||
- TryUnrollingForBranchPenaltyReduction(node);
+ return TryOptimizeInnerLoopFinite(node) || TryPeelingAndUnrolling(node);
}
//
-// Loop unrolling: generic part methods.
+// Scalar loop peeling and unrolling: generic part methods.
//
-bool HLoopOptimization::TryUnrollingForBranchPenaltyReduction(LoopNode* node) {
- // Don't run peeling/unrolling if compiler_driver_ is nullptr (i.e., running under tests)
- // as InstructionSet is needed.
- if (compiler_driver_ == nullptr) {
+bool HLoopOptimization::TryUnrollingForBranchPenaltyReduction(LoopAnalysisInfo* analysis_info,
+ bool generate_code) {
+ if (analysis_info->GetNumberOfExits() > 1) {
return false;
}
- HLoopInformation* loop_info = node->loop_info;
- int64_t trip_count = 0;
- // Only unroll loops with a known tripcount.
- if (!induction_range_.HasKnownTripCount(loop_info, &trip_count)) {
+ uint32_t unrolling_factor = arch_loop_helper_->GetScalarUnrollingFactor(analysis_info);
+ if (unrolling_factor == LoopAnalysisInfo::kNoUnrollingFactor) {
return false;
}
- uint32_t unrolling_factor = arch_loop_helper_->GetScalarUnrollingFactor(loop_info, trip_count);
- if (unrolling_factor == kNoUnrollingFactor) {
- return false;
- }
+ if (generate_code) {
+ // TODO: support other unrolling factors.
+ DCHECK_EQ(unrolling_factor, 2u);
- LoopAnalysisInfo loop_analysis_info(loop_info);
- LoopAnalysis::CalculateLoopBasicProperties(loop_info, &loop_analysis_info);
+ // Perform unrolling.
+ HLoopInformation* loop_info = analysis_info->GetLoopInfo();
+ PeelUnrollSimpleHelper helper(loop_info);
+ helper.DoUnrolling();
- // Check "IsLoopClonable" last as it can be time-consuming.
- if (loop_analysis_info.HasInstructionsPreventingScalarUnrolling() ||
- arch_loop_helper_->IsLoopNonBeneficialForScalarOpts(&loop_analysis_info) ||
- (loop_analysis_info.GetNumberOfExits() > 1) ||
- !PeelUnrollHelper::IsLoopClonable(loop_info)) {
- return false;
+ // Remove the redundant loop check after unrolling.
+ HIf* copy_hif =
+ helper.GetBasicBlockMap()->Get(loop_info->GetHeader())->GetLastInstruction()->AsIf();
+ int32_t constant = loop_info->Contains(*copy_hif->IfTrueSuccessor()) ? 1 : 0;
+ copy_hif->ReplaceInput(graph_->GetIntConstant(constant), 0u);
}
+ return true;
+}
- // TODO: support other unrolling factors.
- DCHECK_EQ(unrolling_factor, 2u);
+bool HLoopOptimization::TryPeelingForLoopInvariantExitsElimination(LoopAnalysisInfo* analysis_info,
+ bool generate_code) {
+ HLoopInformation* loop_info = analysis_info->GetLoopInfo();
+ if (!arch_loop_helper_->IsLoopPeelingEnabled()) {
+ return false;
+ }
- // Perform unrolling.
- PeelUnrollSimpleHelper helper(loop_info);
- helper.DoUnrolling();
+ if (analysis_info->GetNumberOfInvariantExits() == 0) {
+ return false;
+ }
- // Remove the redundant loop check after unrolling.
- HIf* copy_hif =
- helper.GetBasicBlockMap()->Get(loop_info->GetHeader())->GetLastInstruction()->AsIf();
- int32_t constant = loop_info->Contains(*copy_hif->IfTrueSuccessor()) ? 1 : 0;
- copy_hif->ReplaceInput(graph_->GetIntConstant(constant), 0u);
+ if (generate_code) {
+ // Perform peeling.
+ PeelUnrollSimpleHelper helper(loop_info);
+ helper.DoPeeling();
+
+ // Statically evaluate loop check after peeling for loop invariant condition.
+ const SuperblockCloner::HInstructionMap* hir_map = helper.GetInstructionMap();
+ for (auto entry : *hir_map) {
+ HInstruction* copy = entry.second;
+ if (copy->IsIf()) {
+ TryToEvaluateIfCondition(copy->AsIf(), graph_);
+ }
+ }
+ }
return true;
}
-bool HLoopOptimization::TryPeelingForLoopInvariantExitsElimination(LoopNode* node) {
- // Don't run peeling/unrolling if compiler_driver_ is nullptr (i.e., running under tests)
+bool HLoopOptimization::TryPeelingAndUnrolling(LoopNode* node) {
+ // Don't run peeling/unrolling if compiler_options_ is nullptr (i.e., running under tests)
// as InstructionSet is needed.
- if (compiler_driver_ == nullptr) {
+ if (compiler_options_ == nullptr) {
return false;
}
HLoopInformation* loop_info = node->loop_info;
- // Check 'IsLoopClonable' the last as it might be time-consuming.
- if (!arch_loop_helper_->IsLoopPeelingEnabled()) {
+ int64_t trip_count = LoopAnalysis::GetLoopTripCount(loop_info, &induction_range_);
+ LoopAnalysisInfo analysis_info(loop_info);
+ LoopAnalysis::CalculateLoopBasicProperties(loop_info, &analysis_info, trip_count);
+
+ if (analysis_info.HasInstructionsPreventingScalarOpts() ||
+ arch_loop_helper_->IsLoopNonBeneficialForScalarOpts(&analysis_info)) {
return false;
}
- LoopAnalysisInfo loop_analysis_info(loop_info);
- LoopAnalysis::CalculateLoopBasicProperties(loop_info, &loop_analysis_info);
-
- // Check "IsLoopClonable" last as it can be time-consuming.
- if (loop_analysis_info.HasInstructionsPreventingScalarPeeling() ||
- arch_loop_helper_->IsLoopNonBeneficialForScalarOpts(&loop_analysis_info) ||
- !LoopAnalysis::HasLoopAtLeastOneInvariantExit(loop_info) ||
- !PeelUnrollHelper::IsLoopClonable(loop_info)) {
+ if (!TryPeelingForLoopInvariantExitsElimination(&analysis_info, /*generate_code*/ false) &&
+ !TryUnrollingForBranchPenaltyReduction(&analysis_info, /*generate_code*/ false)) {
return false;
}
- // Perform peeling.
- PeelUnrollSimpleHelper helper(loop_info);
- helper.DoPeeling();
-
- const SuperblockCloner::HInstructionMap* hir_map = helper.GetInstructionMap();
- for (auto entry : *hir_map) {
- HInstruction* copy = entry.second;
- if (copy->IsIf()) {
- TryToEvaluateIfCondition(copy->AsIf(), graph_);
- }
+ // Run 'IsLoopClonable' the last as it might be time-consuming.
+ if (!PeelUnrollHelper::IsLoopClonable(loop_info)) {
+ return false;
}
- return true;
+ return TryPeelingForLoopInvariantExitsElimination(&analysis_info) ||
+ TryUnrollingForBranchPenaltyReduction(&analysis_info);
}
//
@@ -1076,7 +1078,7 @@ void HLoopOptimization::Vectorize(LoopNode* node,
vector_index_,
ptc,
graph_->GetConstant(induc_type, 1),
- kNoUnrollingFactor);
+ LoopAnalysisInfo::kNoUnrollingFactor);
}
// Generate vector loop, possibly further unrolled:
@@ -1103,7 +1105,7 @@ void HLoopOptimization::Vectorize(LoopNode* node,
vector_index_,
stc,
graph_->GetConstant(induc_type, 1),
- kNoUnrollingFactor);
+ LoopAnalysisInfo::kNoUnrollingFactor);
}
// Link reductions to their final uses.
@@ -1459,7 +1461,7 @@ bool HLoopOptimization::VectorizeUse(LoopNode* node,
}
uint32_t HLoopOptimization::GetVectorSizeInBytes() {
- switch (compiler_driver_->GetInstructionSet()) {
+ switch (compiler_options_->GetInstructionSet()) {
case InstructionSet::kArm:
case InstructionSet::kThumb2:
return 8; // 64-bit SIMD
@@ -1469,8 +1471,8 @@ uint32_t HLoopOptimization::GetVectorSizeInBytes() {
}
bool HLoopOptimization::TrySetVectorType(DataType::Type type, uint64_t* restrictions) {
- const InstructionSetFeatures* features = compiler_driver_->GetInstructionSetFeatures();
- switch (compiler_driver_->GetInstructionSet()) {
+ const InstructionSetFeatures* features = compiler_options_->GetInstructionSetFeatures();
+ switch (compiler_options_->GetInstructionSet()) {
case InstructionSet::kArm:
case InstructionSet::kThumb2:
// Allow vectorization for all ARM devices, because Android assumes that
diff --git a/compiler/optimizing/loop_optimization.h b/compiler/optimizing/loop_optimization.h
index 191a93da26..bc4792458b 100644
--- a/compiler/optimizing/loop_optimization.h
+++ b/compiler/optimizing/loop_optimization.h
@@ -27,7 +27,7 @@
namespace art {
-class CompilerDriver;
+class CompilerOptions;
class ArchNoOptsLoopHelper;
/**
@@ -38,7 +38,7 @@ class ArchNoOptsLoopHelper;
class HLoopOptimization : public HOptimization {
public:
HLoopOptimization(HGraph* graph,
- CompilerDriver* compiler_driver,
+ const CompilerOptions* compiler_options,
HInductionVarAnalysis* induction_analysis,
OptimizingCompilerStats* stats,
const char* name = kLoopOptimizationPassName);
@@ -144,12 +144,19 @@ class HLoopOptimization : public HOptimization {
bool OptimizeInnerLoop(LoopNode* node);
// Tries to apply loop unrolling for branch penalty reduction and better instruction scheduling
- // opportunities. Returns whether transformation happened.
- bool TryUnrollingForBranchPenaltyReduction(LoopNode* loop_node);
+ // opportunities. Returns whether transformation happened. 'generate_code' determines whether the
+ // optimization should be actually applied.
+ bool TryUnrollingForBranchPenaltyReduction(LoopAnalysisInfo* analysis_info,
+ bool generate_code = true);
// Tries to apply loop peeling for loop invariant exits elimination. Returns whether
- // transformation happened.
- bool TryPeelingForLoopInvariantExitsElimination(LoopNode* loop_node);
+ // transformation happened. 'generate_code' determines whether the optimization should be
+ // actually applied.
+ bool TryPeelingForLoopInvariantExitsElimination(LoopAnalysisInfo* analysis_info,
+ bool generate_code = true);
+
+ // Tries to apply scalar loop peeling and unrolling.
+ bool TryPeelingAndUnrolling(LoopNode* node);
//
// Vectorization analysis and synthesis.
@@ -243,8 +250,8 @@ class HLoopOptimization : public HOptimization {
void RemoveDeadInstructions(const HInstructionList& list);
bool CanRemoveCycle(); // Whether the current 'iset_' is removable.
- // Compiler driver (to query ISA features).
- const CompilerDriver* compiler_driver_;
+ // Compiler options (to query ISA features).
+ const CompilerOptions* compiler_options_;
// Range information based on prior induction variable analysis.
InductionVarRange induction_range_;
diff --git a/compiler/optimizing/loop_optimization_test.cc b/compiler/optimizing/loop_optimization_test.cc
index c21bd65d97..c7cc661303 100644
--- a/compiler/optimizing/loop_optimization_test.cc
+++ b/compiler/optimizing/loop_optimization_test.cc
@@ -29,7 +29,8 @@ class LoopOptimizationTest : public OptimizingUnitTest {
LoopOptimizationTest()
: graph_(CreateGraph()),
iva_(new (GetAllocator()) HInductionVarAnalysis(graph_)),
- loop_opt_(new (GetAllocator()) HLoopOptimization(graph_, nullptr, iva_, nullptr)) {
+ loop_opt_(new (GetAllocator()) HLoopOptimization(
+ graph_, /* compiler_options */ nullptr, iva_, /* stats */ nullptr)) {
BuildGraph();
}
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index ef8a757ad0..d243331dbe 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -1305,6 +1305,19 @@ void HInstruction::ReplaceUsesDominatedBy(HInstruction* dominator, HInstruction*
}
}
+void HInstruction::ReplaceEnvUsesDominatedBy(HInstruction* dominator, HInstruction* replacement) {
+ const HUseList<HEnvironment*>& uses = GetEnvUses();
+ for (auto it = uses.begin(), end = uses.end(); it != end; /* ++it below */) {
+ HEnvironment* user = it->GetUser();
+ size_t index = it->GetIndex();
+ // Increment `it` now because `*it` may disappear thanks to user->ReplaceInput().
+ ++it;
+ if (dominator->StrictlyDominates(user->GetHolder())) {
+ user->ReplaceInput(replacement, index);
+ }
+ }
+}
+
void HInstruction::ReplaceInput(HInstruction* replacement, size_t index) {
HUserRecord<HInstruction*> input_use = InputRecordAt(index);
if (input_use.GetInstruction() == replacement) {
@@ -2786,6 +2799,14 @@ void HInstruction::SetReferenceTypeInfo(ReferenceTypeInfo rti) {
SetPackedFlag<kFlagReferenceTypeIsExact>(rti.IsExact());
}
+bool HBoundType::InstructionDataEquals(const HInstruction* other) const {
+ const HBoundType* other_bt = other->AsBoundType();
+ ScopedObjectAccess soa(Thread::Current());
+ return GetUpperBound().IsEqual(other_bt->GetUpperBound()) &&
+ GetUpperCanBeNull() == other_bt->GetUpperCanBeNull() &&
+ CanBeNull() == other_bt->CanBeNull();
+}
+
void HBoundType::SetUpperBound(const ReferenceTypeInfo& upper_bound, bool can_be_null) {
if (kIsDebugBuild) {
ScopedObjectAccess soa(Thread::Current());
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 3fd5b6b02d..cd8d07a17a 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1380,6 +1380,7 @@ class HLoopInformationOutwardIterator : public ValueObject {
M(InvokeStaticOrDirect, Invoke) \
M(InvokeVirtual, Invoke) \
M(InvokePolymorphic, Invoke) \
+ M(InvokeCustom, Invoke) \
M(LessThan, Condition) \
M(LessThanOrEqual, Condition) \
M(LoadClass, Instruction) \
@@ -2216,6 +2217,7 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
void ReplaceWith(HInstruction* instruction);
void ReplaceUsesDominatedBy(HInstruction* dominator, HInstruction* replacement);
+ void ReplaceEnvUsesDominatedBy(HInstruction* dominator, HInstruction* replacement);
void ReplaceInput(HInstruction* replacement, size_t index);
// This is almost the same as doing `ReplaceWith()`. But in this helper, the
@@ -4382,6 +4384,38 @@ class HInvokePolymorphic FINAL : public HInvoke {
DEFAULT_COPY_CONSTRUCTOR(InvokePolymorphic);
};
+class HInvokeCustom FINAL : public HInvoke {
+ public:
+ HInvokeCustom(ArenaAllocator* allocator,
+ uint32_t number_of_arguments,
+ uint32_t call_site_index,
+ DataType::Type return_type,
+ uint32_t dex_pc)
+ : HInvoke(kInvokeCustom,
+ allocator,
+ number_of_arguments,
+ /* number_of_other_inputs */ 0u,
+ return_type,
+ dex_pc,
+ /* dex_method_index */ dex::kDexNoIndex,
+ /* resolved_method */ nullptr,
+ kStatic),
+ call_site_index_(call_site_index) {
+ }
+
+ uint32_t GetCallSiteIndex() const { return call_site_index_; }
+
+ bool IsClonable() const OVERRIDE { return true; }
+
+ DECLARE_INSTRUCTION(InvokeCustom);
+
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(InvokeCustom);
+
+ private:
+ uint32_t call_site_index_;
+};
+
class HInvokeStaticOrDirect FINAL : public HInvoke {
public:
// Requirements of this method call regarding the class
@@ -5122,6 +5156,7 @@ class HDivZeroCheck FINAL : public HExpression<1> {
SetRawInputAt(0, value);
}
+ bool IsClonable() const OVERRIDE { return true; }
bool CanBeMoved() const OVERRIDE { return true; }
bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
@@ -5573,6 +5608,7 @@ class HTypeConversion FINAL : public HExpression<1> {
DataType::Type GetInputType() const { return GetInput()->GetType(); }
DataType::Type GetResultType() const { return GetType(); }
+ bool IsClonable() const OVERRIDE { return true; }
bool CanBeMoved() const OVERRIDE { return true; }
bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
@@ -6510,9 +6546,9 @@ inline void HLoadString::AddSpecialInput(HInstruction* special_input) {
class HLoadMethodHandle FINAL : public HInstruction {
public:
HLoadMethodHandle(HCurrentMethod* current_method,
- uint16_t method_handle_idx,
- const DexFile& dex_file,
- uint32_t dex_pc)
+ uint16_t method_handle_idx,
+ const DexFile& dex_file,
+ uint32_t dex_pc)
: HInstruction(kLoadMethodHandle,
DataType::Type::kReference,
SideEffectsForArchRuntimeCalls(),
@@ -6608,8 +6644,7 @@ class HClinitCheck FINAL : public HExpression<1> {
dex_pc) {
SetRawInputAt(0, constant);
}
-
- bool IsClonable() const OVERRIDE { return true; }
+ // TODO: Make ClinitCheck clonable.
bool CanBeMoved() const OVERRIDE { return true; }
bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
@@ -7079,6 +7114,8 @@ class HInstanceOf FINAL : public HTypeCheckInstruction {
bitstring_mask,
SideEffectsForArchRuntimeCalls(check_kind)) {}
+ bool IsClonable() const OVERRIDE { return true; }
+
bool NeedsEnvironment() const OVERRIDE {
return CanCallRuntime(GetTypeCheckKind());
}
@@ -7109,6 +7146,7 @@ class HBoundType FINAL : public HExpression<1> {
SetRawInputAt(0, input);
}
+ bool InstructionDataEquals(const HInstruction* other) const OVERRIDE;
bool IsClonable() const OVERRIDE { return true; }
// {Get,Set}Upper* should only be used in reference type propagation.
@@ -7167,6 +7205,7 @@ class HCheckCast FINAL : public HTypeCheckInstruction {
bitstring_mask,
SideEffects::CanTriggerGC()) {}
+ bool IsClonable() const OVERRIDE { return true; }
bool NeedsEnvironment() const OVERRIDE {
// Instruction may throw a CheckCastError.
return true;
diff --git a/compiler/optimizing/nodes_vector.h b/compiler/optimizing/nodes_vector.h
index c5e9a8d036..b4f9993ad6 100644
--- a/compiler/optimizing/nodes_vector.h
+++ b/compiler/optimizing/nodes_vector.h
@@ -958,6 +958,10 @@ class HVecMultiplyAccumulate FINAL : public HVecOperation {
SetRawInputAt(2, mul_right);
}
+ static constexpr int kInputAccumulatorIndex = 0;
+ static constexpr int kInputMulLeftIndex = 1;
+ static constexpr int kInputMulRightIndex = 2;
+
bool CanBeMoved() const OVERRIDE { return true; }
bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
diff --git a/compiler/optimizing/optimization.cc b/compiler/optimizing/optimization.cc
index d37c43db81..3c803ab627 100644
--- a/compiler/optimizing/optimization.cc
+++ b/compiler/optimizing/optimization.cc
@@ -28,6 +28,7 @@
#endif
#ifdef ART_ENABLE_CODEGEN_x86
#include "pc_relative_fixups_x86.h"
+#include "instruction_simplifier_x86.h"
#endif
#if defined(ART_ENABLE_CODEGEN_x86) || defined(ART_ENABLE_CODEGEN_x86_64)
#include "x86_memory_gen.h"
@@ -40,6 +41,7 @@
#include "constructor_fence_redundancy_elimination.h"
#include "dead_code_elimination.h"
#include "dex/code_item_accessors-inl.h"
+#include "driver/compiler_options.h"
#include "driver/dex_compilation_unit.h"
#include "gvn.h"
#include "induction_var_analysis.h"
@@ -120,6 +122,8 @@ const char* OptimizationPassName(OptimizationPass pass) {
#if defined(ART_ENABLE_CODEGEN_x86) || defined(ART_ENABLE_CODEGEN_x86_64)
case OptimizationPass::kX86MemoryOperandGeneration:
return x86::X86MemoryOperandGeneration::kX86MemoryOperandGenerationPassName;
+ case OptimizationPass::kInstructionSimplifierX86:
+ return x86::InstructionSimplifierX86::kInstructionSimplifierX86PassName;
#endif
case OptimizationPass::kNone:
LOG(FATAL) << "kNone does not represent an actual pass";
@@ -162,6 +166,7 @@ OptimizationPass OptimizationPassByName(const std::string& pass_name) {
#ifdef ART_ENABLE_CODEGEN_x86
X(OptimizationPass::kPcRelativeFixupsX86);
X(OptimizationPass::kX86MemoryOperandGeneration);
+ X(OptimizationPass::kInstructionSimplifierX86);
#endif
LOG(FATAL) << "Cannot find optimization " << pass_name;
UNREACHABLE();
@@ -224,7 +229,7 @@ ArenaVector<HOptimization*> ConstructOptimizations(
case OptimizationPass::kLoopOptimization:
CHECK(most_recent_induction != nullptr);
opt = new (allocator) HLoopOptimization(
- graph, driver, most_recent_induction, stats, pass_name);
+ graph, &codegen->GetCompilerOptions(), most_recent_induction, stats, pass_name);
break;
case OptimizationPass::kBoundsCheckElimination:
CHECK(most_recent_side_effects != nullptr && most_recent_induction != nullptr);
@@ -264,13 +269,13 @@ ArenaVector<HOptimization*> ConstructOptimizations(
break;
}
case OptimizationPass::kSharpening:
- opt = new (allocator) HSharpening(graph, codegen, driver, pass_name);
+ opt = new (allocator) HSharpening(graph, codegen, pass_name);
break;
case OptimizationPass::kSelectGenerator:
opt = new (allocator) HSelectGenerator(graph, handles, stats, pass_name);
break;
case OptimizationPass::kInstructionSimplifier:
- opt = new (allocator) InstructionSimplifier(graph, codegen, driver, stats, pass_name);
+ opt = new (allocator) InstructionSimplifier(graph, codegen, stats, pass_name);
break;
case OptimizationPass::kIntrinsicsRecognizer:
opt = new (allocator) IntrinsicsRecognizer(graph, stats, pass_name);
@@ -286,7 +291,7 @@ ArenaVector<HOptimization*> ConstructOptimizations(
break;
case OptimizationPass::kScheduling:
opt = new (allocator) HInstructionScheduling(
- graph, driver->GetInstructionSet(), codegen, pass_name);
+ graph, codegen->GetCompilerOptions().GetInstructionSet(), codegen, pass_name);
break;
//
// Arch-specific passes.
@@ -322,6 +327,10 @@ ArenaVector<HOptimization*> ConstructOptimizations(
DCHECK(alt_name == nullptr) << "arch-specific pass does not support alternative name";
opt = new (allocator) x86::X86MemoryOperandGeneration(graph, codegen, stats);
break;
+ case OptimizationPass::kInstructionSimplifierX86:
+ DCHECK(alt_name == nullptr) << "arch-specific pass does not support alternative name";
+ opt = new (allocator) x86::InstructionSimplifierX86(graph, codegen, stats);
+ break;
#endif
case OptimizationPass::kNone:
LOG(FATAL) << "kNone does not represent an actual pass";
diff --git a/compiler/optimizing/optimization.h b/compiler/optimizing/optimization.h
index 88b283cebf..a9fafa0864 100644
--- a/compiler/optimizing/optimization.h
+++ b/compiler/optimizing/optimization.h
@@ -101,6 +101,7 @@ enum class OptimizationPass {
#endif
#if defined(ART_ENABLE_CODEGEN_x86) || defined(ART_ENABLE_CODEGEN_x86_64)
kX86MemoryOperandGeneration,
+ kInstructionSimplifierX86,
#endif
kNone,
kLast = kNone
diff --git a/compiler/optimizing/optimizing_cfi_test.cc b/compiler/optimizing/optimizing_cfi_test.cc
index 2e189fdd14..1c1cf28294 100644
--- a/compiler/optimizing/optimizing_cfi_test.cc
+++ b/compiler/optimizing/optimizing_cfi_test.cc
@@ -47,25 +47,20 @@ class OptimizingCFITest : public CFITest, public OptimizingUnitTestHelper {
static constexpr bool kGenerateExpected = false;
OptimizingCFITest()
- : pool_and_allocator_(),
- opts_(),
- isa_features_(),
- graph_(nullptr),
+ : graph_(nullptr),
code_gen_(),
blocks_(GetAllocator()->Adapter()) {}
- ArenaAllocator* GetAllocator() { return pool_and_allocator_.GetAllocator(); }
-
void SetUpFrame(InstructionSet isa) {
+ OverrideInstructionSetFeatures(isa, "default");
+
// Ensure that slow-debug is off, so that there is no unexpected read-barrier check emitted.
SetRuntimeDebugFlagsEnabled(false);
// Setup simple context.
- std::string error;
- isa_features_ = InstructionSetFeatures::FromVariant(isa, "default", &error);
graph_ = CreateGraph();
// Generate simple frame with some spills.
- code_gen_ = CodeGenerator::Create(graph_, isa, *isa_features_, opts_);
+ code_gen_ = CodeGenerator::Create(graph_, *compiler_options_);
code_gen_->GetAssembler()->cfi().SetEnabled(true);
code_gen_->InitializeCodeGenerationData();
const int frame_size = 64;
@@ -148,9 +143,6 @@ class OptimizingCFITest : public CFITest, public OptimizingUnitTestHelper {
DISALLOW_COPY_AND_ASSIGN(InternalCodeAllocator);
};
- ArenaPoolAndAllocator pool_and_allocator_;
- CompilerOptions opts_;
- std::unique_ptr<const InstructionSetFeatures> isa_features_;
HGraph* graph_;
std::unique_ptr<CodeGenerator> code_gen_;
ArenaVector<HBasicBlock*> blocks_;
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 79ac6b9b9d..5352f26e46 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -287,7 +287,7 @@ class OptimizingCompiler FINAL : public Compiler {
uintptr_t GetEntryPointOf(ArtMethod* method) const OVERRIDE
REQUIRES_SHARED(Locks::mutator_lock_) {
return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCodePtrSize(
- InstructionSetPointerSize(GetCompilerDriver()->GetInstructionSet())));
+ InstructionSetPointerSize(GetCompilerDriver()->GetCompilerOptions().GetInstructionSet())));
}
void Init() OVERRIDE;
@@ -460,7 +460,7 @@ bool OptimizingCompiler::RunArchOptimizations(HGraph* graph,
const DexCompilationUnit& dex_compilation_unit,
PassObserver* pass_observer,
VariableSizedHandleScope* handles) const {
- switch (GetCompilerDriver()->GetInstructionSet()) {
+ switch (codegen->GetCompilerOptions().GetInstructionSet()) {
#if defined(ART_ENABLE_CODEGEN_arm)
case InstructionSet::kThumb2:
case InstructionSet::kArm: {
@@ -530,7 +530,8 @@ bool OptimizingCompiler::RunArchOptimizations(HGraph* graph,
OptDef(OptimizationPass::kSideEffectsAnalysis),
OptDef(OptimizationPass::kGlobalValueNumbering, "GVN$after_arch"),
OptDef(OptimizationPass::kPcRelativeFixupsX86),
- OptDef(OptimizationPass::kX86MemoryOperandGeneration)
+ OptDef(OptimizationPass::kX86MemoryOperandGeneration),
+ OptDef(OptimizationPass::kInstructionSimplifierX86)
};
return RunOptimizations(graph,
codegen,
@@ -545,7 +546,8 @@ bool OptimizingCompiler::RunArchOptimizations(HGraph* graph,
OptimizationDef x86_64_optimizations[] = {
OptDef(OptimizationPass::kSideEffectsAnalysis),
OptDef(OptimizationPass::kGlobalValueNumbering, "GVN$after_arch"),
- OptDef(OptimizationPass::kX86MemoryOperandGeneration)
+ OptDef(OptimizationPass::kX86MemoryOperandGeneration),
+ OptDef(OptimizationPass::kInstructionSimplifierX86)
};
return RunOptimizations(graph,
codegen,
@@ -758,7 +760,8 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
VariableSizedHandleScope* handles) const {
MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kAttemptBytecodeCompilation);
CompilerDriver* compiler_driver = GetCompilerDriver();
- InstructionSet instruction_set = compiler_driver->GetInstructionSet();
+ const CompilerOptions& compiler_options = compiler_driver->GetCompilerOptions();
+ InstructionSet instruction_set = compiler_options.GetInstructionSet();
const DexFile& dex_file = *dex_compilation_unit.GetDexFile();
uint32_t method_idx = dex_compilation_unit.GetDexMethodIndex();
const DexFile::CodeItem* code_item = dex_compilation_unit.GetCodeItem();
@@ -782,7 +785,6 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
// Implementation of the space filter: do not compile a code item whose size in
// code units is bigger than 128.
static constexpr size_t kSpaceFilterOptimizingThreshold = 128;
- const CompilerOptions& compiler_options = compiler_driver->GetCompilerOptions();
if ((compiler_options.GetCompilerFilter() == CompilerFilter::kSpace)
&& (CodeItemInstructionAccessor(dex_file, code_item).InsnsSizeInCodeUnits() >
kSpaceFilterOptimizingThreshold)) {
@@ -796,7 +798,7 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
arena_stack,
dex_file,
method_idx,
- compiler_driver->GetInstructionSet(),
+ compiler_options.GetInstructionSet(),
kInvalidInvokeType,
compiler_driver->GetCompilerOptions().GetDebuggable(),
osr);
@@ -813,9 +815,7 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
std::unique_ptr<CodeGenerator> codegen(
CodeGenerator::Create(graph,
- instruction_set,
- *compiler_driver->GetInstructionSetFeatures(),
- compiler_driver->GetCompilerOptions(),
+ compiler_options,
compilation_stats_.get()));
if (codegen.get() == nullptr) {
MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kNotCompiledNoCodegen);
@@ -848,23 +848,23 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
case kAnalysisSkipped: {
MaybeRecordStat(compilation_stats_.get(),
MethodCompilationStat::kNotCompiledSkipped);
- }
break;
+ }
case kAnalysisInvalidBytecode: {
MaybeRecordStat(compilation_stats_.get(),
MethodCompilationStat::kNotCompiledInvalidBytecode);
- }
break;
+ }
case kAnalysisFailThrowCatchLoop: {
MaybeRecordStat(compilation_stats_.get(),
MethodCompilationStat::kNotCompiledThrowCatchLoop);
- }
break;
+ }
case kAnalysisFailAmbiguousArrayOp: {
MaybeRecordStat(compilation_stats_.get(),
MethodCompilationStat::kNotCompiledAmbiguousArrayOp);
- }
break;
+ }
case kAnalysisSuccess:
UNREACHABLE();
}
@@ -903,7 +903,8 @@ CodeGenerator* OptimizingCompiler::TryCompileIntrinsic(
VariableSizedHandleScope* handles) const {
MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kAttemptIntrinsicCompilation);
CompilerDriver* compiler_driver = GetCompilerDriver();
- InstructionSet instruction_set = compiler_driver->GetInstructionSet();
+ const CompilerOptions& compiler_options = compiler_driver->GetCompilerOptions();
+ InstructionSet instruction_set = compiler_options.GetInstructionSet();
const DexFile& dex_file = *dex_compilation_unit.GetDexFile();
uint32_t method_idx = dex_compilation_unit.GetDexMethodIndex();
@@ -921,7 +922,7 @@ CodeGenerator* OptimizingCompiler::TryCompileIntrinsic(
arena_stack,
dex_file,
method_idx,
- compiler_driver->GetInstructionSet(),
+ compiler_driver->GetCompilerOptions().GetInstructionSet(),
kInvalidInvokeType,
compiler_driver->GetCompilerOptions().GetDebuggable(),
/* osr */ false);
@@ -932,15 +933,12 @@ CodeGenerator* OptimizingCompiler::TryCompileIntrinsic(
std::unique_ptr<CodeGenerator> codegen(
CodeGenerator::Create(graph,
- instruction_set,
- *compiler_driver->GetInstructionSetFeatures(),
- compiler_driver->GetCompilerOptions(),
+ compiler_options,
compilation_stats_.get()));
if (codegen.get() == nullptr) {
return nullptr;
}
- codegen->GetAssembler()->cfi().SetEnabled(
- compiler_driver->GetCompilerOptions().GenerateAnyDebugInfo());
+ codegen->GetAssembler()->cfi().SetEnabled(compiler_options.GenerateAnyDebugInfo());
PassObserver pass_observer(graph,
codegen.get(),
@@ -1095,7 +1093,7 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
if (kIsDebugBuild &&
IsCompilingWithCoreImage() &&
- IsInstructionSetSupported(compiler_driver->GetInstructionSet())) {
+ IsInstructionSetSupported(compiler_driver->GetCompilerOptions().GetInstructionSet())) {
// For testing purposes, we put a special marker on method names
// that should be compiled with this compiler (when the
// instruction set is supported). This makes sure we're not
@@ -1112,7 +1110,8 @@ CompiledMethod* OptimizingCompiler::JniCompile(uint32_t access_flags,
uint32_t method_idx,
const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache) const {
- if (GetCompilerDriver()->GetCompilerOptions().IsBootImage()) {
+ const CompilerOptions& compiler_options = GetCompilerDriver()->GetCompilerOptions();
+ if (compiler_options.IsBootImage()) {
ScopedObjectAccess soa(Thread::Current());
Runtime* runtime = Runtime::Current();
ArtMethod* method = runtime->GetClassLinker()->LookupResolvedMethod(
@@ -1154,7 +1153,7 @@ CompiledMethod* OptimizingCompiler::JniCompile(uint32_t access_flags,
}
JniCompiledMethod jni_compiled_method = ArtQuickJniCompileMethod(
- GetCompilerDriver(), access_flags, method_idx, dex_file);
+ compiler_options, access_flags, method_idx, dex_file);
MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kCompiledNativeStub);
return CompiledMethod::SwapAllocCompiledMethod(
GetCompilerDriver(),
@@ -1218,8 +1217,9 @@ bool OptimizingCompiler::JitCompile(Thread* self,
ArenaAllocator allocator(runtime->GetJitArenaPool());
if (UNLIKELY(method->IsNative())) {
+ const CompilerOptions& compiler_options = GetCompilerDriver()->GetCompilerOptions();
JniCompiledMethod jni_compiled_method = ArtQuickJniCompileMethod(
- GetCompilerDriver(), access_flags, method_idx, *dex_file);
+ compiler_options, access_flags, method_idx, *dex_file);
ScopedNullHandle<mirror::ObjectArray<mirror::Object>> roots;
ArenaSet<ArtMethod*, std::less<ArtMethod*>> cha_single_implementation_list(
allocator.Adapter(kArenaAllocCHA));
@@ -1243,7 +1243,6 @@ bool OptimizingCompiler::JitCompile(Thread* self,
return false;
}
- const CompilerOptions& compiler_options = GetCompilerDriver()->GetCompilerOptions();
if (compiler_options.GenerateAnyDebugInfo()) {
const auto* method_header = reinterpret_cast<const OatQuickMethodHeader*>(code);
const uintptr_t code_address = reinterpret_cast<uintptr_t>(method_header->GetCode());
@@ -1420,8 +1419,8 @@ void OptimizingCompiler::GenerateJitDebugInfo(ArtMethod* method, debug::MethodDe
// Create entry for the single method that we just compiled.
std::vector<uint8_t> elf_file = debug::MakeElfFileForJIT(
- GetCompilerDriver()->GetInstructionSet(),
- GetCompilerDriver()->GetInstructionSetFeatures(),
+ compiler_options.GetInstructionSet(),
+ compiler_options.GetInstructionSetFeatures(),
mini_debug_info,
ArrayRef<const debug::MethodDebugInfo>(&info, 1));
MutexLock mu(Thread::Current(), *Locks::native_debug_interface_lock_);
diff --git a/compiler/optimizing/optimizing_compiler_stats.h b/compiler/optimizing/optimizing_compiler_stats.h
index f246228074..9a26f2f6c4 100644
--- a/compiler/optimizing/optimizing_compiler_stats.h
+++ b/compiler/optimizing/optimizing_compiler_stats.h
@@ -50,7 +50,6 @@ enum class MethodCompilationStat {
kNotCompiledThrowCatchLoop,
kNotCompiledAmbiguousArrayOp,
kNotCompiledHugeMethod,
- kNotCompiledIrreducibleAndStringInit,
kNotCompiledLargeMethodNoBranches,
kNotCompiledMalformedOpcode,
kNotCompiledNoCodegen,
diff --git a/compiler/optimizing/optimizing_unit_test.h b/compiler/optimizing/optimizing_unit_test.h
index a9bc5664c0..f903f82d50 100644
--- a/compiler/optimizing/optimizing_unit_test.h
+++ b/compiler/optimizing/optimizing_unit_test.h
@@ -29,6 +29,7 @@
#include "dex/dex_instruction.h"
#include "dex/standard_dex_file.h"
#include "driver/dex_compilation_unit.h"
+#include "graph_checker.h"
#include "handle_scope-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
@@ -187,6 +188,77 @@ class OptimizingUnitTestHelper {
class OptimizingUnitTest : public CommonCompilerTest, public OptimizingUnitTestHelper {};
+// OptimizingUnitTest with some handy functions to ease the graph creation.
+class ImprovedOptimizingUnitTest : public OptimizingUnitTest {
+ public:
+ ImprovedOptimizingUnitTest() : graph_(CreateGraph()),
+ entry_block_(nullptr),
+ return_block_(nullptr),
+ exit_block_(nullptr),
+ parameter_(nullptr) {}
+
+ virtual ~ImprovedOptimizingUnitTest() {}
+
+ void InitGraph() {
+ entry_block_ = new (GetAllocator()) HBasicBlock(graph_);
+ graph_->AddBlock(entry_block_);
+ graph_->SetEntryBlock(entry_block_);
+
+ return_block_ = new (GetAllocator()) HBasicBlock(graph_);
+ graph_->AddBlock(return_block_);
+
+ exit_block_ = new (GetAllocator()) HBasicBlock(graph_);
+ graph_->AddBlock(exit_block_);
+ graph_->SetExitBlock(exit_block_);
+
+ entry_block_->AddSuccessor(return_block_);
+ return_block_->AddSuccessor(exit_block_);
+
+ parameter_ = new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
+ dex::TypeIndex(0),
+ 0,
+ DataType::Type::kInt32);
+ entry_block_->AddInstruction(parameter_);
+ return_block_->AddInstruction(new (GetAllocator()) HReturnVoid());
+ exit_block_->AddInstruction(new (GetAllocator()) HExit());
+ }
+
+ bool CheckGraph() {
+ GraphChecker checker(graph_);
+ checker.Run();
+ if (!checker.IsValid()) {
+ for (const std::string& error : checker.GetErrors()) {
+ std::cout << error << std::endl;
+ }
+ return false;
+ }
+ return true;
+ }
+
+ HEnvironment* ManuallyBuildEnvFor(HInstruction* instruction,
+ ArenaVector<HInstruction*>* current_locals) {
+ HEnvironment* environment = new (GetAllocator()) HEnvironment(
+ (GetAllocator()),
+ current_locals->size(),
+ graph_->GetArtMethod(),
+ instruction->GetDexPc(),
+ instruction);
+
+ environment->CopyFrom(ArrayRef<HInstruction* const>(*current_locals));
+ instruction->SetRawEnvironment(environment);
+ return environment;
+ }
+
+ protected:
+ HGraph* graph_;
+
+ HBasicBlock* entry_block_;
+ HBasicBlock* return_block_;
+ HBasicBlock* exit_block_;
+
+ HInstruction* parameter_;
+};
+
// Naive string diff data type.
typedef std::list<std::pair<std::string, std::string>> diff_t;
diff --git a/compiler/optimizing/pc_relative_fixups_x86.cc b/compiler/optimizing/pc_relative_fixups_x86.cc
index 9049457da5..05ec765b19 100644
--- a/compiler/optimizing/pc_relative_fixups_x86.cc
+++ b/compiler/optimizing/pc_relative_fixups_x86.cc
@@ -193,18 +193,19 @@ class PCRelativeHandlerVisitor : public HGraphVisitor {
}
void HandleInvoke(HInvoke* invoke) {
- // If this is an invoke-static/-direct with PC-relative dex cache array
- // addressing, we need the PC-relative address base.
HInvokeStaticOrDirect* invoke_static_or_direct = invoke->AsInvokeStaticOrDirect();
- // We can't add a pointer to the constant area if we already have a current
- // method pointer. This may arise when sharpening doesn't remove the current
- // method pointer from the invoke.
- if (invoke_static_or_direct != nullptr &&
- invoke_static_or_direct->HasCurrentMethodInput()) {
+
+ // We can't add the method address if we already have a current method pointer.
+ // This may arise when sharpening doesn't remove the current method pointer from the invoke.
+ if (invoke_static_or_direct != nullptr && invoke_static_or_direct->HasCurrentMethodInput()) {
+ // Note: This happens only for recursive calls (including compiling an intrinsic
+ // by faking a call to itself; we use kRuntimeCall for this case).
DCHECK(!invoke_static_or_direct->HasPcRelativeMethodLoadKind());
return;
}
+ // If this is an invoke-static/-direct with PC-relative addressing (within boot image
+ // or using .bss or .data.bimg.rel.ro), we need the PC-relative address base.
bool base_added = false;
if (invoke_static_or_direct != nullptr &&
invoke_static_or_direct->HasPcRelativeMethodLoadKind() &&
@@ -224,7 +225,6 @@ class PCRelativeHandlerVisitor : public HGraphVisitor {
}
}
- // These intrinsics need the constant area.
switch (invoke->GetIntrinsic()) {
case Intrinsics::kMathAbsDouble:
case Intrinsics::kMathAbsFloat:
@@ -235,7 +235,15 @@ class PCRelativeHandlerVisitor : public HGraphVisitor {
LOG(FATAL) << "Unreachable min/max/abs: intrinsics should have been lowered "
"to IR nodes by instruction simplifier";
UNREACHABLE();
+ case Intrinsics::kIntegerValueOf:
+ // This intrinsic can be call free if it loads the address of the boot image object.
+ // If we're compiling PIC, we need the address base for loading from .data.bimg.rel.ro.
+ if (!codegen_->GetCompilerOptions().GetCompilePic()) {
+ break;
+ }
+ FALLTHROUGH_INTENDED;
case Intrinsics::kMathRoundFloat:
+ // This intrinsic needs the constant area.
if (!base_added) {
DCHECK(invoke_static_or_direct != nullptr);
DCHECK(!invoke_static_or_direct->HasCurrentMethodInput());
diff --git a/compiler/optimizing/register_allocator_graph_color.cc b/compiler/optimizing/register_allocator_graph_color.cc
index fa7ad82316..42e6498148 100644
--- a/compiler/optimizing/register_allocator_graph_color.cc
+++ b/compiler/optimizing/register_allocator_graph_color.cc
@@ -1183,7 +1183,7 @@ static bool CheckInputOutputCanOverlap(InterferenceNode* in_node, InterferenceNo
void ColoringIteration::BuildInterferenceGraph(
const ScopedArenaVector<LiveInterval*>& intervals,
const ScopedArenaVector<InterferenceNode*>& physical_nodes) {
- DCHECK(interval_node_map_.Empty() && prunable_nodes_.empty());
+ DCHECK(interval_node_map_.empty() && prunable_nodes_.empty());
// Build the interference graph efficiently by ordering range endpoints
// by position and doing a linear sweep to find interferences. (That is, we
// jump from endpoint to endpoint, maintaining a set of intervals live at each
@@ -1208,7 +1208,7 @@ void ColoringIteration::BuildInterferenceGraph(
if (range != nullptr) {
InterferenceNode* node =
new (allocator_) InterferenceNode(sibling, register_allocator_->liveness_);
- interval_node_map_.Insert(std::make_pair(sibling, node));
+ interval_node_map_.insert(std::make_pair(sibling, node));
if (sibling->HasRegister()) {
// Fixed nodes should alias the canonical node for the corresponding register.
@@ -1303,7 +1303,7 @@ void ColoringIteration::FindCoalesceOpportunities() {
// Coalesce siblings.
LiveInterval* next_sibling = interval->GetNextSibling();
if (next_sibling != nullptr && interval->GetEnd() == next_sibling->GetStart()) {
- auto it = interval_node_map_.Find(next_sibling);
+ auto it = interval_node_map_.find(next_sibling);
if (it != interval_node_map_.end()) {
InterferenceNode* sibling_node = it->second;
CreateCoalesceOpportunity(node,
@@ -1318,7 +1318,7 @@ void ColoringIteration::FindCoalesceOpportunities() {
if (parent->HasRegister()
&& parent->GetNextSibling() == interval
&& parent->GetEnd() == interval->GetStart()) {
- auto it = interval_node_map_.Find(parent);
+ auto it = interval_node_map_.find(parent);
if (it != interval_node_map_.end()) {
InterferenceNode* parent_node = it->second;
CreateCoalesceOpportunity(node,
@@ -1341,7 +1341,7 @@ void ColoringIteration::FindCoalesceOpportunities() {
size_t position = predecessor->GetLifetimeEnd() - 1;
LiveInterval* existing = interval->GetParent()->GetSiblingAt(position);
if (existing != nullptr) {
- auto it = interval_node_map_.Find(existing);
+ auto it = interval_node_map_.find(existing);
if (it != interval_node_map_.end()) {
InterferenceNode* existing_node = it->second;
CreateCoalesceOpportunity(node,
@@ -1364,7 +1364,7 @@ void ColoringIteration::FindCoalesceOpportunities() {
size_t position = predecessors[i]->GetLifetimeEnd() - 1;
LiveInterval* input_interval = inputs[i]->GetLiveInterval()->GetSiblingAt(position);
- auto it = interval_node_map_.Find(input_interval);
+ auto it = interval_node_map_.find(input_interval);
if (it != interval_node_map_.end()) {
InterferenceNode* input_node = it->second;
CreateCoalesceOpportunity(node, input_node, CoalesceKind::kPhi, position);
@@ -1380,7 +1380,7 @@ void ColoringIteration::FindCoalesceOpportunities() {
= defined_by->InputAt(0)->GetLiveInterval()->GetSiblingAt(interval->GetStart() - 1);
// TODO: Could we consider lifetime holes here?
if (input_interval->GetEnd() == interval->GetStart()) {
- auto it = interval_node_map_.Find(input_interval);
+ auto it = interval_node_map_.find(input_interval);
if (it != interval_node_map_.end()) {
InterferenceNode* input_node = it->second;
CreateCoalesceOpportunity(node,
@@ -1407,7 +1407,7 @@ void ColoringIteration::FindCoalesceOpportunities() {
LiveInterval* input_interval = inputs[i]->GetLiveInterval()->GetSiblingAt(def_point);
if (input_interval != nullptr &&
input_interval->HasHighInterval() == interval->HasHighInterval()) {
- auto it = interval_node_map_.Find(input_interval);
+ auto it = interval_node_map_.find(input_interval);
if (it != interval_node_map_.end()) {
InterferenceNode* input_node = it->second;
CreateCoalesceOpportunity(node,
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index a70b0664dc..7144775c2b 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -40,6 +40,12 @@ using Strategy = RegisterAllocator::Strategy;
class RegisterAllocatorTest : public OptimizingUnitTest {
protected:
+ void SetUp() OVERRIDE {
+ // This test is using the x86 ISA.
+ OverrideInstructionSetFeatures(InstructionSet::kX86, "default");
+ OptimizingUnitTest::SetUp();
+ }
+
// These functions need to access private variables of LocationSummary, so we declare it
// as a member of RegisterAllocatorTest, which we make a friend class.
void SameAsFirstInputHint(Strategy strategy);
@@ -81,9 +87,7 @@ TEST_F(RegisterAllocatorTest, test_name##_GraphColor) {\
bool RegisterAllocatorTest::Check(const std::vector<uint16_t>& data, Strategy strategy) {
HGraph* graph = CreateCFG(data);
- std::unique_ptr<const X86InstructionSetFeatures> features_x86(
- X86InstructionSetFeatures::FromCppDefines());
- x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
+ x86::CodeGeneratorX86 codegen(graph, *compiler_options_);
SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
std::unique_ptr<RegisterAllocator> register_allocator =
@@ -98,9 +102,7 @@ bool RegisterAllocatorTest::Check(const std::vector<uint16_t>& data, Strategy st
*/
TEST_F(RegisterAllocatorTest, ValidateIntervals) {
HGraph* graph = CreateGraph();
- std::unique_ptr<const X86InstructionSetFeatures> features_x86(
- X86InstructionSetFeatures::FromCppDefines());
- x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
+ x86::CodeGeneratorX86 codegen(graph, *compiler_options_);
ScopedArenaVector<LiveInterval*> intervals(GetScopedAllocator()->Adapter());
// Test with two intervals of the same range.
@@ -324,9 +326,7 @@ void RegisterAllocatorTest::Loop3(Strategy strategy) {
Instruction::GOTO | 0xF900);
HGraph* graph = CreateCFG(data);
- std::unique_ptr<const X86InstructionSetFeatures> features_x86(
- X86InstructionSetFeatures::FromCppDefines());
- x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
+ x86::CodeGeneratorX86 codegen(graph, *compiler_options_);
SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
std::unique_ptr<RegisterAllocator> register_allocator =
@@ -359,9 +359,7 @@ TEST_F(RegisterAllocatorTest, FirstRegisterUse) {
Instruction::RETURN_VOID);
HGraph* graph = CreateCFG(data);
- std::unique_ptr<const X86InstructionSetFeatures> features_x86(
- X86InstructionSetFeatures::FromCppDefines());
- x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
+ x86::CodeGeneratorX86 codegen(graph, *compiler_options_);
SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
@@ -412,9 +410,7 @@ void RegisterAllocatorTest::DeadPhi(Strategy strategy) {
HGraph* graph = CreateCFG(data);
SsaDeadPhiElimination(graph).Run();
- std::unique_ptr<const X86InstructionSetFeatures> features_x86(
- X86InstructionSetFeatures::FromCppDefines());
- x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
+ x86::CodeGeneratorX86 codegen(graph, *compiler_options_);
SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
std::unique_ptr<RegisterAllocator> register_allocator =
@@ -438,9 +434,7 @@ TEST_F(RegisterAllocatorTest, FreeUntil) {
HGraph* graph = CreateCFG(data);
SsaDeadPhiElimination(graph).Run();
- std::unique_ptr<const X86InstructionSetFeatures> features_x86(
- X86InstructionSetFeatures::FromCppDefines());
- x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
+ x86::CodeGeneratorX86 codegen(graph, *compiler_options_);
SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
RegisterAllocatorLinearScan register_allocator(GetScopedAllocator(), &codegen, liveness);
@@ -566,9 +560,7 @@ void RegisterAllocatorTest::PhiHint(Strategy strategy) {
{
HGraph* graph = BuildIfElseWithPhi(&phi, &input1, &input2);
- std::unique_ptr<const X86InstructionSetFeatures> features_x86(
- X86InstructionSetFeatures::FromCppDefines());
- x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
+ x86::CodeGeneratorX86 codegen(graph, *compiler_options_);
SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
@@ -584,9 +576,7 @@ void RegisterAllocatorTest::PhiHint(Strategy strategy) {
{
HGraph* graph = BuildIfElseWithPhi(&phi, &input1, &input2);
- std::unique_ptr<const X86InstructionSetFeatures> features_x86(
- X86InstructionSetFeatures::FromCppDefines());
- x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
+ x86::CodeGeneratorX86 codegen(graph, *compiler_options_);
SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
@@ -604,9 +594,7 @@ void RegisterAllocatorTest::PhiHint(Strategy strategy) {
{
HGraph* graph = BuildIfElseWithPhi(&phi, &input1, &input2);
- std::unique_ptr<const X86InstructionSetFeatures> features_x86(
- X86InstructionSetFeatures::FromCppDefines());
- x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
+ x86::CodeGeneratorX86 codegen(graph, *compiler_options_);
SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
@@ -624,9 +612,7 @@ void RegisterAllocatorTest::PhiHint(Strategy strategy) {
{
HGraph* graph = BuildIfElseWithPhi(&phi, &input1, &input2);
- std::unique_ptr<const X86InstructionSetFeatures> features_x86(
- X86InstructionSetFeatures::FromCppDefines());
- x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
+ x86::CodeGeneratorX86 codegen(graph, *compiler_options_);
SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
@@ -689,9 +675,7 @@ void RegisterAllocatorTest::ExpectedInRegisterHint(Strategy strategy) {
{
HGraph* graph = BuildFieldReturn(&field, &ret);
- std::unique_ptr<const X86InstructionSetFeatures> features_x86(
- X86InstructionSetFeatures::FromCppDefines());
- x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
+ x86::CodeGeneratorX86 codegen(graph, *compiler_options_);
SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
@@ -705,9 +689,7 @@ void RegisterAllocatorTest::ExpectedInRegisterHint(Strategy strategy) {
{
HGraph* graph = BuildFieldReturn(&field, &ret);
- std::unique_ptr<const X86InstructionSetFeatures> features_x86(
- X86InstructionSetFeatures::FromCppDefines());
- x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
+ x86::CodeGeneratorX86 codegen(graph, *compiler_options_);
SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
@@ -761,9 +743,7 @@ void RegisterAllocatorTest::SameAsFirstInputHint(Strategy strategy) {
{
HGraph* graph = BuildTwoSubs(&first_sub, &second_sub);
- std::unique_ptr<const X86InstructionSetFeatures> features_x86(
- X86InstructionSetFeatures::FromCppDefines());
- x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
+ x86::CodeGeneratorX86 codegen(graph, *compiler_options_);
SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
@@ -778,9 +758,7 @@ void RegisterAllocatorTest::SameAsFirstInputHint(Strategy strategy) {
{
HGraph* graph = BuildTwoSubs(&first_sub, &second_sub);
- std::unique_ptr<const X86InstructionSetFeatures> features_x86(
- X86InstructionSetFeatures::FromCppDefines());
- x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
+ x86::CodeGeneratorX86 codegen(graph, *compiler_options_);
SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
@@ -834,9 +812,7 @@ HGraph* RegisterAllocatorTest::BuildDiv(HInstruction** div) {
void RegisterAllocatorTest::ExpectedExactInRegisterAndSameOutputHint(Strategy strategy) {
HInstruction *div;
HGraph* graph = BuildDiv(&div);
- std::unique_ptr<const X86InstructionSetFeatures> features_x86(
- X86InstructionSetFeatures::FromCppDefines());
- x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
+ x86::CodeGeneratorX86 codegen(graph, *compiler_options_);
SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
liveness.Analyze();
@@ -934,9 +910,7 @@ TEST_F(RegisterAllocatorTest, SpillInactive) {
new (GetAllocator()) LocationSummary(fourth->GetDefinedBy(), LocationSummary::kNoCall);
locations->SetOut(Location::RequiresRegister());
- std::unique_ptr<const X86InstructionSetFeatures> features_x86(
- X86InstructionSetFeatures::FromCppDefines());
- x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
+ x86::CodeGeneratorX86 codegen(graph, *compiler_options_);
SsaLivenessAnalysis liveness(graph, &codegen, GetScopedAllocator());
// Populate the instructions in the liveness object, to please the register allocator.
for (size_t i = 0; i < 32; ++i) {
diff --git a/compiler/optimizing/scheduler.h b/compiler/optimizing/scheduler.h
index 8e98f192d8..c7683e04a7 100644
--- a/compiler/optimizing/scheduler.h
+++ b/compiler/optimizing/scheduler.h
@@ -262,14 +262,14 @@ class SchedulingGraph : public ValueObject {
std::unique_ptr<SchedulingNode> node(
new (allocator_) SchedulingNode(instr, allocator_, is_scheduling_barrier));
SchedulingNode* result = node.get();
- nodes_map_.Insert(std::make_pair(instr, std::move(node)));
+ nodes_map_.insert(std::make_pair(instr, std::move(node)));
contains_scheduling_barrier_ |= is_scheduling_barrier;
AddDependencies(instr, is_scheduling_barrier);
return result;
}
void Clear() {
- nodes_map_.Clear();
+ nodes_map_.clear();
contains_scheduling_barrier_ = false;
}
@@ -278,7 +278,7 @@ class SchedulingGraph : public ValueObject {
}
SchedulingNode* GetNode(const HInstruction* instr) const {
- auto it = nodes_map_.Find(instr);
+ auto it = nodes_map_.find(instr);
if (it == nodes_map_.end()) {
return nullptr;
} else {
@@ -294,7 +294,7 @@ class SchedulingGraph : public ValueObject {
bool HasImmediateOtherDependency(const HInstruction* node, const HInstruction* other) const;
size_t Size() const {
- return nodes_map_.Size();
+ return nodes_map_.size();
}
// Dump the scheduling graph, in dot file format, appending it to the file
diff --git a/compiler/optimizing/scheduler_test.cc b/compiler/optimizing/scheduler_test.cc
index d4cae72c7e..7079e07ae1 100644
--- a/compiler/optimizing/scheduler_test.cc
+++ b/compiler/optimizing/scheduler_test.cc
@@ -192,7 +192,9 @@ class SchedulerTest : public OptimizingUnitTest {
HInstructionScheduling scheduling(graph, target_config.GetInstructionSet());
scheduling.Run(/*only_optimize_loop_blocks*/ false, /*schedule_randomly*/ true);
+ OverrideInstructionSetFeatures(target_config.GetInstructionSet(), "default");
RunCode(target_config,
+ *compiler_options_,
graph,
[](HGraph* graph_arg) { RemoveSuspendChecks(graph_arg); },
has_result, expected);
diff --git a/compiler/optimizing/select_generator.cc b/compiler/optimizing/select_generator.cc
index 0d0f7cc748..dcc7f77fc2 100644
--- a/compiler/optimizing/select_generator.cc
+++ b/compiler/optimizing/select_generator.cc
@@ -45,7 +45,9 @@ static bool IsSimpleBlock(HBasicBlock* block) {
HInstruction* instruction = it.Current();
if (instruction->IsControlFlow()) {
return instruction->IsGoto() || instruction->IsReturn();
- } else if (instruction->CanBeMoved() && !instruction->HasSideEffects()) {
+ } else if (instruction->CanBeMoved() &&
+ !instruction->HasSideEffects() &&
+ !instruction->CanThrow()) {
if (instruction->IsSelect() &&
instruction->AsSelect()->GetCondition()->GetBlock() == block) {
// Count one HCondition and HSelect in the same block as a single instruction.
@@ -119,10 +121,14 @@ bool HSelectGenerator::Run() {
// TODO(dbrazdil): This puts an instruction between If and its condition.
// Implement moving of conditions to first users if possible.
while (!true_block->IsSingleGoto() && !true_block->IsSingleReturn()) {
- true_block->GetFirstInstruction()->MoveBefore(if_instruction);
+ HInstruction* instr = true_block->GetFirstInstruction();
+ DCHECK(!instr->CanThrow());
+ instr->MoveBefore(if_instruction);
}
while (!false_block->IsSingleGoto() && !false_block->IsSingleReturn()) {
- false_block->GetFirstInstruction()->MoveBefore(if_instruction);
+ HInstruction* instr = false_block->GetFirstInstruction();
+ DCHECK(!instr->CanThrow());
+ instr->MoveBefore(if_instruction);
}
DCHECK(true_block->IsSingleGoto() || true_block->IsSingleReturn());
DCHECK(false_block->IsSingleGoto() || false_block->IsSingleReturn());
diff --git a/compiler/optimizing/select_generator_test.cc b/compiler/optimizing/select_generator_test.cc
new file mode 100644
index 0000000000..6e6549737c
--- /dev/null
+++ b/compiler/optimizing/select_generator_test.cc
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "select_generator.h"
+
+#include "base/arena_allocator.h"
+#include "builder.h"
+#include "nodes.h"
+#include "optimizing_unit_test.h"
+#include "side_effects_analysis.h"
+
+namespace art {
+
+class SelectGeneratorTest : public ImprovedOptimizingUnitTest {
+ public:
+ void ConstructBasicGraphForSelect(HInstruction* instr) {
+ HBasicBlock* if_block = new (GetAllocator()) HBasicBlock(graph_);
+ HBasicBlock* then_block = new (GetAllocator()) HBasicBlock(graph_);
+ HBasicBlock* else_block = new (GetAllocator()) HBasicBlock(graph_);
+
+ graph_->AddBlock(if_block);
+ graph_->AddBlock(then_block);
+ graph_->AddBlock(else_block);
+
+ entry_block_->ReplaceSuccessor(return_block_, if_block);
+
+ if_block->AddSuccessor(then_block);
+ if_block->AddSuccessor(else_block);
+ then_block->AddSuccessor(return_block_);
+ else_block->AddSuccessor(return_block_);
+
+ HParameterValue* bool_param = new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
+ dex::TypeIndex(0),
+ 1,
+ DataType::Type::kBool);
+ entry_block_->AddInstruction(bool_param);
+ HIntConstant* const1 = graph_->GetIntConstant(1);
+
+ if_block->AddInstruction(new (GetAllocator()) HIf(bool_param));
+
+ then_block->AddInstruction(instr);
+ then_block->AddInstruction(new (GetAllocator()) HGoto());
+
+ else_block->AddInstruction(new (GetAllocator()) HGoto());
+
+ HPhi* phi = new (GetAllocator()) HPhi(GetAllocator(), 0, 0, DataType::Type::kInt32);
+ return_block_->AddPhi(phi);
+ phi->AddInput(instr);
+ phi->AddInput(const1);
+ }
+
+ bool CheckGraphAndTrySelectGenerator() {
+ graph_->BuildDominatorTree();
+ EXPECT_TRUE(CheckGraph());
+
+ SideEffectsAnalysis side_effects(graph_);
+ side_effects.Run();
+ return HSelectGenerator(graph_, /*handles*/ nullptr, /*stats*/ nullptr).Run();
+ }
+};
+
+// HDivZeroCheck might throw and should not be hoisted from the conditional to an unconditional.
+TEST_F(SelectGeneratorTest, testZeroCheck) {
+ InitGraph();
+ HDivZeroCheck* instr = new (GetAllocator()) HDivZeroCheck(parameter_, 0);
+ ConstructBasicGraphForSelect(instr);
+
+ ArenaVector<HInstruction*> current_locals({parameter_, graph_->GetIntConstant(1)},
+ GetAllocator()->Adapter(kArenaAllocInstruction));
+ ManuallyBuildEnvFor(instr, &current_locals);
+
+ EXPECT_FALSE(CheckGraphAndTrySelectGenerator());
+}
+
+// Test that SelectGenerator succeeds with HAdd.
+TEST_F(SelectGeneratorTest, testAdd) {
+ InitGraph();
+ HAdd* instr = new (GetAllocator()) HAdd(DataType::Type::kInt32, parameter_, parameter_, 0);
+ ConstructBasicGraphForSelect(instr);
+ EXPECT_TRUE(CheckGraphAndTrySelectGenerator());
+}
+
+} // namespace art
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index 6541043046..27482ac5bf 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -21,7 +21,6 @@
#include "base/enums.h"
#include "class_linker.h"
#include "code_generator.h"
-#include "driver/compiler_driver.h"
#include "driver/compiler_options.h"
#include "driver/dex_compilation_unit.h"
#include "gc/heap.h"
@@ -42,9 +41,7 @@ bool HSharpening::Run() {
for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
HInstruction* instruction = it.Current();
if (instruction->IsInvokeStaticOrDirect()) {
- SharpenInvokeStaticOrDirect(instruction->AsInvokeStaticOrDirect(),
- codegen_,
- compiler_driver_);
+ SharpenInvokeStaticOrDirect(instruction->AsInvokeStaticOrDirect(), codegen_);
}
// TODO: Move the sharpening of invoke-virtual/-interface/-super from HGraphBuilder
// here. Rewrite it to avoid the CompilerDriver's reliance on verifier data
@@ -70,21 +67,17 @@ static bool AOTCanEmbedMethod(ArtMethod* method, const CompilerOptions& options)
return IsInBootImage(method) && !options.GetCompilePic();
}
-static bool BootImageAOTCanEmbedMethod(ArtMethod* method, CompilerDriver* compiler_driver) {
- DCHECK(compiler_driver->GetCompilerOptions().IsBootImage());
- if (!compiler_driver->GetSupportBootImageFixup()) {
- return false;
- }
+static bool BootImageAOTCanEmbedMethod(ArtMethod* method, const CompilerOptions& compiler_options) {
+ DCHECK(compiler_options.IsBootImage());
ScopedObjectAccess soa(Thread::Current());
ObjPtr<mirror::Class> klass = method->GetDeclaringClass();
DCHECK(klass != nullptr);
const DexFile& dex_file = klass->GetDexFile();
- return compiler_driver->IsImageClass(dex_file.StringByTypeIdx(klass->GetDexTypeIndex()));
+ return compiler_options.IsImageClass(dex_file.StringByTypeIdx(klass->GetDexTypeIndex()));
}
void HSharpening::SharpenInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke,
- CodeGenerator* codegen,
- CompilerDriver* compiler_driver) {
+ CodeGenerator* codegen) {
if (invoke->IsStringInit()) {
// Not using the dex cache arrays. But we could still try to use a better dispatch...
// TODO: Use direct_method and direct_code for the appropriate StringFactory method.
@@ -111,21 +104,29 @@ void HSharpening::SharpenInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke,
// We don't optimize for debuggable as it would prevent us from obsoleting the method in some
// situations.
+ const CompilerOptions& compiler_options = codegen->GetCompilerOptions();
if (callee == codegen->GetGraph()->GetArtMethod() && !codegen->GetGraph()->IsDebuggable()) {
// Recursive call.
method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kRecursive;
code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallSelf;
+ } else if (compiler_options.IsBootImage()) {
+ if (!compiler_options.GetCompilePic()) {
+ // Test configuration, do not sharpen.
+ method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kRuntimeCall;
+ } else if (BootImageAOTCanEmbedMethod(callee, compiler_options)) {
+ method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kBootImageLinkTimePcRelative;
+ } else {
+ // Use PC-relative access to the .bss methods array.
+ method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kBssEntry;
+ }
+ code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
} else if (Runtime::Current()->UseJitCompilation() ||
- AOTCanEmbedMethod(callee, codegen->GetCompilerOptions())) {
+ AOTCanEmbedMethod(callee, compiler_options)) {
// JIT or on-device AOT compilation referencing a boot image method.
// Use the method address directly.
method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress;
method_load_data = reinterpret_cast<uintptr_t>(callee);
code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
- } else if (codegen->GetCompilerOptions().IsBootImage() &&
- BootImageAOTCanEmbedMethod(callee, compiler_driver)) {
- method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kBootImageLinkTimePcRelative;
- code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
} else if (IsInBootImage(callee)) {
// Use PC-relative access to the .data.bimg.rel.ro methods array.
method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kBootImageRelRo;
@@ -153,7 +154,6 @@ void HSharpening::SharpenInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke,
HLoadClass::LoadKind HSharpening::ComputeLoadClassKind(
HLoadClass* load_class,
CodeGenerator* codegen,
- CompilerDriver* compiler_driver,
const DexCompilationUnit& dex_compilation_unit) {
Handle<mirror::Class> klass = load_class->GetClass();
DCHECK(load_class->GetLoadKind() == HLoadClass::LoadKind::kRuntimeCall ||
@@ -177,26 +177,27 @@ HLoadClass::LoadKind HSharpening::ComputeLoadClassKind(
bool is_in_boot_image = false;
HLoadClass::LoadKind desired_load_kind = HLoadClass::LoadKind::kInvalid;
Runtime* runtime = Runtime::Current();
- if (codegen->GetCompilerOptions().IsBootImage()) {
+ const CompilerOptions& compiler_options = codegen->GetCompilerOptions();
+ if (compiler_options.IsBootImage()) {
// Compiling boot image. Check if the class is a boot image class.
DCHECK(!runtime->UseJitCompilation());
- if (!compiler_driver->GetSupportBootImageFixup()) {
- // compiler_driver_test. Do not sharpen.
+ if (!compiler_options.GetCompilePic()) {
+ // Test configuration, do not sharpen.
desired_load_kind = HLoadClass::LoadKind::kRuntimeCall;
} else if ((klass != nullptr) &&
- compiler_driver->IsImageClass(dex_file.StringByTypeIdx(type_index))) {
+ compiler_options.IsImageClass(dex_file.StringByTypeIdx(type_index))) {
is_in_boot_image = true;
desired_load_kind = HLoadClass::LoadKind::kBootImageLinkTimePcRelative;
} else {
// Not a boot image class.
- DCHECK(ContainsElement(compiler_driver->GetDexFilesForOatFile(), &dex_file));
+ DCHECK(ContainsElement(compiler_options.GetDexFilesForOatFile(), &dex_file));
desired_load_kind = HLoadClass::LoadKind::kBssEntry;
}
} else {
is_in_boot_image = (klass != nullptr) &&
runtime->GetHeap()->ObjectIsInBootImageSpace(klass.Get());
if (runtime->UseJitCompilation()) {
- DCHECK(!codegen->GetCompilerOptions().GetCompilePic());
+ DCHECK(!compiler_options.GetCompilePic());
if (is_in_boot_image) {
// TODO: Use direct pointers for all non-moving spaces, not just boot image. Bug: 29530787
desired_load_kind = HLoadClass::LoadKind::kBootImageAddress;
@@ -241,9 +242,7 @@ HLoadClass::LoadKind HSharpening::ComputeLoadClassKind(
return load_kind;
}
-static inline bool CanUseTypeCheckBitstring(ObjPtr<mirror::Class> klass,
- CodeGenerator* codegen,
- CompilerDriver* compiler_driver)
+static inline bool CanUseTypeCheckBitstring(ObjPtr<mirror::Class> klass, CodeGenerator* codegen)
REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(!klass->IsProxyClass());
DCHECK(!klass->IsArrayClass());
@@ -252,7 +251,7 @@ static inline bool CanUseTypeCheckBitstring(ObjPtr<mirror::Class> klass,
// If we're JITting, try to assign a type check bitstring (fall through).
} else if (codegen->GetCompilerOptions().IsBootImage()) {
const char* descriptor = klass->GetDexFile().StringByTypeIdx(klass->GetDexTypeIndex());
- if (!compiler_driver->IsImageClass(descriptor)) {
+ if (!codegen->GetCompilerOptions().IsImageClass(descriptor)) {
return false;
}
// If the target is a boot image class, try to assign a type check bitstring (fall through).
@@ -265,7 +264,7 @@ static inline bool CanUseTypeCheckBitstring(ObjPtr<mirror::Class> klass,
// Try to assign a type check bitstring.
MutexLock subtype_check_lock(Thread::Current(), *Locks::subtype_check_lock_);
- if ((false) && // FIXME: Inliner does not respect compiler_driver->IsClassToCompile()
+ if ((false) && // FIXME: Inliner does not respect CompilerDriver::IsClassToCompile()
// and we're hitting an unassigned bitstring in dex2oat_image_test. b/26687569
kIsDebugBuild &&
codegen->GetCompilerOptions().IsBootImage() &&
@@ -281,7 +280,6 @@ static inline bool CanUseTypeCheckBitstring(ObjPtr<mirror::Class> klass,
TypeCheckKind HSharpening::ComputeTypeCheckKind(ObjPtr<mirror::Class> klass,
CodeGenerator* codegen,
- CompilerDriver* compiler_driver,
bool needs_access_check) {
if (klass == nullptr) {
return TypeCheckKind::kUnresolvedCheck;
@@ -299,7 +297,7 @@ TypeCheckKind HSharpening::ComputeTypeCheckKind(ObjPtr<mirror::Class> klass,
return TypeCheckKind::kExactCheck;
} else if (kBitstringSubtypeCheckEnabled &&
!needs_access_check &&
- CanUseTypeCheckBitstring(klass, codegen, compiler_driver)) {
+ CanUseTypeCheckBitstring(klass, codegen)) {
// TODO: We should not need the `!needs_access_check` check but getting rid of that
// requires rewriting some optimizations in instruction simplifier.
return TypeCheckKind::kBitstringCheck;
@@ -313,7 +311,6 @@ TypeCheckKind HSharpening::ComputeTypeCheckKind(ObjPtr<mirror::Class> klass,
void HSharpening::ProcessLoadString(
HLoadString* load_string,
CodeGenerator* codegen,
- CompilerDriver* compiler_driver,
const DexCompilationUnit& dex_compilation_unit,
VariableSizedHandleScope* handles) {
DCHECK_EQ(load_string->GetLoadKind(), HLoadString::LoadKind::kRuntimeCall);
@@ -332,17 +329,18 @@ void HSharpening::ProcessLoadString(
: hs.NewHandle(class_linker->FindDexCache(soa.Self(), dex_file));
ObjPtr<mirror::String> string = nullptr;
- if (codegen->GetCompilerOptions().IsBootImage()) {
+ const CompilerOptions& compiler_options = codegen->GetCompilerOptions();
+ if (compiler_options.IsBootImage()) {
// Compiling boot image. Resolve the string and allocate it if needed, to ensure
// the string will be added to the boot image.
DCHECK(!runtime->UseJitCompilation());
string = class_linker->ResolveString(string_index, dex_cache);
CHECK(string != nullptr);
- if (compiler_driver->GetSupportBootImageFixup()) {
- DCHECK(ContainsElement(compiler_driver->GetDexFilesForOatFile(), &dex_file));
+ if (compiler_options.GetCompilePic()) {
+ DCHECK(ContainsElement(compiler_options.GetDexFilesForOatFile(), &dex_file));
desired_load_kind = HLoadString::LoadKind::kBootImageLinkTimePcRelative;
} else {
- // compiler_driver_test. Do not sharpen.
+ // Test configuration, do not sharpen.
desired_load_kind = HLoadString::LoadKind::kRuntimeCall;
}
} else if (runtime->UseJitCompilation()) {
diff --git a/compiler/optimizing/sharpening.h b/compiler/optimizing/sharpening.h
index 9ccbcaf220..cbac361891 100644
--- a/compiler/optimizing/sharpening.h
+++ b/compiler/optimizing/sharpening.h
@@ -23,7 +23,6 @@
namespace art {
class CodeGenerator;
-class CompilerDriver;
class DexCompilationUnit;
// Optimization that tries to improve the way we dispatch methods and access types,
@@ -34,45 +33,37 @@ class HSharpening : public HOptimization {
public:
HSharpening(HGraph* graph,
CodeGenerator* codegen,
- CompilerDriver* compiler_driver,
const char* name = kSharpeningPassName)
: HOptimization(graph, name),
- codegen_(codegen),
- compiler_driver_(compiler_driver) { }
+ codegen_(codegen) { }
bool Run() OVERRIDE;
static constexpr const char* kSharpeningPassName = "sharpening";
// Used by Sharpening and InstructionSimplifier.
- static void SharpenInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke,
- CodeGenerator* codegen,
- CompilerDriver* compiler_driver);
+ static void SharpenInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke, CodeGenerator* codegen);
// Used by the builder and the inliner.
static HLoadClass::LoadKind ComputeLoadClassKind(HLoadClass* load_class,
CodeGenerator* codegen,
- CompilerDriver* compiler_driver,
const DexCompilationUnit& dex_compilation_unit)
REQUIRES_SHARED(Locks::mutator_lock_);
// Used by the builder.
static TypeCheckKind ComputeTypeCheckKind(ObjPtr<mirror::Class> klass,
CodeGenerator* codegen,
- CompilerDriver* compiler_driver,
bool needs_access_check)
REQUIRES_SHARED(Locks::mutator_lock_);
// Used by the builder.
static void ProcessLoadString(HLoadString* load_string,
CodeGenerator* codegen,
- CompilerDriver* compiler_driver,
const DexCompilationUnit& dex_compilation_unit,
VariableSizedHandleScope* handles);
private:
CodeGenerator* codegen_;
- CompilerDriver* compiler_driver_;
};
} // namespace art
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index dd54468217..dda29a1b4b 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -440,6 +440,62 @@ static bool HasAliasInEnvironments(HInstruction* instruction) {
return false;
}
+void SsaBuilder::ReplaceUninitializedStringPhis() {
+ ScopedArenaHashSet<HInstruction*> seen_instructions(
+ local_allocator_->Adapter(kArenaAllocGraphBuilder));
+ ScopedArenaVector<HInstruction*> worklist(local_allocator_->Adapter(kArenaAllocGraphBuilder));
+
+ // Iterate over all inputs and uses of the phi, recursively, until all related instructions
+ // have been visited.
+ for (const auto& pair : uninitialized_string_phis_) {
+ HPhi* string_phi = pair.first;
+ HInvoke* invoke = pair.second;
+ worklist.push_back(string_phi);
+ HNewInstance* found_instance = nullptr;
+ do {
+ HInstruction* current = worklist.back();
+ worklist.pop_back();
+ if (seen_instructions.find(current) != seen_instructions.end()) {
+ continue;
+ }
+ seen_instructions.insert(current);
+ if (current->IsNewInstance()) {
+ // If it is the first time we see the allocation, replace its uses. We don't register
+ // it through `RemoveRedundantUninitializedStrings`, as that method makes assumption about
+ // aliasing and environment uses that don't hold when the string escapes to phis.
+ // Note that this also means we will keep the (useless) allocation.
+ if (found_instance == nullptr) {
+ found_instance = current->AsNewInstance();
+ } else {
+ DCHECK(found_instance == current);
+ }
+ } else if (current->IsPhi()) {
+ // Push all inputs to the worklist. Those should be Phis or NewInstance.
+ for (HInstruction* input : current->GetInputs()) {
+ DCHECK(input->IsPhi() || input->IsNewInstance()) << input->DebugName();
+ worklist.push_back(input);
+ }
+ } else {
+ // The verifier prevents any other DEX uses of the uninitialized string.
+ DCHECK(current->IsEqual() || current->IsNotEqual());
+ continue;
+ }
+ current->ReplaceUsesDominatedBy(invoke, invoke);
+ current->ReplaceEnvUsesDominatedBy(invoke, invoke);
+ // Push all users to the worklist. Now that we have replaced
+ // the uses dominated by the invokes, the remaining users should only
+ // be Phi, or Equal/NotEqual.
+ for (const HUseListNode<HInstruction*>& use : current->GetUses()) {
+ HInstruction* user = use.GetUser();
+ DCHECK(user->IsPhi() || user->IsEqual() || user->IsNotEqual()) << user->DebugName();
+ worklist.push_back(user);
+ }
+ } while (!worklist.empty());
+ seen_instructions.clear();
+ DCHECK(found_instance != nullptr);
+ }
+}
+
void SsaBuilder::RemoveRedundantUninitializedStrings() {
if (graph_->IsDebuggable()) {
// Do not perform the optimization for consistency with the interpreter
@@ -488,27 +544,32 @@ void SsaBuilder::RemoveRedundantUninitializedStrings() {
GraphAnalysisResult SsaBuilder::BuildSsa() {
DCHECK(!graph_->IsInSsaForm());
- // 1) Propagate types of phis. At this point, phis are typed void in the general
+ // Replace Phis that feed in a String.<init>, as well as their aliases, with
+ // the actual String allocation invocation. We do this first, as the phis stored in
+ // the data structure might get removed from the graph in later stages during `BuildSsa`.
+ ReplaceUninitializedStringPhis();
+
+ // Propagate types of phis. At this point, phis are typed void in the general
// case, or float/double/reference if we created an equivalent phi. So we need
// to propagate the types across phis to give them a correct type. If a type
// conflict is detected in this stage, the phi is marked dead.
RunPrimitiveTypePropagation();
- // 2) Now that the correct primitive types have been assigned, we can get rid
+ // Now that the correct primitive types have been assigned, we can get rid
// of redundant phis. Note that we cannot do this phase before type propagation,
// otherwise we could get rid of phi equivalents, whose presence is a requirement
// for the type propagation phase. Note that this is to satisfy statement (a)
// of the SsaBuilder (see ssa_builder.h).
SsaRedundantPhiElimination(graph_).Run();
- // 3) Fix the type for null constants which are part of an equality comparison.
+ // Fix the type for null constants which are part of an equality comparison.
// We need to do this after redundant phi elimination, to ensure the only cases
// that we can see are reference comparison against 0. The redundant phi
// elimination ensures we do not see a phi taking two 0 constants in a HEqual
// or HNotEqual.
FixNullConstantType();
- // 4) Compute type of reference type instructions. The pass assumes that
+ // Compute type of reference type instructions. The pass assumes that
// NullConstant has been fixed up.
ReferenceTypePropagation(graph_,
class_loader_,
@@ -516,7 +577,7 @@ GraphAnalysisResult SsaBuilder::BuildSsa() {
handles_,
/* is_first_run */ true).Run();
- // 5) HInstructionBuilder duplicated ArrayGet instructions with ambiguous type
+ // HInstructionBuilder duplicated ArrayGet instructions with ambiguous type
// (int/float or long/double) and marked ArraySets with ambiguous input type.
// Now that RTP computed the type of the array input, the ambiguity can be
// resolved and the correct equivalents kept.
@@ -524,13 +585,13 @@ GraphAnalysisResult SsaBuilder::BuildSsa() {
return kAnalysisFailAmbiguousArrayOp;
}
- // 6) Mark dead phis. This will mark phis which are not used by instructions
+ // Mark dead phis. This will mark phis which are not used by instructions
// or other live phis. If compiling as debuggable code, phis will also be kept
// live if they have an environment use.
SsaDeadPhiElimination dead_phi_elimimation(graph_);
dead_phi_elimimation.MarkDeadPhis();
- // 7) Make sure environments use the right phi equivalent: a phi marked dead
+ // Make sure environments use the right phi equivalent: a phi marked dead
// can have a phi equivalent that is not dead. In that case we have to replace
// it with the live equivalent because deoptimization and try/catch rely on
// environments containing values of all live vregs at that point. Note that
@@ -539,14 +600,14 @@ GraphAnalysisResult SsaBuilder::BuildSsa() {
// environments to just reference one.
FixEnvironmentPhis();
- // 8) Now that the right phis are used for the environments, we can eliminate
+ // Now that the right phis are used for the environments, we can eliminate
// phis we do not need. Regardless of the debuggable status, this phase is
/// necessary for statement (b) of the SsaBuilder (see ssa_builder.h), as well
// as for the code generation, which does not deal with phis of conflicting
// input types.
dead_phi_elimimation.EliminateDeadPhis();
- // 9) HInstructionBuidler replaced uses of NewInstances of String with the
+ // HInstructionBuidler replaced uses of NewInstances of String with the
// results of their corresponding StringFactory calls. Unless the String
// objects are used before they are initialized, they can be replaced with
// NullConstant. Note that this optimization is valid only if unsimplified
diff --git a/compiler/optimizing/ssa_builder.h b/compiler/optimizing/ssa_builder.h
index 60831a9e6a..765544508e 100644
--- a/compiler/optimizing/ssa_builder.h
+++ b/compiler/optimizing/ssa_builder.h
@@ -61,7 +61,8 @@ class SsaBuilder : public ValueObject {
local_allocator_(local_allocator),
ambiguous_agets_(local_allocator->Adapter(kArenaAllocGraphBuilder)),
ambiguous_asets_(local_allocator->Adapter(kArenaAllocGraphBuilder)),
- uninitialized_strings_(local_allocator->Adapter(kArenaAllocGraphBuilder)) {
+ uninitialized_strings_(local_allocator->Adapter(kArenaAllocGraphBuilder)),
+ uninitialized_string_phis_(local_allocator->Adapter(kArenaAllocGraphBuilder)) {
graph_->InitializeInexactObjectRTI(handles);
}
@@ -96,6 +97,10 @@ class SsaBuilder : public ValueObject {
}
}
+ void AddUninitializedStringPhi(HPhi* phi, HInvoke* invoke) {
+ uninitialized_string_phis_.push_back(std::make_pair(phi, invoke));
+ }
+
private:
void SetLoopHeaderPhiInputs();
void FixEnvironmentPhis();
@@ -118,6 +123,7 @@ class SsaBuilder : public ValueObject {
HArrayGet* GetFloatOrDoubleEquivalentOfArrayGet(HArrayGet* aget);
void RemoveRedundantUninitializedStrings();
+ void ReplaceUninitializedStringPhis();
HGraph* const graph_;
Handle<mirror::ClassLoader> class_loader_;
@@ -131,6 +137,7 @@ class SsaBuilder : public ValueObject {
ScopedArenaVector<HArrayGet*> ambiguous_agets_;
ScopedArenaVector<HArraySet*> ambiguous_asets_;
ScopedArenaVector<HNewInstance*> uninitialized_strings_;
+ ScopedArenaVector<std::pair<HPhi*, HInvoke*>> uninitialized_string_phis_;
DISALLOW_COPY_AND_ASSIGN(SsaBuilder);
};
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index f6bd05269e..2f782f39fc 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -195,14 +195,19 @@ void SsaLivenessAnalysis::ComputeLiveRanges() {
// SsaLivenessAnalysis.
for (size_t i = 0, e = environment->Size(); i < e; ++i) {
HInstruction* instruction = environment->GetInstructionAt(i);
+ if (instruction == nullptr) {
+ continue;
+ }
bool should_be_live = ShouldBeLiveForEnvironment(current, instruction);
+ // If this environment use does not keep the instruction live, it does not
+ // affect the live range of that instruction.
if (should_be_live) {
CHECK(instruction->HasSsaIndex()) << instruction->DebugName();
live_in->SetBit(instruction->GetSsaIndex());
- }
- if (instruction != nullptr) {
- instruction->GetLiveInterval()->AddUse(
- current, environment, i, /* actual_user */ nullptr, should_be_live);
+ instruction->GetLiveInterval()->AddUse(current,
+ environment,
+ i,
+ /* actual_user */ nullptr);
}
}
}
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index f83bb52b69..83ca5bd5fa 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -300,8 +300,7 @@ class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> {
void AddUse(HInstruction* instruction,
HEnvironment* environment,
size_t input_index,
- HInstruction* actual_user = nullptr,
- bool keep_alive = false) {
+ HInstruction* actual_user = nullptr) {
bool is_environment = (environment != nullptr);
LocationSummary* locations = instruction->GetLocations();
if (actual_user == nullptr) {
@@ -359,12 +358,6 @@ class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> {
uses_.push_front(*new_use);
}
- if (is_environment && !keep_alive) {
- // If this environment use does not keep the instruction live, it does not
- // affect the live range of that instruction.
- return;
- }
-
size_t start_block_position = instruction->GetBlock()->GetLifetimeStart();
if (first_range_ == nullptr) {
// First time we see a use of that interval.
@@ -1157,8 +1150,11 @@ class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> {
* of an instruction that has a primitive type make the instruction live.
* If the graph does not have the debuggable property, the environment
* use has no effect, and may get a 'none' value after register allocation.
+ * (d) When compiling in OSR mode, all loops in the compiled method may be entered
+ * from the interpreter via SuspendCheck; such use in SuspendCheck makes the instruction
+ * live.
*
- * (b) and (c) are implemented through SsaLivenessAnalysis::ShouldBeLiveForEnvironment.
+ * (b), (c) and (d) are implemented through SsaLivenessAnalysis::ShouldBeLiveForEnvironment.
*/
class SsaLivenessAnalysis : public ValueObject {
public:
@@ -1259,14 +1255,18 @@ class SsaLivenessAnalysis : public ValueObject {
// Returns whether `instruction` in an HEnvironment held by `env_holder`
// should be kept live by the HEnvironment.
static bool ShouldBeLiveForEnvironment(HInstruction* env_holder, HInstruction* instruction) {
- if (instruction == nullptr) return false;
+ DCHECK(instruction != nullptr);
// A value that's not live in compiled code may still be needed in interpreter,
// due to code motion, etc.
if (env_holder->IsDeoptimize()) return true;
// A value live at a throwing instruction in a try block may be copied by
// the exception handler to its location at the top of the catch block.
if (env_holder->CanThrowIntoCatchBlock()) return true;
- if (instruction->GetBlock()->GetGraph()->IsDebuggable()) return true;
+ HGraph* graph = instruction->GetBlock()->GetGraph();
+ if (graph->IsDebuggable()) return true;
+ // When compiling in OSR mode, all loops in the compiled method may be entered
+ // from the interpreter via SuspendCheck; thus we need to preserve the environment.
+ if (env_holder->IsSuspendCheck() && graph->IsCompilingOsr()) return true;
return instruction->GetType() == DataType::Type::kReference;
}
diff --git a/compiler/optimizing/ssa_liveness_analysis_test.cc b/compiler/optimizing/ssa_liveness_analysis_test.cc
index b9bfbaa173..a683c698d9 100644
--- a/compiler/optimizing/ssa_liveness_analysis_test.cc
+++ b/compiler/optimizing/ssa_liveness_analysis_test.cc
@@ -28,18 +28,11 @@
namespace art {
class SsaLivenessAnalysisTest : public OptimizingUnitTest {
- public:
- SsaLivenessAnalysisTest()
- : graph_(CreateGraph()),
- compiler_options_(),
- instruction_set_(kRuntimeISA) {
- std::string error_msg;
- instruction_set_features_ =
- InstructionSetFeatures::FromVariant(instruction_set_, "default", &error_msg);
- codegen_ = CodeGenerator::Create(graph_,
- instruction_set_,
- *instruction_set_features_,
- compiler_options_);
+ protected:
+ void SetUp() OVERRIDE {
+ OptimizingUnitTest::SetUp();
+ graph_ = CreateGraph();
+ codegen_ = CodeGenerator::Create(graph_, *compiler_options_);
CHECK(codegen_ != nullptr) << instruction_set_ << " is not a supported target architecture.";
// Create entry block.
entry_ = new (GetAllocator()) HBasicBlock(graph_);
@@ -57,9 +50,6 @@ class SsaLivenessAnalysisTest : public OptimizingUnitTest {
}
HGraph* graph_;
- CompilerOptions compiler_options_;
- InstructionSet instruction_set_;
- std::unique_ptr<const InstructionSetFeatures> instruction_set_features_;
std::unique_ptr<CodeGenerator> codegen_;
HBasicBlock* entry_;
};
@@ -134,12 +124,12 @@ TEST_F(SsaLivenessAnalysisTest, TestAput) {
static const char* const expected[] = {
"ranges: { [2,21) }, uses: { 15 17 21 }, { 15 19 } is_fixed: 0, is_split: 0 is_low: 0 "
"is_high: 0",
- "ranges: { [4,21) }, uses: { 19 21 }, { 15 19 } is_fixed: 0, is_split: 0 is_low: 0 "
+ "ranges: { [4,21) }, uses: { 19 21 }, { } is_fixed: 0, is_split: 0 is_low: 0 "
"is_high: 0",
- "ranges: { [6,21) }, uses: { 21 }, { 15 19 } is_fixed: 0, is_split: 0 is_low: 0 "
+ "ranges: { [6,21) }, uses: { 21 }, { } is_fixed: 0, is_split: 0 is_low: 0 "
"is_high: 0",
// Environment uses do not keep the non-reference argument alive.
- "ranges: { [8,10) }, uses: { }, { 15 19 } is_fixed: 0, is_split: 0 is_low: 0 is_high: 0",
+ "ranges: { [8,10) }, uses: { }, { } is_fixed: 0, is_split: 0 is_low: 0 is_high: 0",
// Environment uses keep the reference argument alive.
"ranges: { [10,19) }, uses: { }, { 15 19 } is_fixed: 0, is_split: 0 is_low: 0 is_high: 0",
};
@@ -207,11 +197,11 @@ TEST_F(SsaLivenessAnalysisTest, TestDeoptimize) {
static const char* const expected[] = {
"ranges: { [2,23) }, uses: { 15 17 23 }, { 15 21 } is_fixed: 0, is_split: 0 is_low: 0 "
"is_high: 0",
- "ranges: { [4,23) }, uses: { 19 23 }, { 15 21 } is_fixed: 0, is_split: 0 is_low: 0 "
+ "ranges: { [4,23) }, uses: { 19 23 }, { 21 } is_fixed: 0, is_split: 0 is_low: 0 "
"is_high: 0",
- "ranges: { [6,23) }, uses: { 23 }, { 15 21 } is_fixed: 0, is_split: 0 is_low: 0 is_high: 0",
+ "ranges: { [6,23) }, uses: { 23 }, { 21 } is_fixed: 0, is_split: 0 is_low: 0 is_high: 0",
// Environment use in HDeoptimize keeps even the non-reference argument alive.
- "ranges: { [8,21) }, uses: { }, { 15 21 } is_fixed: 0, is_split: 0 is_low: 0 is_high: 0",
+ "ranges: { [8,21) }, uses: { }, { 21 } is_fixed: 0, is_split: 0 is_low: 0 is_high: 0",
// Environment uses keep the reference argument alive.
"ranges: { [10,21) }, uses: { }, { 15 21 } is_fixed: 0, is_split: 0 is_low: 0 is_high: 0",
};
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index 3685ab2df4..3e1a36dc9b 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -31,35 +31,63 @@ namespace art {
constexpr static bool kVerifyStackMaps = kIsDebugBuild;
uint32_t StackMapStream::GetStackMapNativePcOffset(size_t i) {
- return StackMap::UnpackNativePc(stack_maps_[i].packed_native_pc, instruction_set_);
+ return StackMap::UnpackNativePc(stack_maps_[i][StackMap::kPackedNativePc], instruction_set_);
}
void StackMapStream::SetStackMapNativePcOffset(size_t i, uint32_t native_pc_offset) {
- stack_maps_[i].packed_native_pc = StackMap::PackNativePc(native_pc_offset, instruction_set_);
+ stack_maps_[i][StackMap::kPackedNativePc] =
+ StackMap::PackNativePc(native_pc_offset, instruction_set_);
+}
+
+void StackMapStream::BeginMethod(size_t frame_size_in_bytes,
+ size_t core_spill_mask,
+ size_t fp_spill_mask,
+ uint32_t num_dex_registers) {
+ DCHECK(!in_method_) << "Mismatched Begin/End calls";
+ in_method_ = true;
+ DCHECK_EQ(frame_size_in_bytes_, 0u) << "BeginMethod was already called";
+
+ frame_size_in_bytes_ = frame_size_in_bytes;
+ core_spill_mask_ = core_spill_mask;
+ fp_spill_mask_ = fp_spill_mask;
+ num_dex_registers_ = num_dex_registers;
+}
+
+void StackMapStream::EndMethod() {
+ DCHECK(in_method_) << "Mismatched Begin/End calls";
+ in_method_ = false;
}
void StackMapStream::BeginStackMapEntry(uint32_t dex_pc,
uint32_t native_pc_offset,
uint32_t register_mask,
BitVector* stack_mask,
- uint32_t num_dex_registers,
- uint8_t inlining_depth) {
+ StackMap::Kind kind) {
+ DCHECK(in_method_) << "Call BeginMethod first";
DCHECK(!in_stack_map_) << "Mismatched Begin/End calls";
in_stack_map_ = true;
- current_stack_map_ = StackMapEntry {
- .packed_native_pc = StackMap::PackNativePc(native_pc_offset, instruction_set_),
- .dex_pc = dex_pc,
- .register_mask_index = kNoValue,
- .stack_mask_index = kNoValue,
- .inline_info_index = kNoValue,
- .dex_register_mask_index = kNoValue,
- .dex_register_map_index = kNoValue,
- };
+ current_stack_map_ = BitTableBuilder<StackMap>::Entry();
+ current_stack_map_[StackMap::kKind] = static_cast<uint32_t>(kind);
+ current_stack_map_[StackMap::kPackedNativePc] =
+ StackMap::PackNativePc(native_pc_offset, instruction_set_);
+ current_stack_map_[StackMap::kDexPc] = dex_pc;
+ if (stack_maps_.size() > 0) {
+ // Check that non-catch stack maps are sorted by pc.
+ // Catch stack maps are at the end and may be unordered.
+ if (stack_maps_.back()[StackMap::kKind] == StackMap::Kind::Catch) {
+ DCHECK(current_stack_map_[StackMap::kKind] == StackMap::Kind::Catch);
+ } else if (current_stack_map_[StackMap::kKind] != StackMap::Kind::Catch) {
+ DCHECK_LE(stack_maps_.back()[StackMap::kPackedNativePc],
+ current_stack_map_[StackMap::kPackedNativePc]);
+ }
+ }
if (register_mask != 0) {
uint32_t shift = LeastSignificantBit(register_mask);
- RegisterMaskEntry entry = { register_mask >> shift, shift };
- current_stack_map_.register_mask_index = register_masks_.Dedup(&entry);
+ BitTableBuilder<RegisterMask>::Entry entry;
+ entry[RegisterMask::kValue] = register_mask >> shift;
+ entry[RegisterMask::kShift] = shift;
+ current_stack_map_[StackMap::kRegisterMaskIndex] = register_masks_.Dedup(&entry);
}
// The compiler assumes the bit vector will be read during PrepareForFillIn(),
// and it might modify the data before that. Therefore, just store the pointer.
@@ -67,15 +95,24 @@ void StackMapStream::BeginStackMapEntry(uint32_t dex_pc,
lazy_stack_masks_.push_back(stack_mask);
current_inline_infos_.clear();
current_dex_registers_.clear();
- expected_num_dex_registers_ = num_dex_registers;
+ expected_num_dex_registers_ = num_dex_registers_;
if (kVerifyStackMaps) {
size_t stack_map_index = stack_maps_.size();
// Create lambda method, which will be executed at the very end to verify data.
// Parameters and local variables will be captured(stored) by the lambda "[=]".
dchecks_.emplace_back([=](const CodeInfo& code_info) {
+ if (kind == StackMap::Kind::Default || kind == StackMap::Kind::OSR) {
+ StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset,
+ instruction_set_);
+ CHECK_EQ(stack_map.Row(), stack_map_index);
+ } else if (kind == StackMap::Kind::Catch) {
+ StackMap stack_map = code_info.GetCatchStackMapForDexPc(dex_pc);
+ CHECK_EQ(stack_map.Row(), stack_map_index);
+ }
StackMap stack_map = code_info.GetStackMapAt(stack_map_index);
CHECK_EQ(stack_map.GetNativePcOffset(instruction_set_), native_pc_offset);
+ CHECK_EQ(stack_map.GetKind(), static_cast<uint32_t>(kind));
CHECK_EQ(stack_map.GetDexPc(), dex_pc);
CHECK_EQ(code_info.GetRegisterMaskOf(stack_map), register_mask);
BitMemoryRegion seen_stack_mask = code_info.GetStackMaskOf(stack_map);
@@ -83,9 +120,6 @@ void StackMapStream::BeginStackMapEntry(uint32_t dex_pc,
for (size_t b = 0; b < seen_stack_mask.size_in_bits(); b++) {
CHECK_EQ(seen_stack_mask.LoadBit(b), stack_mask != nullptr && stack_mask->IsBitSet(b));
}
- CHECK_EQ(stack_map.HasInlineInfo(), (inlining_depth != 0));
- CHECK_EQ(code_info.GetInlineDepthOf(stack_map), inlining_depth);
- CHECK_EQ(stack_map.HasDexRegisterMap(), (num_dex_registers != 0));
});
}
}
@@ -93,35 +127,43 @@ void StackMapStream::BeginStackMapEntry(uint32_t dex_pc,
void StackMapStream::EndStackMapEntry() {
DCHECK(in_stack_map_) << "Mismatched Begin/End calls";
in_stack_map_ = false;
- DCHECK_EQ(expected_num_dex_registers_, current_dex_registers_.size());
// Generate index into the InlineInfo table.
+ size_t inlining_depth = current_inline_infos_.size();
if (!current_inline_infos_.empty()) {
- current_inline_infos_.back().is_last = InlineInfo::kLast;
- current_stack_map_.inline_info_index =
+ current_inline_infos_.back()[InlineInfo::kIsLast] = InlineInfo::kLast;
+ current_stack_map_[StackMap::kInlineInfoIndex] =
inline_infos_.Dedup(current_inline_infos_.data(), current_inline_infos_.size());
}
- stack_maps_.Add(current_stack_map_);
-}
+ // Generate delta-compressed dex register map.
+ size_t num_dex_registers = current_dex_registers_.size();
+ if (!current_dex_registers_.empty()) {
+ DCHECK_EQ(expected_num_dex_registers_, current_dex_registers_.size());
+ CreateDexRegisterMap();
+ }
-void StackMapStream::AddDexRegisterEntry(DexRegisterLocation::Kind kind, int32_t value) {
- current_dex_registers_.push_back(DexRegisterLocation(kind, value));
+ stack_maps_.Add(current_stack_map_);
- // We have collected all the dex registers for StackMap/InlineInfo - create the map.
- if (current_dex_registers_.size() == expected_num_dex_registers_) {
- CreateDexRegisterMap();
+ if (kVerifyStackMaps) {
+ size_t stack_map_index = stack_maps_.size() - 1;
+ dchecks_.emplace_back([=](const CodeInfo& code_info) {
+ StackMap stack_map = code_info.GetStackMapAt(stack_map_index);
+ CHECK_EQ(stack_map.HasDexRegisterMap(), (num_dex_registers != 0));
+ CHECK_EQ(stack_map.HasInlineInfo(), (inlining_depth != 0));
+ CHECK_EQ(code_info.GetInlineInfosOf(stack_map).size(), inlining_depth);
+ });
}
}
void StackMapStream::AddInvoke(InvokeType invoke_type, uint32_t dex_method_index) {
- uint32_t packed_native_pc = current_stack_map_.packed_native_pc;
+ uint32_t packed_native_pc = current_stack_map_[StackMap::kPackedNativePc];
size_t invoke_info_index = invoke_infos_.size();
- invoke_infos_.Add(InvokeInfoEntry {
- .packed_native_pc = packed_native_pc,
- .invoke_type = invoke_type,
- .method_info_index = method_infos_.Dedup(&dex_method_index),
- });
+ BitTableBuilder<InvokeInfo>::Entry entry;
+ entry[InvokeInfo::kPackedNativePc] = packed_native_pc;
+ entry[InvokeInfo::kInvokeType] = invoke_type;
+ entry[InvokeInfo::kMethodInfoIndex] = method_infos_.Dedup({dex_method_index});
+ invoke_infos_.Add(entry);
if (kVerifyStackMaps) {
dchecks_.emplace_back([=](const CodeInfo& code_info) {
@@ -129,7 +171,7 @@ void StackMapStream::AddInvoke(InvokeType invoke_type, uint32_t dex_method_index
CHECK_EQ(invoke_info.GetNativePcOffset(instruction_set_),
StackMap::UnpackNativePc(packed_native_pc, instruction_set_));
CHECK_EQ(invoke_info.GetInvokeType(), invoke_type);
- CHECK_EQ(method_infos_[invoke_info.GetMethodInfoIndex()], dex_method_index);
+ CHECK_EQ(method_infos_[invoke_info.GetMethodInfoIndex()][0], dex_method_index);
});
}
}
@@ -138,51 +180,45 @@ void StackMapStream::BeginInlineInfoEntry(ArtMethod* method,
uint32_t dex_pc,
uint32_t num_dex_registers,
const DexFile* outer_dex_file) {
+ DCHECK(in_stack_map_) << "Call BeginStackMapEntry first";
DCHECK(!in_inline_info_) << "Mismatched Begin/End calls";
in_inline_info_ = true;
DCHECK_EQ(expected_num_dex_registers_, current_dex_registers_.size());
- InlineInfoEntry entry = {
- .is_last = InlineInfo::kMore,
- .dex_pc = dex_pc,
- .method_info_index = kNoValue,
- .art_method_hi = kNoValue,
- .art_method_lo = kNoValue,
- .dex_register_mask_index = kNoValue,
- .dex_register_map_index = kNoValue,
- };
+ expected_num_dex_registers_ += num_dex_registers;
+
+ BitTableBuilder<InlineInfo>::Entry entry;
+ entry[InlineInfo::kIsLast] = InlineInfo::kMore;
+ entry[InlineInfo::kDexPc] = dex_pc;
+ entry[InlineInfo::kNumberOfDexRegisters] = static_cast<uint32_t>(expected_num_dex_registers_);
if (EncodeArtMethodInInlineInfo(method)) {
- entry.art_method_hi = High32Bits(reinterpret_cast<uintptr_t>(method));
- entry.art_method_lo = Low32Bits(reinterpret_cast<uintptr_t>(method));
+ entry[InlineInfo::kArtMethodHi] = High32Bits(reinterpret_cast<uintptr_t>(method));
+ entry[InlineInfo::kArtMethodLo] = Low32Bits(reinterpret_cast<uintptr_t>(method));
} else {
if (dex_pc != static_cast<uint32_t>(-1) && kIsDebugBuild) {
ScopedObjectAccess soa(Thread::Current());
DCHECK(IsSameDexFile(*outer_dex_file, *method->GetDexFile()));
}
uint32_t dex_method_index = method->GetDexMethodIndexUnchecked();
- entry.method_info_index = method_infos_.Dedup(&dex_method_index);
+ entry[InlineInfo::kMethodInfoIndex] = method_infos_.Dedup({dex_method_index});
}
current_inline_infos_.push_back(entry);
- current_dex_registers_.clear();
- expected_num_dex_registers_ = num_dex_registers;
-
if (kVerifyStackMaps) {
size_t stack_map_index = stack_maps_.size();
size_t depth = current_inline_infos_.size() - 1;
dchecks_.emplace_back([=](const CodeInfo& code_info) {
StackMap stack_map = code_info.GetStackMapAt(stack_map_index);
- InlineInfo inline_info = code_info.GetInlineInfoAtDepth(stack_map, depth);
+ InlineInfo inline_info = code_info.GetInlineInfosOf(stack_map)[depth];
CHECK_EQ(inline_info.GetDexPc(), dex_pc);
bool encode_art_method = EncodeArtMethodInInlineInfo(method);
CHECK_EQ(inline_info.EncodesArtMethod(), encode_art_method);
if (encode_art_method) {
CHECK_EQ(inline_info.GetArtMethod(), method);
} else {
- CHECK_EQ(method_infos_[inline_info.GetMethodInfoIndex()],
+ CHECK_EQ(method_infos_[inline_info.GetMethodInfoIndex()][0],
method->GetDexMethodIndexUnchecked());
}
- CHECK_EQ(inline_info.HasDexRegisterMap(), (num_dex_registers != 0));
});
}
}
@@ -193,56 +229,68 @@ void StackMapStream::EndInlineInfoEntry() {
DCHECK_EQ(expected_num_dex_registers_, current_dex_registers_.size());
}
-// Create dex register map (bitmap + indices + catalogue entries)
-// based on the currently accumulated list of DexRegisterLocations.
+// Create delta-compressed dex register map based on the current list of DexRegisterLocations.
+// All dex registers for a stack map are concatenated - inlined registers are just appended.
void StackMapStream::CreateDexRegisterMap() {
- // Create mask and map based on current registers.
+ // These are fields rather than local variables so that we can reuse the reserved memory.
temp_dex_register_mask_.ClearAllBits();
temp_dex_register_map_.clear();
+
+ // Ensure that the arrays that hold previous state are big enough to be safely indexed below.
+ if (previous_dex_registers_.size() < current_dex_registers_.size()) {
+ previous_dex_registers_.resize(current_dex_registers_.size(), DexRegisterLocation::None());
+ dex_register_timestamp_.resize(current_dex_registers_.size(), 0u);
+ }
+
+ // Set bit in the mask for each register that has been changed since the previous stack map.
+ // Modified registers are stored in the catalogue and the catalogue index added to the list.
for (size_t i = 0; i < current_dex_registers_.size(); i++) {
DexRegisterLocation reg = current_dex_registers_[i];
- if (reg.IsLive()) {
- DexRegisterEntry entry = DexRegisterEntry {
- .kind = static_cast<uint32_t>(reg.GetKind()),
- .packed_value = DexRegisterInfo::PackValue(reg.GetKind(), reg.GetValue()),
- };
+ // Distance is difference between this index and the index of last modification.
+ uint32_t distance = stack_maps_.size() - dex_register_timestamp_[i];
+ if (previous_dex_registers_[i] != reg || distance > kMaxDexRegisterMapSearchDistance) {
+ BitTableBuilder<DexRegisterInfo>::Entry entry;
+ entry[DexRegisterInfo::kKind] = static_cast<uint32_t>(reg.GetKind());
+ entry[DexRegisterInfo::kPackedValue] =
+ DexRegisterInfo::PackValue(reg.GetKind(), reg.GetValue());
+ uint32_t index = reg.IsLive() ? dex_register_catalog_.Dedup(&entry) : kNoValue;
temp_dex_register_mask_.SetBit(i);
- temp_dex_register_map_.push_back(dex_register_catalog_.Dedup(&entry));
+ temp_dex_register_map_.push_back({index});
+ previous_dex_registers_[i] = reg;
+ dex_register_timestamp_[i] = stack_maps_.size();
}
}
- // Set the mask and map for the current StackMap/InlineInfo.
- uint32_t mask_index = StackMap::kNoValue; // Represents mask with all zero bits.
+ // Set the mask and map for the current StackMap (which includes inlined registers).
if (temp_dex_register_mask_.GetNumberOfBits() != 0) {
- mask_index = dex_register_masks_.Dedup(temp_dex_register_mask_.GetRawStorage(),
- temp_dex_register_mask_.GetNumberOfBits());
+ current_stack_map_[StackMap::kDexRegisterMaskIndex] =
+ dex_register_masks_.Dedup(temp_dex_register_mask_.GetRawStorage(),
+ temp_dex_register_mask_.GetNumberOfBits());
}
- uint32_t map_index = dex_register_maps_.Dedup(temp_dex_register_map_.data(),
- temp_dex_register_map_.size());
- if (!current_inline_infos_.empty()) {
- current_inline_infos_.back().dex_register_mask_index = mask_index;
- current_inline_infos_.back().dex_register_map_index = map_index;
- } else {
- current_stack_map_.dex_register_mask_index = mask_index;
- current_stack_map_.dex_register_map_index = map_index;
+ if (!current_dex_registers_.empty()) {
+ current_stack_map_[StackMap::kDexRegisterMapIndex] =
+ dex_register_maps_.Dedup(temp_dex_register_map_.data(),
+ temp_dex_register_map_.size());
}
if (kVerifyStackMaps) {
size_t stack_map_index = stack_maps_.size();
- int32_t depth = current_inline_infos_.size() - 1;
// We need to make copy of the current registers for later (when the check is run).
- auto expected_dex_registers = std::make_shared<std::vector<DexRegisterLocation>>(
+ auto expected_dex_registers = std::make_shared<dchecked_vector<DexRegisterLocation>>(
current_dex_registers_.begin(), current_dex_registers_.end());
dchecks_.emplace_back([=](const CodeInfo& code_info) {
StackMap stack_map = code_info.GetStackMapAt(stack_map_index);
- size_t num_dex_registers = expected_dex_registers->size();
- DexRegisterMap map = (depth == -1)
- ? code_info.GetDexRegisterMapOf(stack_map, num_dex_registers)
- : code_info.GetDexRegisterMapAtDepth(depth, stack_map, num_dex_registers);
- CHECK_EQ(map.size(), num_dex_registers);
- for (size_t r = 0; r < num_dex_registers; r++) {
- CHECK_EQ(expected_dex_registers->at(r), map.Get(r));
+ uint32_t expected_reg = 0;
+ for (DexRegisterLocation reg : code_info.GetDexRegisterMapOf(stack_map)) {
+ CHECK_EQ((*expected_dex_registers)[expected_reg++], reg);
}
+ for (InlineInfo inline_info : code_info.GetInlineInfosOf(stack_map)) {
+ DexRegisterMap map = code_info.GetInlineDexRegisterMapOf(stack_map, inline_info);
+ for (DexRegisterLocation reg : map) {
+ CHECK_EQ((*expected_dex_registers)[expected_reg++], reg);
+ }
+ }
+ CHECK_EQ(expected_reg, expected_dex_registers->size());
});
}
}
@@ -251,7 +299,7 @@ void StackMapStream::FillInMethodInfo(MemoryRegion region) {
{
MethodInfo info(region.begin(), method_infos_.size());
for (size_t i = 0; i < method_infos_.size(); ++i) {
- info.SetMethodIndex(i, method_infos_[i]);
+ info.SetMethodIndex(i, method_infos_[i][0]);
}
}
if (kVerifyStackMaps) {
@@ -260,53 +308,55 @@ void StackMapStream::FillInMethodInfo(MemoryRegion region) {
const size_t count = info.NumMethodIndices();
DCHECK_EQ(count, method_infos_.size());
for (size_t i = 0; i < count; ++i) {
- DCHECK_EQ(info.GetMethodIndex(i), method_infos_[i]);
+ DCHECK_EQ(info.GetMethodIndex(i), method_infos_[i][0]);
}
}
}
size_t StackMapStream::PrepareForFillIn() {
- static_assert(sizeof(StackMapEntry) == StackMap::kCount * sizeof(uint32_t), "Layout");
- static_assert(sizeof(InvokeInfoEntry) == InvokeInfo::kCount * sizeof(uint32_t), "Layout");
- static_assert(sizeof(InlineInfoEntry) == InlineInfo::kCount * sizeof(uint32_t), "Layout");
- static_assert(sizeof(DexRegisterEntry) == DexRegisterInfo::kCount * sizeof(uint32_t), "Layout");
DCHECK_EQ(out_.size(), 0u);
// Read the stack masks now. The compiler might have updated them.
for (size_t i = 0; i < lazy_stack_masks_.size(); i++) {
BitVector* stack_mask = lazy_stack_masks_[i];
if (stack_mask != nullptr && stack_mask->GetNumberOfBits() != 0) {
- stack_maps_[i].stack_mask_index =
+ stack_maps_[i][StackMap::kStackMaskIndex] =
stack_masks_.Dedup(stack_mask->GetRawStorage(), stack_mask->GetNumberOfBits());
}
}
- size_t bit_offset = 0;
- stack_maps_.Encode(&out_, &bit_offset);
- register_masks_.Encode(&out_, &bit_offset);
- stack_masks_.Encode(&out_, &bit_offset);
- invoke_infos_.Encode(&out_, &bit_offset);
- inline_infos_.Encode(&out_, &bit_offset);
- dex_register_masks_.Encode(&out_, &bit_offset);
- dex_register_maps_.Encode(&out_, &bit_offset);
- dex_register_catalog_.Encode(&out_, &bit_offset);
-
- return UnsignedLeb128Size(out_.size()) + out_.size();
+ EncodeUnsignedLeb128(&out_, frame_size_in_bytes_);
+ EncodeUnsignedLeb128(&out_, core_spill_mask_);
+ EncodeUnsignedLeb128(&out_, fp_spill_mask_);
+ EncodeUnsignedLeb128(&out_, num_dex_registers_);
+ BitMemoryWriter<ScopedArenaVector<uint8_t>> out(&out_, out_.size() * kBitsPerByte);
+ stack_maps_.Encode(out);
+ register_masks_.Encode(out);
+ stack_masks_.Encode(out);
+ invoke_infos_.Encode(out);
+ inline_infos_.Encode(out);
+ dex_register_masks_.Encode(out);
+ dex_register_maps_.Encode(out);
+ dex_register_catalog_.Encode(out);
+
+ return out_.size();
}
void StackMapStream::FillInCodeInfo(MemoryRegion region) {
DCHECK(in_stack_map_ == false) << "Mismatched Begin/End calls";
DCHECK(in_inline_info_ == false) << "Mismatched Begin/End calls";
DCHECK_NE(0u, out_.size()) << "PrepareForFillIn not called before FillIn";
- DCHECK_EQ(region.size(), UnsignedLeb128Size(out_.size()) + out_.size());
+ DCHECK_EQ(region.size(), out_.size());
+
+ region.CopyFromVector(0, out_);
- uint8_t* ptr = EncodeUnsignedLeb128(region.begin(), out_.size());
- region.CopyFromVector(ptr - region.begin(), out_);
+ // Verify that we can load the CodeInfo and check some essentials.
+ CodeInfo code_info(region);
+ CHECK_EQ(code_info.Size(), out_.size());
+ CHECK_EQ(code_info.GetNumberOfStackMaps(), stack_maps_.size());
// Verify all written data (usually only in debug builds).
if (kVerifyStackMaps) {
- CodeInfo code_info(region);
- CHECK_EQ(code_info.GetNumberOfStackMaps(), stack_maps_.size());
for (const auto& dcheck : dchecks_) {
dcheck(code_info);
}
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index d634c703ff..ed865b12f7 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -27,11 +27,10 @@
#include "dex_register_location.h"
#include "method_info.h"
#include "nodes.h"
+#include "stack_map.h"
namespace art {
-class CodeInfo;
-
/**
* Collects and builds stack maps for a method. All the stack maps
* for a method are placed in a CodeInfo object.
@@ -51,23 +50,31 @@ class StackMapStream : public ValueObject {
out_(allocator->Adapter(kArenaAllocStackMapStream)),
method_infos_(allocator),
lazy_stack_masks_(allocator->Adapter(kArenaAllocStackMapStream)),
- in_stack_map_(false),
- in_inline_info_(false),
+ current_stack_map_(),
current_inline_infos_(allocator->Adapter(kArenaAllocStackMapStream)),
current_dex_registers_(allocator->Adapter(kArenaAllocStackMapStream)),
+ previous_dex_registers_(allocator->Adapter(kArenaAllocStackMapStream)),
+ dex_register_timestamp_(allocator->Adapter(kArenaAllocStackMapStream)),
temp_dex_register_mask_(allocator, 32, true, kArenaAllocStackMapStream),
temp_dex_register_map_(allocator->Adapter(kArenaAllocStackMapStream)) {
}
+ void BeginMethod(size_t frame_size_in_bytes,
+ size_t core_spill_mask,
+ size_t fp_spill_mask,
+ uint32_t num_dex_registers);
+ void EndMethod();
+
void BeginStackMapEntry(uint32_t dex_pc,
uint32_t native_pc_offset,
- uint32_t register_mask,
- BitVector* sp_mask,
- uint32_t num_dex_registers,
- uint8_t inlining_depth);
+ uint32_t register_mask = 0,
+ BitVector* sp_mask = nullptr,
+ StackMap::Kind kind = StackMap::Kind::Default);
void EndStackMapEntry();
- void AddDexRegisterEntry(DexRegisterLocation::Kind kind, int32_t value);
+ void AddDexRegisterEntry(DexRegisterLocation::Kind kind, int32_t value) {
+ current_dex_registers_.push_back(DexRegisterLocation(kind, value));
+ }
void AddInvoke(InvokeType type, uint32_t dex_method_index);
@@ -95,76 +102,42 @@ class StackMapStream : public ValueObject {
private:
static constexpr uint32_t kNoValue = -1;
- // The fields must be uint32_t and mirror the StackMap accessor in stack_map.h!
- struct StackMapEntry {
- uint32_t packed_native_pc;
- uint32_t dex_pc;
- uint32_t register_mask_index;
- uint32_t stack_mask_index;
- uint32_t inline_info_index;
- uint32_t dex_register_mask_index;
- uint32_t dex_register_map_index;
- };
-
- // The fields must be uint32_t and mirror the InlineInfo accessor in stack_map.h!
- struct InlineInfoEntry {
- uint32_t is_last;
- uint32_t dex_pc;
- uint32_t method_info_index;
- uint32_t art_method_hi;
- uint32_t art_method_lo;
- uint32_t dex_register_mask_index;
- uint32_t dex_register_map_index;
- };
-
- // The fields must be uint32_t and mirror the InvokeInfo accessor in stack_map.h!
- struct InvokeInfoEntry {
- uint32_t packed_native_pc;
- uint32_t invoke_type;
- uint32_t method_info_index;
- };
-
- // The fields must be uint32_t and mirror the DexRegisterInfo accessor in stack_map.h!
- struct DexRegisterEntry {
- uint32_t kind;
- uint32_t packed_value;
- };
-
- // The fields must be uint32_t and mirror the RegisterMask accessor in stack_map.h!
- struct RegisterMaskEntry {
- uint32_t value;
- uint32_t shift;
- };
-
void CreateDexRegisterMap();
const InstructionSet instruction_set_;
- BitTableBuilder<StackMapEntry> stack_maps_;
- BitTableBuilder<RegisterMaskEntry> register_masks_;
+ uint32_t frame_size_in_bytes_ = 0;
+ uint32_t core_spill_mask_ = 0;
+ uint32_t fp_spill_mask_ = 0;
+ uint32_t num_dex_registers_ = 0;
+ BitTableBuilder<StackMap> stack_maps_;
+ BitTableBuilder<RegisterMask> register_masks_;
BitmapTableBuilder stack_masks_;
- BitTableBuilder<InvokeInfoEntry> invoke_infos_;
- BitTableBuilder<InlineInfoEntry> inline_infos_;
+ BitTableBuilder<InvokeInfo> invoke_infos_;
+ BitTableBuilder<InlineInfo> inline_infos_;
BitmapTableBuilder dex_register_masks_;
- BitTableBuilder<uint32_t> dex_register_maps_;
- BitTableBuilder<DexRegisterEntry> dex_register_catalog_;
+ BitTableBuilder<MaskInfo> dex_register_maps_;
+ BitTableBuilder<DexRegisterInfo> dex_register_catalog_;
ScopedArenaVector<uint8_t> out_;
- BitTableBuilder<uint32_t> method_infos_;
+ BitTableBuilderBase<1> method_infos_;
ScopedArenaVector<BitVector*> lazy_stack_masks_;
// Variables which track the current state between Begin/End calls;
- bool in_stack_map_;
- bool in_inline_info_;
- StackMapEntry current_stack_map_;
- ScopedArenaVector<InlineInfoEntry> current_inline_infos_;
+ bool in_method_ = false;
+ bool in_stack_map_ = false;
+ bool in_inline_info_ = false;
+ BitTableBuilder<StackMap>::Entry current_stack_map_;
+ ScopedArenaVector<BitTableBuilder<InlineInfo>::Entry> current_inline_infos_;
ScopedArenaVector<DexRegisterLocation> current_dex_registers_;
+ ScopedArenaVector<DexRegisterLocation> previous_dex_registers_;
+ ScopedArenaVector<uint32_t> dex_register_timestamp_; // Stack map index of last change.
size_t expected_num_dex_registers_;
// Temporary variables used in CreateDexRegisterMap.
// They are here so that we can reuse the reserved memory.
ArenaBitVector temp_dex_register_mask_;
- ScopedArenaVector<uint32_t> temp_dex_register_map_;
+ ScopedArenaVector<BitTableBuilder<DexRegisterMapInfo>::Entry> temp_dex_register_map_;
// A set of lambda functions to be executed at the end to verify
// the encoded data. It is generally only used in debug builds.
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index 77aa3ef965..9ed90a4839 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -52,14 +52,16 @@ TEST(StackMapTest, Test1) {
ArenaStack arena_stack(&pool);
ScopedArenaAllocator allocator(&arena_stack);
StackMapStream stream(&allocator, kRuntimeISA);
+ stream.BeginMethod(32, 0, 0, 2);
ArenaBitVector sp_mask(&allocator, 0, false);
size_t number_of_dex_registers = 2;
- stream.BeginStackMapEntry(0, 64 * kPcAlign, 0x3, &sp_mask, number_of_dex_registers, 0);
+ stream.BeginStackMapEntry(0, 64 * kPcAlign, 0x3, &sp_mask);
stream.AddDexRegisterEntry(Kind::kInStack, 0); // Short location.
stream.AddDexRegisterEntry(Kind::kConstant, -2); // Short location.
stream.EndStackMapEntry();
+ stream.EndMethod();
size_t size = stream.PrepareForFillIn();
void* memory = allocator.Alloc(size, kArenaAllocMisc);
MemoryRegion region(memory, size);
@@ -81,16 +83,16 @@ TEST(StackMapTest, Test1) {
ASSERT_TRUE(CheckStackMask(code_info, stack_map, sp_mask));
ASSERT_TRUE(stack_map.HasDexRegisterMap());
- DexRegisterMap dex_register_map =
- code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
- ASSERT_TRUE(dex_register_map.IsDexRegisterLive(0));
- ASSERT_TRUE(dex_register_map.IsDexRegisterLive(1));
+ DexRegisterMap dex_register_map = code_info.GetDexRegisterMapOf(stack_map);
+ ASSERT_EQ(number_of_dex_registers, dex_register_map.size());
+ ASSERT_TRUE(dex_register_map[0].IsLive());
+ ASSERT_TRUE(dex_register_map[1].IsLive());
ASSERT_EQ(2u, dex_register_map.GetNumberOfLiveDexRegisters());
- ASSERT_EQ(Kind::kInStack, dex_register_map.GetLocationKind(0));
- ASSERT_EQ(Kind::kConstant, dex_register_map.GetLocationKind(1));
- ASSERT_EQ(0, dex_register_map.GetStackOffsetInBytes(0));
- ASSERT_EQ(-2, dex_register_map.GetConstant(1));
+ ASSERT_EQ(Kind::kInStack, dex_register_map[0].GetKind());
+ ASSERT_EQ(Kind::kConstant, dex_register_map[1].GetKind());
+ ASSERT_EQ(0, dex_register_map[0].GetStackOffsetInBytes());
+ ASSERT_EQ(-2, dex_register_map[1].GetConstant());
DexRegisterLocation location0 = code_info.GetDexRegisterCatalogEntry(0);
DexRegisterLocation location1 = code_info.GetDexRegisterCatalogEntry(1);
@@ -107,6 +109,7 @@ TEST(StackMapTest, Test2) {
ArenaStack arena_stack(&pool);
ScopedArenaAllocator allocator(&arena_stack);
StackMapStream stream(&allocator, kRuntimeISA);
+ stream.BeginMethod(32, 0, 0, 2);
ArtMethod art_method;
ArenaBitVector sp_mask1(&allocator, 0, true);
@@ -114,7 +117,7 @@ TEST(StackMapTest, Test2) {
sp_mask1.SetBit(4);
size_t number_of_dex_registers = 2;
size_t number_of_dex_registers_in_inline_info = 0;
- stream.BeginStackMapEntry(0, 64 * kPcAlign, 0x3, &sp_mask1, number_of_dex_registers, 2);
+ stream.BeginStackMapEntry(0, 64 * kPcAlign, 0x3, &sp_mask1);
stream.AddDexRegisterEntry(Kind::kInStack, 0); // Short location.
stream.AddDexRegisterEntry(Kind::kConstant, -2); // Large location.
stream.BeginInlineInfoEntry(&art_method, 3, number_of_dex_registers_in_inline_info);
@@ -126,7 +129,7 @@ TEST(StackMapTest, Test2) {
ArenaBitVector sp_mask2(&allocator, 0, true);
sp_mask2.SetBit(3);
sp_mask2.SetBit(8);
- stream.BeginStackMapEntry(1, 128 * kPcAlign, 0xFF, &sp_mask2, number_of_dex_registers, 0);
+ stream.BeginStackMapEntry(1, 128 * kPcAlign, 0xFF, &sp_mask2);
stream.AddDexRegisterEntry(Kind::kInRegister, 18); // Short location.
stream.AddDexRegisterEntry(Kind::kInFpuRegister, 3); // Short location.
stream.EndStackMapEntry();
@@ -134,7 +137,7 @@ TEST(StackMapTest, Test2) {
ArenaBitVector sp_mask3(&allocator, 0, true);
sp_mask3.SetBit(1);
sp_mask3.SetBit(5);
- stream.BeginStackMapEntry(2, 192 * kPcAlign, 0xAB, &sp_mask3, number_of_dex_registers, 0);
+ stream.BeginStackMapEntry(2, 192 * kPcAlign, 0xAB, &sp_mask3);
stream.AddDexRegisterEntry(Kind::kInRegister, 6); // Short location.
stream.AddDexRegisterEntry(Kind::kInRegisterHigh, 8); // Short location.
stream.EndStackMapEntry();
@@ -142,11 +145,12 @@ TEST(StackMapTest, Test2) {
ArenaBitVector sp_mask4(&allocator, 0, true);
sp_mask4.SetBit(6);
sp_mask4.SetBit(7);
- stream.BeginStackMapEntry(3, 256 * kPcAlign, 0xCD, &sp_mask4, number_of_dex_registers, 0);
+ stream.BeginStackMapEntry(3, 256 * kPcAlign, 0xCD, &sp_mask4);
stream.AddDexRegisterEntry(Kind::kInFpuRegister, 3); // Short location, same in stack map 2.
stream.AddDexRegisterEntry(Kind::kInFpuRegisterHigh, 1); // Short location.
stream.EndStackMapEntry();
+ stream.EndMethod();
size_t size = stream.PrepareForFillIn();
void* memory = allocator.Alloc(size, kArenaAllocMisc);
MemoryRegion region(memory, size);
@@ -170,16 +174,16 @@ TEST(StackMapTest, Test2) {
ASSERT_TRUE(CheckStackMask(code_info, stack_map, sp_mask1));
ASSERT_TRUE(stack_map.HasDexRegisterMap());
- DexRegisterMap dex_register_map =
- code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
- ASSERT_TRUE(dex_register_map.IsDexRegisterLive(0));
- ASSERT_TRUE(dex_register_map.IsDexRegisterLive(1));
+ DexRegisterMap dex_register_map = code_info.GetDexRegisterMapOf(stack_map);
+ ASSERT_EQ(number_of_dex_registers, dex_register_map.size());
+ ASSERT_TRUE(dex_register_map[0].IsLive());
+ ASSERT_TRUE(dex_register_map[1].IsLive());
ASSERT_EQ(2u, dex_register_map.GetNumberOfLiveDexRegisters());
- ASSERT_EQ(Kind::kInStack, dex_register_map.GetLocationKind(0));
- ASSERT_EQ(Kind::kConstant, dex_register_map.GetLocationKind(1));
- ASSERT_EQ(0, dex_register_map.GetStackOffsetInBytes(0));
- ASSERT_EQ(-2, dex_register_map.GetConstant(1));
+ ASSERT_EQ(Kind::kInStack, dex_register_map[0].GetKind());
+ ASSERT_EQ(Kind::kConstant, dex_register_map[1].GetKind());
+ ASSERT_EQ(0, dex_register_map[0].GetStackOffsetInBytes());
+ ASSERT_EQ(-2, dex_register_map[1].GetConstant());
DexRegisterLocation location0 = code_info.GetDexRegisterCatalogEntry(0);
DexRegisterLocation location1 = code_info.GetDexRegisterCatalogEntry(1);
@@ -189,13 +193,12 @@ TEST(StackMapTest, Test2) {
ASSERT_EQ(-2, location1.GetValue());
ASSERT_TRUE(stack_map.HasInlineInfo());
- InlineInfo inline_info0 = code_info.GetInlineInfoAtDepth(stack_map, 0);
- InlineInfo inline_info1 = code_info.GetInlineInfoAtDepth(stack_map, 1);
- ASSERT_EQ(2u, code_info.GetInlineDepthOf(stack_map));
- ASSERT_EQ(3u, inline_info0.GetDexPc());
- ASSERT_EQ(2u, inline_info1.GetDexPc());
- ASSERT_TRUE(inline_info0.EncodesArtMethod());
- ASSERT_TRUE(inline_info1.EncodesArtMethod());
+ auto inline_infos = code_info.GetInlineInfosOf(stack_map);
+ ASSERT_EQ(2u, inline_infos.size());
+ ASSERT_EQ(3u, inline_infos[0].GetDexPc());
+ ASSERT_EQ(2u, inline_infos[1].GetDexPc());
+ ASSERT_TRUE(inline_infos[0].EncodesArtMethod());
+ ASSERT_TRUE(inline_infos[1].EncodesArtMethod());
}
// Second stack map.
@@ -210,16 +213,16 @@ TEST(StackMapTest, Test2) {
ASSERT_TRUE(CheckStackMask(code_info, stack_map, sp_mask2));
ASSERT_TRUE(stack_map.HasDexRegisterMap());
- DexRegisterMap dex_register_map =
- code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
- ASSERT_TRUE(dex_register_map.IsDexRegisterLive(0));
- ASSERT_TRUE(dex_register_map.IsDexRegisterLive(1));
+ DexRegisterMap dex_register_map = code_info.GetDexRegisterMapOf(stack_map);
+ ASSERT_EQ(number_of_dex_registers, dex_register_map.size());
+ ASSERT_TRUE(dex_register_map[0].IsLive());
+ ASSERT_TRUE(dex_register_map[1].IsLive());
ASSERT_EQ(2u, dex_register_map.GetNumberOfLiveDexRegisters());
- ASSERT_EQ(Kind::kInRegister, dex_register_map.GetLocationKind(0));
- ASSERT_EQ(Kind::kInFpuRegister, dex_register_map.GetLocationKind(1));
- ASSERT_EQ(18, dex_register_map.GetMachineRegister(0));
- ASSERT_EQ(3, dex_register_map.GetMachineRegister(1));
+ ASSERT_EQ(Kind::kInRegister, dex_register_map[0].GetKind());
+ ASSERT_EQ(Kind::kInFpuRegister, dex_register_map[1].GetKind());
+ ASSERT_EQ(18, dex_register_map[0].GetMachineRegister());
+ ASSERT_EQ(3, dex_register_map[1].GetMachineRegister());
DexRegisterLocation location0 = code_info.GetDexRegisterCatalogEntry(2);
DexRegisterLocation location1 = code_info.GetDexRegisterCatalogEntry(3);
@@ -243,16 +246,16 @@ TEST(StackMapTest, Test2) {
ASSERT_TRUE(CheckStackMask(code_info, stack_map, sp_mask3));
ASSERT_TRUE(stack_map.HasDexRegisterMap());
- DexRegisterMap dex_register_map =
- code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
- ASSERT_TRUE(dex_register_map.IsDexRegisterLive(0));
- ASSERT_TRUE(dex_register_map.IsDexRegisterLive(1));
+ DexRegisterMap dex_register_map = code_info.GetDexRegisterMapOf(stack_map);
+ ASSERT_EQ(number_of_dex_registers, dex_register_map.size());
+ ASSERT_TRUE(dex_register_map[0].IsLive());
+ ASSERT_TRUE(dex_register_map[1].IsLive());
ASSERT_EQ(2u, dex_register_map.GetNumberOfLiveDexRegisters());
- ASSERT_EQ(Kind::kInRegister, dex_register_map.GetLocationKind(0));
- ASSERT_EQ(Kind::kInRegisterHigh, dex_register_map.GetLocationKind(1));
- ASSERT_EQ(6, dex_register_map.GetMachineRegister(0));
- ASSERT_EQ(8, dex_register_map.GetMachineRegister(1));
+ ASSERT_EQ(Kind::kInRegister, dex_register_map[0].GetKind());
+ ASSERT_EQ(Kind::kInRegisterHigh, dex_register_map[1].GetKind());
+ ASSERT_EQ(6, dex_register_map[0].GetMachineRegister());
+ ASSERT_EQ(8, dex_register_map[1].GetMachineRegister());
DexRegisterLocation location0 = code_info.GetDexRegisterCatalogEntry(4);
DexRegisterLocation location1 = code_info.GetDexRegisterCatalogEntry(5);
@@ -276,16 +279,16 @@ TEST(StackMapTest, Test2) {
ASSERT_TRUE(CheckStackMask(code_info, stack_map, sp_mask4));
ASSERT_TRUE(stack_map.HasDexRegisterMap());
- DexRegisterMap dex_register_map =
- code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
- ASSERT_TRUE(dex_register_map.IsDexRegisterLive(0));
- ASSERT_TRUE(dex_register_map.IsDexRegisterLive(1));
+ DexRegisterMap dex_register_map = code_info.GetDexRegisterMapOf(stack_map);
+ ASSERT_EQ(number_of_dex_registers, dex_register_map.size());
+ ASSERT_TRUE(dex_register_map[0].IsLive());
+ ASSERT_TRUE(dex_register_map[1].IsLive());
ASSERT_EQ(2u, dex_register_map.GetNumberOfLiveDexRegisters());
- ASSERT_EQ(Kind::kInFpuRegister, dex_register_map.GetLocationKind(0));
- ASSERT_EQ(Kind::kInFpuRegisterHigh, dex_register_map.GetLocationKind(1));
- ASSERT_EQ(3, dex_register_map.GetMachineRegister(0));
- ASSERT_EQ(1, dex_register_map.GetMachineRegister(1));
+ ASSERT_EQ(Kind::kInFpuRegister, dex_register_map[0].GetKind());
+ ASSERT_EQ(Kind::kInFpuRegisterHigh, dex_register_map[1].GetKind());
+ ASSERT_EQ(3, dex_register_map[0].GetMachineRegister());
+ ASSERT_EQ(1, dex_register_map[1].GetMachineRegister());
DexRegisterLocation location0 = code_info.GetDexRegisterCatalogEntry(3);
DexRegisterLocation location1 = code_info.GetDexRegisterCatalogEntry(6);
@@ -303,6 +306,7 @@ TEST(StackMapTest, TestDeduplicateInlineInfoDexRegisterMap) {
ArenaStack arena_stack(&pool);
ScopedArenaAllocator allocator(&arena_stack);
StackMapStream stream(&allocator, kRuntimeISA);
+ stream.BeginMethod(32, 0, 0, 2);
ArtMethod art_method;
ArenaBitVector sp_mask1(&allocator, 0, true);
@@ -310,7 +314,7 @@ TEST(StackMapTest, TestDeduplicateInlineInfoDexRegisterMap) {
sp_mask1.SetBit(4);
const size_t number_of_dex_registers = 2;
const size_t number_of_dex_registers_in_inline_info = 2;
- stream.BeginStackMapEntry(0, 64 * kPcAlign, 0x3, &sp_mask1, number_of_dex_registers, 1);
+ stream.BeginStackMapEntry(0, 64 * kPcAlign, 0x3, &sp_mask1);
stream.AddDexRegisterEntry(Kind::kInStack, 0); // Short location.
stream.AddDexRegisterEntry(Kind::kConstant, -2); // Large location.
stream.BeginInlineInfoEntry(&art_method, 3, number_of_dex_registers_in_inline_info);
@@ -319,6 +323,7 @@ TEST(StackMapTest, TestDeduplicateInlineInfoDexRegisterMap) {
stream.EndInlineInfoEntry();
stream.EndStackMapEntry();
+ stream.EndMethod();
size_t size = stream.PrepareForFillIn();
void* memory = allocator.Alloc(size, kArenaAllocMisc);
MemoryRegion region(memory, size);
@@ -342,15 +347,16 @@ TEST(StackMapTest, TestDeduplicateInlineInfoDexRegisterMap) {
ASSERT_TRUE(CheckStackMask(code_info, stack_map, sp_mask1));
ASSERT_TRUE(stack_map.HasDexRegisterMap());
- DexRegisterMap map(code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers));
- ASSERT_TRUE(map.IsDexRegisterLive(0));
- ASSERT_TRUE(map.IsDexRegisterLive(1));
+ DexRegisterMap map(code_info.GetDexRegisterMapOf(stack_map));
+ ASSERT_EQ(number_of_dex_registers, map.size());
+ ASSERT_TRUE(map[0].IsLive());
+ ASSERT_TRUE(map[1].IsLive());
ASSERT_EQ(2u, map.GetNumberOfLiveDexRegisters());
- ASSERT_EQ(Kind::kInStack, map.GetLocationKind(0));
- ASSERT_EQ(Kind::kConstant, map.GetLocationKind(1));
- ASSERT_EQ(0, map.GetStackOffsetInBytes(0));
- ASSERT_EQ(-2, map.GetConstant(1));
+ ASSERT_EQ(Kind::kInStack, map[0].GetKind());
+ ASSERT_EQ(Kind::kConstant, map[1].GetKind());
+ ASSERT_EQ(0, map[0].GetStackOffsetInBytes());
+ ASSERT_EQ(-2, map[1].GetConstant());
DexRegisterLocation location0 = code_info.GetDexRegisterCatalogEntry(0);
DexRegisterLocation location1 = code_info.GetDexRegisterCatalogEntry(1);
@@ -358,13 +364,6 @@ TEST(StackMapTest, TestDeduplicateInlineInfoDexRegisterMap) {
ASSERT_EQ(Kind::kConstant, location1.GetKind());
ASSERT_EQ(0, location0.GetValue());
ASSERT_EQ(-2, location1.GetValue());
-
- // Test that the inline info dex register map deduplicated to the same offset as the stack map
- // one.
- ASSERT_TRUE(stack_map.HasInlineInfo());
- InlineInfo inline_info = code_info.GetInlineInfoAtDepth(stack_map, 0);
- EXPECT_EQ(inline_info.GetDexRegisterMapIndex(),
- stack_map.GetDexRegisterMapIndex());
}
}
@@ -373,14 +372,16 @@ TEST(StackMapTest, TestNonLiveDexRegisters) {
ArenaStack arena_stack(&pool);
ScopedArenaAllocator allocator(&arena_stack);
StackMapStream stream(&allocator, kRuntimeISA);
+ stream.BeginMethod(32, 0, 0, 2);
ArenaBitVector sp_mask(&allocator, 0, false);
uint32_t number_of_dex_registers = 2;
- stream.BeginStackMapEntry(0, 64 * kPcAlign, 0x3, &sp_mask, number_of_dex_registers, 0);
+ stream.BeginStackMapEntry(0, 64 * kPcAlign, 0x3, &sp_mask);
stream.AddDexRegisterEntry(Kind::kNone, 0); // No location.
stream.AddDexRegisterEntry(Kind::kConstant, -2); // Large location.
stream.EndStackMapEntry();
+ stream.EndMethod();
size_t size = stream.PrepareForFillIn();
void* memory = allocator.Alloc(size, kArenaAllocMisc);
MemoryRegion region(memory, size);
@@ -400,15 +401,15 @@ TEST(StackMapTest, TestNonLiveDexRegisters) {
ASSERT_EQ(0x3u, code_info.GetRegisterMaskOf(stack_map));
ASSERT_TRUE(stack_map.HasDexRegisterMap());
- DexRegisterMap dex_register_map =
- code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
- ASSERT_FALSE(dex_register_map.IsDexRegisterLive(0));
- ASSERT_TRUE(dex_register_map.IsDexRegisterLive(1));
+ DexRegisterMap dex_register_map = code_info.GetDexRegisterMapOf(stack_map);
+ ASSERT_EQ(number_of_dex_registers, dex_register_map.size());
+ ASSERT_FALSE(dex_register_map[0].IsLive());
+ ASSERT_TRUE(dex_register_map[1].IsLive());
ASSERT_EQ(1u, dex_register_map.GetNumberOfLiveDexRegisters());
- ASSERT_EQ(Kind::kNone, dex_register_map.GetLocationKind(0));
- ASSERT_EQ(Kind::kConstant, dex_register_map.GetLocationKind(1));
- ASSERT_EQ(-2, dex_register_map.GetConstant(1));
+ ASSERT_EQ(Kind::kNone, dex_register_map[0].GetKind());
+ ASSERT_EQ(Kind::kConstant, dex_register_map[1].GetKind());
+ ASSERT_EQ(-2, dex_register_map[1].GetConstant());
DexRegisterLocation location1 = code_info.GetDexRegisterCatalogEntry(0);
ASSERT_EQ(Kind::kConstant, location1.GetKind());
@@ -422,25 +423,27 @@ TEST(StackMapTest, TestShareDexRegisterMap) {
ArenaStack arena_stack(&pool);
ScopedArenaAllocator allocator(&arena_stack);
StackMapStream stream(&allocator, kRuntimeISA);
+ stream.BeginMethod(32, 0, 0, 2);
ArenaBitVector sp_mask(&allocator, 0, false);
uint32_t number_of_dex_registers = 2;
// First stack map.
- stream.BeginStackMapEntry(0, 64 * kPcAlign, 0x3, &sp_mask, number_of_dex_registers, 0);
+ stream.BeginStackMapEntry(0, 64 * kPcAlign, 0x3, &sp_mask);
stream.AddDexRegisterEntry(Kind::kInRegister, 0); // Short location.
stream.AddDexRegisterEntry(Kind::kConstant, -2); // Large location.
stream.EndStackMapEntry();
// Second stack map, which should share the same dex register map.
- stream.BeginStackMapEntry(0, 64 * kPcAlign, 0x3, &sp_mask, number_of_dex_registers, 0);
+ stream.BeginStackMapEntry(0, 65 * kPcAlign, 0x3, &sp_mask);
stream.AddDexRegisterEntry(Kind::kInRegister, 0); // Short location.
stream.AddDexRegisterEntry(Kind::kConstant, -2); // Large location.
stream.EndStackMapEntry();
// Third stack map (doesn't share the dex register map).
- stream.BeginStackMapEntry(0, 64 * kPcAlign, 0x3, &sp_mask, number_of_dex_registers, 0);
+ stream.BeginStackMapEntry(0, 66 * kPcAlign, 0x3, &sp_mask);
stream.AddDexRegisterEntry(Kind::kInRegister, 2); // Short location.
stream.AddDexRegisterEntry(Kind::kConstant, -2); // Large location.
stream.EndStackMapEntry();
+ stream.EndMethod();
size_t size = stream.PrepareForFillIn();
void* memory = allocator.Alloc(size, kArenaAllocMisc);
MemoryRegion region(memory, size);
@@ -450,29 +453,28 @@ TEST(StackMapTest, TestShareDexRegisterMap) {
// Verify first stack map.
StackMap sm0 = ci.GetStackMapAt(0);
- DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm0, number_of_dex_registers);
- ASSERT_EQ(0, dex_registers0.GetMachineRegister(0));
- ASSERT_EQ(-2, dex_registers0.GetConstant(1));
+ DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm0);
+ ASSERT_EQ(number_of_dex_registers, dex_registers0.size());
+ ASSERT_EQ(0, dex_registers0[0].GetMachineRegister());
+ ASSERT_EQ(-2, dex_registers0[1].GetConstant());
// Verify second stack map.
StackMap sm1 = ci.GetStackMapAt(1);
- DexRegisterMap dex_registers1 = ci.GetDexRegisterMapOf(sm1, number_of_dex_registers);
- ASSERT_EQ(0, dex_registers1.GetMachineRegister(0));
- ASSERT_EQ(-2, dex_registers1.GetConstant(1));
+ DexRegisterMap dex_registers1 = ci.GetDexRegisterMapOf(sm1);
+ ASSERT_EQ(number_of_dex_registers, dex_registers1.size());
+ ASSERT_EQ(0, dex_registers1[0].GetMachineRegister());
+ ASSERT_EQ(-2, dex_registers1[1].GetConstant());
// Verify third stack map.
StackMap sm2 = ci.GetStackMapAt(2);
- DexRegisterMap dex_registers2 = ci.GetDexRegisterMapOf(sm2, number_of_dex_registers);
- ASSERT_EQ(2, dex_registers2.GetMachineRegister(0));
- ASSERT_EQ(-2, dex_registers2.GetConstant(1));
-
- // Verify dex register map offsets.
- ASSERT_EQ(sm0.GetDexRegisterMapIndex(),
- sm1.GetDexRegisterMapIndex());
- ASSERT_NE(sm0.GetDexRegisterMapIndex(),
- sm2.GetDexRegisterMapIndex());
- ASSERT_NE(sm1.GetDexRegisterMapIndex(),
- sm2.GetDexRegisterMapIndex());
+ DexRegisterMap dex_registers2 = ci.GetDexRegisterMapOf(sm2);
+ ASSERT_EQ(number_of_dex_registers, dex_registers2.size());
+ ASSERT_EQ(2, dex_registers2[0].GetMachineRegister());
+ ASSERT_EQ(-2, dex_registers2[1].GetConstant());
+
+ // Verify dex register mask offsets.
+ ASSERT_FALSE(sm1.HasDexRegisterMaskIndex()); // No delta.
+ ASSERT_TRUE(sm2.HasDexRegisterMaskIndex()); // Has delta.
}
TEST(StackMapTest, TestNoDexRegisterMap) {
@@ -480,17 +482,19 @@ TEST(StackMapTest, TestNoDexRegisterMap) {
ArenaStack arena_stack(&pool);
ScopedArenaAllocator allocator(&arena_stack);
StackMapStream stream(&allocator, kRuntimeISA);
+ stream.BeginMethod(32, 0, 0, 1);
ArenaBitVector sp_mask(&allocator, 0, false);
uint32_t number_of_dex_registers = 0;
- stream.BeginStackMapEntry(0, 64 * kPcAlign, 0x3, &sp_mask, number_of_dex_registers, 0);
+ stream.BeginStackMapEntry(0, 64 * kPcAlign, 0x3, &sp_mask);
stream.EndStackMapEntry();
number_of_dex_registers = 1;
- stream.BeginStackMapEntry(1, 68 * kPcAlign, 0x4, &sp_mask, number_of_dex_registers, 0);
+ stream.BeginStackMapEntry(1, 68 * kPcAlign, 0x4, &sp_mask);
stream.AddDexRegisterEntry(Kind::kNone, 0);
stream.EndStackMapEntry();
+ stream.EndMethod();
size_t size = stream.PrepareForFillIn();
void* memory = allocator.Alloc(size, kArenaAllocMisc);
MemoryRegion region(memory, size);
@@ -528,6 +532,7 @@ TEST(StackMapTest, InlineTest) {
ArenaStack arena_stack(&pool);
ScopedArenaAllocator allocator(&arena_stack);
StackMapStream stream(&allocator, kRuntimeISA);
+ stream.BeginMethod(32, 0, 0, 2);
ArtMethod art_method;
ArenaBitVector sp_mask1(&allocator, 0, true);
@@ -535,7 +540,7 @@ TEST(StackMapTest, InlineTest) {
sp_mask1.SetBit(4);
// First stack map.
- stream.BeginStackMapEntry(0, 64 * kPcAlign, 0x3, &sp_mask1, 2, 2);
+ stream.BeginStackMapEntry(0, 10 * kPcAlign, 0x3, &sp_mask1);
stream.AddDexRegisterEntry(Kind::kInStack, 0);
stream.AddDexRegisterEntry(Kind::kConstant, 4);
@@ -551,7 +556,7 @@ TEST(StackMapTest, InlineTest) {
stream.EndStackMapEntry();
// Second stack map.
- stream.BeginStackMapEntry(2, 22 * kPcAlign, 0x3, &sp_mask1, 2, 3);
+ stream.BeginStackMapEntry(2, 22 * kPcAlign, 0x3, &sp_mask1);
stream.AddDexRegisterEntry(Kind::kInStack, 56);
stream.AddDexRegisterEntry(Kind::kConstant, 0);
@@ -569,13 +574,13 @@ TEST(StackMapTest, InlineTest) {
stream.EndStackMapEntry();
// Third stack map.
- stream.BeginStackMapEntry(4, 56 * kPcAlign, 0x3, &sp_mask1, 2, 0);
+ stream.BeginStackMapEntry(4, 56 * kPcAlign, 0x3, &sp_mask1);
stream.AddDexRegisterEntry(Kind::kNone, 0);
stream.AddDexRegisterEntry(Kind::kConstant, 4);
stream.EndStackMapEntry();
// Fourth stack map.
- stream.BeginStackMapEntry(6, 78 * kPcAlign, 0x3, &sp_mask1, 2, 3);
+ stream.BeginStackMapEntry(6, 78 * kPcAlign, 0x3, &sp_mask1);
stream.AddDexRegisterEntry(Kind::kInStack, 56);
stream.AddDexRegisterEntry(Kind::kConstant, 0);
@@ -591,6 +596,7 @@ TEST(StackMapTest, InlineTest) {
stream.EndStackMapEntry();
+ stream.EndMethod();
size_t size = stream.PrepareForFillIn();
void* memory = allocator.Alloc(size, kArenaAllocMisc);
MemoryRegion region(memory, size);
@@ -602,64 +608,66 @@ TEST(StackMapTest, InlineTest) {
// Verify first stack map.
StackMap sm0 = ci.GetStackMapAt(0);
- DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm0, 2);
- ASSERT_EQ(0, dex_registers0.GetStackOffsetInBytes(0));
- ASSERT_EQ(4, dex_registers0.GetConstant(1));
-
- InlineInfo if0_0 = ci.GetInlineInfoAtDepth(sm0, 0);
- InlineInfo if0_1 = ci.GetInlineInfoAtDepth(sm0, 1);
- ASSERT_EQ(2u, ci.GetInlineDepthOf(sm0));
- ASSERT_EQ(2u, if0_0.GetDexPc());
- ASSERT_TRUE(if0_0.EncodesArtMethod());
- ASSERT_EQ(3u, if0_1.GetDexPc());
- ASSERT_TRUE(if0_1.EncodesArtMethod());
-
- DexRegisterMap dex_registers1 = ci.GetDexRegisterMapAtDepth(0, sm0, 1);
- ASSERT_EQ(8, dex_registers1.GetStackOffsetInBytes(0));
-
- DexRegisterMap dex_registers2 = ci.GetDexRegisterMapAtDepth(1, sm0, 3);
- ASSERT_EQ(16, dex_registers2.GetStackOffsetInBytes(0));
- ASSERT_EQ(20, dex_registers2.GetConstant(1));
- ASSERT_EQ(15, dex_registers2.GetMachineRegister(2));
+ DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm0);
+ ASSERT_EQ(2u, dex_registers0.size());
+ ASSERT_EQ(0, dex_registers0[0].GetStackOffsetInBytes());
+ ASSERT_EQ(4, dex_registers0[1].GetConstant());
+
+ auto inline_infos = ci.GetInlineInfosOf(sm0);
+ ASSERT_EQ(2u, inline_infos.size());
+ ASSERT_EQ(2u, inline_infos[0].GetDexPc());
+ ASSERT_TRUE(inline_infos[0].EncodesArtMethod());
+ ASSERT_EQ(3u, inline_infos[1].GetDexPc());
+ ASSERT_TRUE(inline_infos[1].EncodesArtMethod());
+
+ DexRegisterMap dex_registers1 = ci.GetInlineDexRegisterMapOf(sm0, inline_infos[0]);
+ ASSERT_EQ(1u, dex_registers1.size());
+ ASSERT_EQ(8, dex_registers1[0].GetStackOffsetInBytes());
+
+ DexRegisterMap dex_registers2 = ci.GetInlineDexRegisterMapOf(sm0, inline_infos[1]);
+ ASSERT_EQ(3u, dex_registers2.size());
+ ASSERT_EQ(16, dex_registers2[0].GetStackOffsetInBytes());
+ ASSERT_EQ(20, dex_registers2[1].GetConstant());
+ ASSERT_EQ(15, dex_registers2[2].GetMachineRegister());
}
{
// Verify second stack map.
StackMap sm1 = ci.GetStackMapAt(1);
- DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm1, 2);
- ASSERT_EQ(56, dex_registers0.GetStackOffsetInBytes(0));
- ASSERT_EQ(0, dex_registers0.GetConstant(1));
-
- InlineInfo if1_0 = ci.GetInlineInfoAtDepth(sm1, 0);
- InlineInfo if1_1 = ci.GetInlineInfoAtDepth(sm1, 1);
- InlineInfo if1_2 = ci.GetInlineInfoAtDepth(sm1, 2);
- ASSERT_EQ(3u, ci.GetInlineDepthOf(sm1));
- ASSERT_EQ(2u, if1_0.GetDexPc());
- ASSERT_TRUE(if1_0.EncodesArtMethod());
- ASSERT_EQ(3u, if1_1.GetDexPc());
- ASSERT_TRUE(if1_1.EncodesArtMethod());
- ASSERT_EQ(5u, if1_2.GetDexPc());
- ASSERT_TRUE(if1_2.EncodesArtMethod());
-
- DexRegisterMap dex_registers1 = ci.GetDexRegisterMapAtDepth(0, sm1, 1);
- ASSERT_EQ(12, dex_registers1.GetStackOffsetInBytes(0));
-
- DexRegisterMap dex_registers2 = ci.GetDexRegisterMapAtDepth(1, sm1, 3);
- ASSERT_EQ(80, dex_registers2.GetStackOffsetInBytes(0));
- ASSERT_EQ(10, dex_registers2.GetConstant(1));
- ASSERT_EQ(5, dex_registers2.GetMachineRegister(2));
-
- ASSERT_FALSE(if1_2.HasDexRegisterMap());
+ DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm1);
+ ASSERT_EQ(2u, dex_registers0.size());
+ ASSERT_EQ(56, dex_registers0[0].GetStackOffsetInBytes());
+ ASSERT_EQ(0, dex_registers0[1].GetConstant());
+
+ auto inline_infos = ci.GetInlineInfosOf(sm1);
+ ASSERT_EQ(3u, inline_infos.size());
+ ASSERT_EQ(2u, inline_infos[0].GetDexPc());
+ ASSERT_TRUE(inline_infos[0].EncodesArtMethod());
+ ASSERT_EQ(3u, inline_infos[1].GetDexPc());
+ ASSERT_TRUE(inline_infos[1].EncodesArtMethod());
+ ASSERT_EQ(5u, inline_infos[2].GetDexPc());
+ ASSERT_TRUE(inline_infos[2].EncodesArtMethod());
+
+ DexRegisterMap dex_registers1 = ci.GetInlineDexRegisterMapOf(sm1, inline_infos[0]);
+ ASSERT_EQ(1u, dex_registers1.size());
+ ASSERT_EQ(12, dex_registers1[0].GetStackOffsetInBytes());
+
+ DexRegisterMap dex_registers2 = ci.GetInlineDexRegisterMapOf(sm1, inline_infos[1]);
+ ASSERT_EQ(3u, dex_registers2.size());
+ ASSERT_EQ(80, dex_registers2[0].GetStackOffsetInBytes());
+ ASSERT_EQ(10, dex_registers2[1].GetConstant());
+ ASSERT_EQ(5, dex_registers2[2].GetMachineRegister());
}
{
// Verify third stack map.
StackMap sm2 = ci.GetStackMapAt(2);
- DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm2, 2);
- ASSERT_FALSE(dex_registers0.IsDexRegisterLive(0));
- ASSERT_EQ(4, dex_registers0.GetConstant(1));
+ DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm2);
+ ASSERT_EQ(2u, dex_registers0.size());
+ ASSERT_FALSE(dex_registers0[0].IsLive());
+ ASSERT_EQ(4, dex_registers0[1].GetConstant());
ASSERT_FALSE(sm2.HasInlineInfo());
}
@@ -667,29 +675,28 @@ TEST(StackMapTest, InlineTest) {
// Verify fourth stack map.
StackMap sm3 = ci.GetStackMapAt(3);
- DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm3, 2);
- ASSERT_EQ(56, dex_registers0.GetStackOffsetInBytes(0));
- ASSERT_EQ(0, dex_registers0.GetConstant(1));
-
- InlineInfo if2_0 = ci.GetInlineInfoAtDepth(sm3, 0);
- InlineInfo if2_1 = ci.GetInlineInfoAtDepth(sm3, 1);
- InlineInfo if2_2 = ci.GetInlineInfoAtDepth(sm3, 2);
- ASSERT_EQ(3u, ci.GetInlineDepthOf(sm3));
- ASSERT_EQ(2u, if2_0.GetDexPc());
- ASSERT_TRUE(if2_0.EncodesArtMethod());
- ASSERT_EQ(5u, if2_1.GetDexPc());
- ASSERT_TRUE(if2_1.EncodesArtMethod());
- ASSERT_EQ(10u, if2_2.GetDexPc());
- ASSERT_TRUE(if2_2.EncodesArtMethod());
-
- ASSERT_FALSE(if2_0.HasDexRegisterMap());
-
- DexRegisterMap dex_registers1 = ci.GetDexRegisterMapAtDepth(1, sm3, 1);
- ASSERT_EQ(2, dex_registers1.GetMachineRegister(0));
-
- DexRegisterMap dex_registers2 = ci.GetDexRegisterMapAtDepth(2, sm3, 2);
- ASSERT_FALSE(dex_registers2.IsDexRegisterLive(0));
- ASSERT_EQ(3, dex_registers2.GetMachineRegister(1));
+ DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm3);
+ ASSERT_EQ(2u, dex_registers0.size());
+ ASSERT_EQ(56, dex_registers0[0].GetStackOffsetInBytes());
+ ASSERT_EQ(0, dex_registers0[1].GetConstant());
+
+ auto inline_infos = ci.GetInlineInfosOf(sm3);
+ ASSERT_EQ(3u, inline_infos.size());
+ ASSERT_EQ(2u, inline_infos[0].GetDexPc());
+ ASSERT_TRUE(inline_infos[0].EncodesArtMethod());
+ ASSERT_EQ(5u, inline_infos[1].GetDexPc());
+ ASSERT_TRUE(inline_infos[1].EncodesArtMethod());
+ ASSERT_EQ(10u, inline_infos[2].GetDexPc());
+ ASSERT_TRUE(inline_infos[2].EncodesArtMethod());
+
+ DexRegisterMap dex_registers1 = ci.GetInlineDexRegisterMapOf(sm3, inline_infos[1]);
+ ASSERT_EQ(1u, dex_registers1.size());
+ ASSERT_EQ(2, dex_registers1[0].GetMachineRegister());
+
+ DexRegisterMap dex_registers2 = ci.GetInlineDexRegisterMapOf(sm3, inline_infos[2]);
+ ASSERT_EQ(2u, dex_registers2.size());
+ ASSERT_FALSE(dex_registers2[0].IsLive());
+ ASSERT_EQ(3, dex_registers2[1].GetMachineRegister());
}
}
@@ -726,15 +733,17 @@ TEST(StackMapTest, TestDeduplicateStackMask) {
ArenaStack arena_stack(&pool);
ScopedArenaAllocator allocator(&arena_stack);
StackMapStream stream(&allocator, kRuntimeISA);
+ stream.BeginMethod(32, 0, 0, 0);
ArenaBitVector sp_mask(&allocator, 0, true);
sp_mask.SetBit(1);
sp_mask.SetBit(4);
- stream.BeginStackMapEntry(0, 4 * kPcAlign, 0x3, &sp_mask, 0, 0);
+ stream.BeginStackMapEntry(0, 4 * kPcAlign, 0x3, &sp_mask);
stream.EndStackMapEntry();
- stream.BeginStackMapEntry(0, 8 * kPcAlign, 0x3, &sp_mask, 0, 0);
+ stream.BeginStackMapEntry(0, 8 * kPcAlign, 0x3, &sp_mask);
stream.EndStackMapEntry();
+ stream.EndMethod();
size_t size = stream.PrepareForFillIn();
void* memory = allocator.Alloc(size, kArenaAllocMisc);
MemoryRegion region(memory, size);
@@ -754,19 +763,21 @@ TEST(StackMapTest, TestInvokeInfo) {
ArenaStack arena_stack(&pool);
ScopedArenaAllocator allocator(&arena_stack);
StackMapStream stream(&allocator, kRuntimeISA);
+ stream.BeginMethod(32, 0, 0, 0);
ArenaBitVector sp_mask(&allocator, 0, true);
sp_mask.SetBit(1);
- stream.BeginStackMapEntry(0, 4 * kPcAlign, 0x3, &sp_mask, 0, 0);
+ stream.BeginStackMapEntry(0, 4 * kPcAlign, 0x3, &sp_mask);
stream.AddInvoke(kSuper, 1);
stream.EndStackMapEntry();
- stream.BeginStackMapEntry(0, 8 * kPcAlign, 0x3, &sp_mask, 0, 0);
+ stream.BeginStackMapEntry(0, 8 * kPcAlign, 0x3, &sp_mask);
stream.AddInvoke(kStatic, 3);
stream.EndStackMapEntry();
- stream.BeginStackMapEntry(0, 16 * kPcAlign, 0x3, &sp_mask, 0, 0);
+ stream.BeginStackMapEntry(0, 16 * kPcAlign, 0x3, &sp_mask);
stream.AddInvoke(kDirect, 65535);
stream.EndStackMapEntry();
+ stream.EndMethod();
const size_t code_info_size = stream.PrepareForFillIn();
MemoryRegion code_info_region(allocator.Alloc(code_info_size, kArenaAllocMisc), code_info_size);
stream.FillInCodeInfo(code_info_region);
diff --git a/compiler/optimizing/superblock_cloner.cc b/compiler/optimizing/superblock_cloner.cc
index 1b43618538..878967cc6e 100644
--- a/compiler/optimizing/superblock_cloner.cc
+++ b/compiler/optimizing/superblock_cloner.cc
@@ -72,12 +72,12 @@ static bool ArePhiInputsTheSame(const HPhi* phi) {
// Returns whether two Edge sets are equal (ArenaHashSet doesn't have "Equal" method).
static bool EdgeHashSetsEqual(const HEdgeSet* set1, const HEdgeSet* set2) {
- if (set1->Size() != set2->Size()) {
+ if (set1->size() != set2->size()) {
return false;
}
for (auto e : *set1) {
- if (set2->Find(e) == set2->end()) {
+ if (set2->find(e) == set2->end()) {
return false;
}
}
@@ -472,8 +472,8 @@ void SuperblockCloner::RemapEdgesSuccessors() {
continue;
}
- auto orig_redir = remap_orig_internal_->Find(HEdge(orig_block_id, orig_succ_id));
- auto copy_redir = remap_copy_internal_->Find(HEdge(orig_block_id, orig_succ_id));
+ auto orig_redir = remap_orig_internal_->find(HEdge(orig_block_id, orig_succ_id));
+ auto copy_redir = remap_copy_internal_->find(HEdge(orig_block_id, orig_succ_id));
// Due to construction all successors of copied block were set to original.
if (copy_redir != remap_copy_internal_->end()) {
@@ -864,9 +864,9 @@ bool SuperblockCloner::IsFastCase() const {
EdgeHashSetsEqual(&remap_copy_internal, remap_copy_internal_) &&
EdgeHashSetsEqual(&remap_incoming, remap_incoming_);
- remap_orig_internal.Clear();
- remap_copy_internal.Clear();
- remap_incoming.Clear();
+ remap_orig_internal.clear();
+ remap_copy_internal.clear();
+ remap_incoming.clear();
// Check whether remapping info corresponds to loop peeling.
CollectRemappingInfoForPeelUnroll(/* to_unroll*/ false,
@@ -1022,16 +1022,16 @@ void CollectRemappingInfoForPeelUnroll(bool to_unroll,
for (HBasicBlock* back_edge_block : loop_info->GetBackEdges()) {
HEdge e = HEdge(back_edge_block, loop_header);
if (to_unroll) {
- remap_orig_internal->Insert(e);
- remap_copy_internal->Insert(e);
+ remap_orig_internal->insert(e);
+ remap_copy_internal->insert(e);
} else {
- remap_copy_internal->Insert(e);
+ remap_copy_internal->insert(e);
}
}
// Set up remap_incoming edges set.
if (!to_unroll) {
- remap_incoming->Insert(HEdge(loop_info->GetPreHeader(), loop_header));
+ remap_incoming->insert(HEdge(loop_info->GetPreHeader(), loop_header));
}
}
diff --git a/compiler/optimizing/superblock_cloner_test.cc b/compiler/optimizing/superblock_cloner_test.cc
index df2e517aff..31114b6dcc 100644
--- a/compiler/optimizing/superblock_cloner_test.cc
+++ b/compiler/optimizing/superblock_cloner_test.cc
@@ -30,38 +30,8 @@ using HEdgeSet = SuperblockCloner::HEdgeSet;
// This class provides methods and helpers for testing various cloning and copying routines:
// individual instruction cloning and cloning of the more coarse-grain structures.
-class SuperblockClonerTest : public OptimizingUnitTest {
+class SuperblockClonerTest : public ImprovedOptimizingUnitTest {
public:
- SuperblockClonerTest() : graph_(CreateGraph()),
- entry_block_(nullptr),
- return_block_(nullptr),
- exit_block_(nullptr),
- parameter_(nullptr) {}
-
- void InitGraph() {
- entry_block_ = new (GetAllocator()) HBasicBlock(graph_);
- graph_->AddBlock(entry_block_);
- graph_->SetEntryBlock(entry_block_);
-
- return_block_ = new (GetAllocator()) HBasicBlock(graph_);
- graph_->AddBlock(return_block_);
-
- exit_block_ = new (GetAllocator()) HBasicBlock(graph_);
- graph_->AddBlock(exit_block_);
- graph_->SetExitBlock(exit_block_);
-
- entry_block_->AddSuccessor(return_block_);
- return_block_->AddSuccessor(exit_block_);
-
- parameter_ = new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
- dex::TypeIndex(0),
- 0,
- DataType::Type::kInt32);
- entry_block_->AddInstruction(parameter_);
- return_block_->AddInstruction(new (GetAllocator()) HReturnVoid());
- exit_block_->AddInstruction(new (GetAllocator()) HExit());
- }
-
void CreateBasicLoopControlFlow(HBasicBlock* position,
HBasicBlock* successor,
/* out */ HBasicBlock** header_p,
@@ -137,40 +107,6 @@ class SuperblockClonerTest : public OptimizingUnitTest {
null_check->CopyEnvironmentFrom(env);
bounds_check->CopyEnvironmentFrom(env);
}
-
- HEnvironment* ManuallyBuildEnvFor(HInstruction* instruction,
- ArenaVector<HInstruction*>* current_locals) {
- HEnvironment* environment = new (GetAllocator()) HEnvironment(
- (GetAllocator()),
- current_locals->size(),
- graph_->GetArtMethod(),
- instruction->GetDexPc(),
- instruction);
-
- environment->CopyFrom(ArrayRef<HInstruction* const>(*current_locals));
- instruction->SetRawEnvironment(environment);
- return environment;
- }
-
- bool CheckGraph() {
- GraphChecker checker(graph_);
- checker.Run();
- if (!checker.IsValid()) {
- for (const std::string& error : checker.GetErrors()) {
- std::cout << error << std::endl;
- }
- return false;
- }
- return true;
- }
-
- HGraph* graph_;
-
- HBasicBlock* entry_block_;
- HBasicBlock* return_block_;
- HBasicBlock* exit_block_;
-
- HInstruction* parameter_;
};
TEST_F(SuperblockClonerTest, IndividualInstrCloner) {
@@ -708,8 +644,8 @@ TEST_F(SuperblockClonerTest, FastCaseCheck) {
orig_bb_set.SetBit(preheader->GetBlockId());
// Adjust incoming edges.
- remap_incoming.Clear();
- remap_incoming.Insert(HEdge(preheader->GetSinglePredecessor(), preheader));
+ remap_incoming.clear();
+ remap_incoming.insert(HEdge(preheader->GetSinglePredecessor(), preheader));
HBasicBlockMap bb_map(std::less<HBasicBlock*>(), arena->Adapter(kArenaAllocSuperblockCloner));
HInstructionMap hir_map(std::less<HInstruction*>(), arena->Adapter(kArenaAllocSuperblockCloner));
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
index 2c428fac7e..c6c764e3a9 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
@@ -120,11 +120,10 @@ void ArmVIXLJNIMacroAssembler::BuildFrame(size_t frame_size,
// Write out entry spills.
int32_t offset = frame_size + kFramePointerSize;
- for (size_t i = 0; i < entry_spills.size(); ++i) {
- ArmManagedRegister reg = entry_spills.at(i).AsArm();
+ for (const ManagedRegisterSpill& spill : entry_spills) {
+ ArmManagedRegister reg = spill.AsArm();
if (reg.IsNoRegister()) {
// only increment stack offset.
- ManagedRegisterSpill spill = entry_spills.at(i);
offset += spill.getSize();
} else if (reg.IsCoreRegister()) {
asm_.StoreToOffset(kStoreWord, AsVIXLRegister(reg), sp, offset);
diff --git a/compiler/utils/arm64/jni_macro_assembler_arm64.cc b/compiler/utils/arm64/jni_macro_assembler_arm64.cc
index a5aa1c12b3..d6ce03387c 100644
--- a/compiler/utils/arm64/jni_macro_assembler_arm64.cc
+++ b/compiler/utils/arm64/jni_macro_assembler_arm64.cc
@@ -719,11 +719,10 @@ void Arm64JNIMacroAssembler::BuildFrame(size_t frame_size,
// Write out entry spills
int32_t offset = frame_size + static_cast<size_t>(kArm64PointerSize);
- for (size_t i = 0; i < entry_spills.size(); ++i) {
- Arm64ManagedRegister reg = entry_spills.at(i).AsArm64();
+ for (const ManagedRegisterSpill& spill : entry_spills) {
+ Arm64ManagedRegister reg = spill.AsArm64();
if (reg.IsNoRegister()) {
// only increment stack offset.
- ManagedRegisterSpill spill = entry_spills.at(i);
offset += spill.getSize();
} else if (reg.IsXRegister()) {
StoreToOffset(reg.AsXRegister(), SP, offset);
diff --git a/compiler/utils/assembler_thumb_test_expected.cc.inc b/compiler/utils/assembler_thumb_test_expected.cc.inc
index 19c405e517..e76e98a2a3 100644
--- a/compiler/utils/assembler_thumb_test_expected.cc.inc
+++ b/compiler/utils/assembler_thumb_test_expected.cc.inc
@@ -153,7 +153,7 @@ const char* const VixlJniHelpersResults[] = {
" 21c: f8d9 8034 ldr.w r8, [r9, #52] ; 0x34\n",
" 220: 4770 bx lr\n",
" 222: 4660 mov r0, ip\n",
- " 224: f8d9 c2cc ldr.w ip, [r9, #716] ; 0x2cc\n",
+ " 224: f8d9 c2d0 ldr.w ip, [r9, #720] ; 0x2d0\n",
" 228: 47e0 blx ip\n",
nullptr
};
diff --git a/compiler/utils/dedupe_set-inl.h b/compiler/utils/dedupe_set-inl.h
index c866504e62..4e892f2616 100644
--- a/compiler/utils/dedupe_set-inl.h
+++ b/compiler/utils/dedupe_set-inl.h
@@ -71,13 +71,13 @@ class DedupeSet<InKey, StoreKey, Alloc, HashType, HashFunc, kShard>::Shard {
const StoreKey* Add(Thread* self, size_t hash, const InKey& in_key) REQUIRES(!lock_) {
MutexLock lock(self, lock_);
HashedKey<InKey> hashed_in_key(hash, &in_key);
- auto it = keys_.Find(hashed_in_key);
+ auto it = keys_.find(hashed_in_key);
if (it != keys_.end()) {
DCHECK(it->Key() != nullptr);
return it->Key();
}
const StoreKey* store_key = alloc_.Copy(in_key);
- keys_.Insert(HashedKey<StoreKey> { hash, store_key });
+ keys_.insert(HashedKey<StoreKey> { hash, store_key });
return store_key;
}
@@ -90,7 +90,7 @@ class DedupeSet<InKey, StoreKey, Alloc, HashType, HashFunc, kShard>::Shard {
// Note: The total_probe_distance will be updated with the current state.
// It may have been higher before a re-hash.
global_stats->total_probe_distance += keys_.TotalProbeDistance();
- global_stats->total_size += keys_.Size();
+ global_stats->total_size += keys_.size();
for (const HashedKey<StoreKey>& key : keys_) {
auto it = stats.find(key.Hash());
if (it == stats.end()) {
diff --git a/compiler/utils/managed_register.h b/compiler/utils/managed_register.h
index 2b7b2aa7ce..db9c36cc75 100644
--- a/compiler/utils/managed_register.h
+++ b/compiler/utils/managed_register.h
@@ -101,11 +101,11 @@ class ManagedRegisterSpill : public ManagedRegister {
ManagedRegisterSpill(const ManagedRegister& other, int32_t size)
: ManagedRegister(other), size_(size), spill_offset_(-1) { }
- int32_t getSpillOffset() {
+ int32_t getSpillOffset() const {
return spill_offset_;
}
- int32_t getSize() {
+ int32_t getSize() const {
return size_;
}
diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
index dce5b95fec..c0b6f988d4 100644
--- a/compiler/utils/mips/assembler_mips.cc
+++ b/compiler/utils/mips/assembler_mips.cc
@@ -4801,10 +4801,9 @@ void MipsAssembler::BuildFrame(size_t frame_size,
// Write out entry spills.
int32_t offset = frame_size + kFramePointerSize;
- for (size_t i = 0; i < entry_spills.size(); ++i) {
- MipsManagedRegister reg = entry_spills.at(i).AsMips();
+ for (const ManagedRegisterSpill& spill : entry_spills) {
+ MipsManagedRegister reg = spill.AsMips();
if (reg.IsNoRegister()) {
- ManagedRegisterSpill spill = entry_spills.at(i);
offset += spill.getSize();
} else if (reg.IsCoreRegister()) {
StoreToOffset(kStoreWord, reg.AsCoreRegister(), SP, offset);
diff --git a/compiler/utils/mips64/assembler_mips64.cc b/compiler/utils/mips64/assembler_mips64.cc
index bb1bb82fa5..5b1c5d9e01 100644
--- a/compiler/utils/mips64/assembler_mips64.cc
+++ b/compiler/utils/mips64/assembler_mips64.cc
@@ -3633,9 +3633,8 @@ void Mips64Assembler::BuildFrame(size_t frame_size,
// Write out entry spills.
int32_t offset = frame_size + kFramePointerSize;
- for (size_t i = 0; i < entry_spills.size(); ++i) {
- Mips64ManagedRegister reg = entry_spills[i].AsMips64();
- ManagedRegisterSpill spill = entry_spills.at(i);
+ for (const ManagedRegisterSpill& spill : entry_spills) {
+ Mips64ManagedRegister reg = spill.AsMips64();
int32_t size = spill.getSize();
if (reg.IsNoRegister()) {
// only increment stack offset.
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index 86f9010ea3..c2ce03b1f2 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -525,6 +525,58 @@ void X86Assembler::divss(XmmRegister dst, const Address& src) {
EmitOperand(dst, src);
}
+void X86Assembler::vfmadd231ps(XmmRegister acc, XmmRegister mul_left, XmmRegister mul_right) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ uint8_t byte_zero = EmitVexByteZero(false /*is_two_byte*/);
+ uint8_t byte_one = EmitVexByte1(false, false, false, 2);
+ uint8_t byte_two = EmitVexByte2(false, 128, X86ManagedRegister::FromXmmRegister(mul_left), 1);
+ EmitUint8(byte_zero);
+ EmitUint8(byte_one);
+ EmitUint8(byte_two);
+ // Opcode field.
+ EmitUint8(0xB8);
+ EmitXmmRegisterOperand(acc, mul_right);
+}
+
+void X86Assembler::vfmsub231ps(XmmRegister acc, XmmRegister mul_left, XmmRegister mul_right) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ uint8_t byte_zero = EmitVexByteZero(false /*is_two_byte*/);
+ uint8_t byte_one = EmitVexByte1(false, false, false, 2);
+ uint8_t byte_two = EmitVexByte2(false, 128, X86ManagedRegister::FromXmmRegister(mul_left), 1);
+ EmitUint8(byte_zero);
+ EmitUint8(byte_one);
+ EmitUint8(byte_two);
+ // Opcode field.
+ EmitUint8(0xBA);
+ EmitXmmRegisterOperand(acc, mul_right);
+}
+
+void X86Assembler::vfmadd231pd(XmmRegister acc, XmmRegister mul_left, XmmRegister mul_right) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ uint8_t byte_zero = EmitVexByteZero(false /*is_two_byte*/);
+ uint8_t byte_one = EmitVexByte1(false, false, false, 2);
+ uint8_t byte_two = EmitVexByte2(true, 128, X86ManagedRegister::FromXmmRegister(mul_left), 1);
+ EmitUint8(byte_zero);
+ EmitUint8(byte_one);
+ EmitUint8(byte_two);
+ // Opcode field.
+ EmitUint8(0xB8);
+ EmitXmmRegisterOperand(acc, mul_right);
+}
+
+void X86Assembler::vfmsub231pd(XmmRegister acc, XmmRegister mul_left, XmmRegister mul_right) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ uint8_t byte_zero = EmitVexByteZero(false /*is_two_byte*/);
+ uint8_t byte_one = EmitVexByte1(false, false, false, 2);
+ uint8_t byte_two = EmitVexByte2(true, 128, X86ManagedRegister::FromXmmRegister(mul_left), 1);
+ EmitUint8(byte_zero);
+ EmitUint8(byte_one);
+ EmitUint8(byte_two);
+ // Opcode field.
+ EmitUint8(0xBA);
+ EmitXmmRegisterOperand(acc, mul_right);
+}
+
void X86Assembler::addps(XmmRegister dst, XmmRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
@@ -2898,6 +2950,99 @@ void X86Assembler::EmitLabelLink(NearLabel* label) {
}
+uint8_t X86Assembler::EmitVexByteZero(bool is_two_byte) {
+ uint8_t vex_zero = 0xC0;
+ if (!is_two_byte) {
+ vex_zero |= 0xC4;
+ } else {
+ vex_zero |= 0xC5;
+ }
+ return vex_zero;
+}
+
+uint8_t X86Assembler::EmitVexByte1(bool r, bool x, bool b, int mmmmm ) {
+ // VEX Byte 1.
+ uint8_t vex_prefix = 0;
+ if (!r) {
+ vex_prefix |= 0x80; // VEX.R .
+ }
+ if (!x) {
+ vex_prefix |= 0x40; // VEX.X .
+ }
+ if (!b) {
+ vex_prefix |= 0x20; // VEX.B .
+ }
+
+ // VEX.mmmmm.
+ switch (mmmmm) {
+ case 1:
+ // Implied 0F leading opcode byte.
+ vex_prefix |= 0x01;
+ break;
+ case 2:
+ // Implied leading 0F 38 opcode byte.
+ vex_prefix |= 0x02;
+ break;
+ case 3:
+ // Implied leading OF 3A opcode byte.
+ vex_prefix |= 0x03;
+ break;
+ default:
+ LOG(FATAL) << "unknown opcode bytes";
+ }
+ return vex_prefix;
+}
+
+uint8_t X86Assembler::EmitVexByte2(bool w, int l, X86ManagedRegister operand, int pp) {
+ uint8_t vex_prefix = 0;
+ // VEX Byte 2.
+ if (w) {
+ vex_prefix |= 0x80;
+ }
+
+ // VEX.vvvv.
+ if (operand.IsXmmRegister()) {
+ XmmRegister vvvv = operand.AsXmmRegister();
+ int inverted_reg = 15-static_cast<int>(vvvv);
+ uint8_t reg = static_cast<uint8_t>(inverted_reg);
+ vex_prefix |= ((reg & 0x0F) << 3);
+ } else if (operand.IsCpuRegister()) {
+ Register vvvv = operand.AsCpuRegister();
+ int inverted_reg = 15 - static_cast<int>(vvvv);
+ uint8_t reg = static_cast<uint8_t>(inverted_reg);
+ vex_prefix |= ((reg & 0x0F) << 3);
+ }
+
+ // VEX.L.
+ if (l == 256) {
+ vex_prefix |= 0x04;
+ }
+
+ // VEX.pp.
+ switch (pp) {
+ case 0:
+ // SIMD Pefix - None.
+ vex_prefix |= 0x00;
+ break;
+ case 1:
+ // SIMD Prefix - 66.
+ vex_prefix |= 0x01;
+ break;
+ case 2:
+ // SIMD Prefix - F3.
+ vex_prefix |= 0x02;
+ break;
+ case 3:
+ // SIMD Prefix - F2.
+ vex_prefix |= 0x03;
+ break;
+ default:
+ LOG(FATAL) << "unknown SIMD Prefix";
+ }
+
+ return vex_prefix;
+}
+
void X86Assembler::EmitGenericShift(int reg_or_opcode,
const Operand& operand,
const Immediate& imm) {
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index e42c4c986a..8c9ce82687 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -397,6 +397,12 @@ class X86Assembler FINAL : public Assembler {
void divss(XmmRegister dst, XmmRegister src);
void divss(XmmRegister dst, const Address& src);
+ // FMA Mac Instructions
+ void vfmadd231ps(XmmRegister dst, XmmRegister src1, XmmRegister src2);
+ void vfmadd231pd(XmmRegister dst, XmmRegister src1, XmmRegister src2);
+ void vfmsub231ps(XmmRegister dst, XmmRegister src1, XmmRegister src2);
+ void vfmsub231pd(XmmRegister dst, XmmRegister src1, XmmRegister src2);
+
void addps(XmmRegister dst, XmmRegister src); // no addr variant (for now)
void subps(XmmRegister dst, XmmRegister src);
void mulps(XmmRegister dst, XmmRegister src);
@@ -834,6 +840,11 @@ class X86Assembler FINAL : public Assembler {
void EmitLabelLink(Label* label);
void EmitLabelLink(NearLabel* label);
+ // Emit a 3 byte VEX Prefix
+ uint8_t EmitVexByteZero(bool is_two_byte);
+ uint8_t EmitVexByte1(bool r, bool x, bool b, int mmmmm);
+ uint8_t EmitVexByte2(bool w , int l , X86ManagedRegister vvv, int pp);
+
void EmitGenericShift(int rm, const Operand& operand, const Immediate& imm);
void EmitGenericShift(int rm, const Operand& operand, Register shifter);
diff --git a/compiler/utils/x86/jni_macro_assembler_x86.cc b/compiler/utils/x86/jni_macro_assembler_x86.cc
index 7e29c4aa26..dd99f03aa7 100644
--- a/compiler/utils/x86/jni_macro_assembler_x86.cc
+++ b/compiler/utils/x86/jni_macro_assembler_x86.cc
@@ -67,8 +67,7 @@ void X86JNIMacroAssembler::BuildFrame(size_t frame_size,
cfi().AdjustCFAOffset(kFramePointerSize);
DCHECK_EQ(static_cast<size_t>(cfi().GetCurrentCFAOffset()), frame_size);
- for (size_t i = 0; i < entry_spills.size(); ++i) {
- ManagedRegisterSpill spill = entry_spills.at(i);
+ for (const ManagedRegisterSpill& spill : entry_spills) {
if (spill.AsX86().IsCpuRegister()) {
int offset = frame_size + spill.getSpillOffset();
__ movl(Address(ESP, offset), spill.AsX86().AsCpuRegister());
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index bd31561937..9983eaeeea 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -603,6 +603,56 @@ void X86_64Assembler::divss(XmmRegister dst, const Address& src) {
}
+void X86_64Assembler::vfmadd231ps(XmmRegister acc, XmmRegister mul_left, XmmRegister mul_right) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ uint8_t byte_zero = EmitVexByteZero(false /*is_two_byte*/);
+ uint8_t byte_one = EmitVexByte1(acc.NeedsRex(), false, mul_right.NeedsRex(), 2);
+ uint8_t byte_two = EmitVexByte2(false, 128, X86_64ManagedRegister::FromXmmRegister(mul_left.AsFloatRegister()), 1);
+ EmitUint8(byte_zero);
+ EmitUint8(byte_one);
+ EmitUint8(byte_two);
+ // Opcode field.
+ EmitUint8(0xB8);
+ EmitXmmRegisterOperand(acc.LowBits(), mul_right);
+}
+
+
+void X86_64Assembler::vfmsub231ps(XmmRegister acc, XmmRegister mul_left, XmmRegister mul_right) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ uint8_t byte_zero = EmitVexByteZero(false /*is_two_byte*/);
+ uint8_t byte_one = EmitVexByte1(acc.NeedsRex(), false, mul_right.NeedsRex(), 2);
+ uint8_t byte_two = EmitVexByte2(false, 128, X86_64ManagedRegister::FromXmmRegister(mul_left.AsFloatRegister()), 1);
+ EmitUint8(byte_zero);
+ EmitUint8(byte_one);
+ EmitUint8(byte_two);
+ // Opcode field
+ EmitUint8(0xBA);
+ EmitXmmRegisterOperand(acc.LowBits(), mul_right);
+}
+
+void X86_64Assembler::vfmadd231pd(XmmRegister acc, XmmRegister mul_left, XmmRegister mul_right) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ uint8_t byte_zero = EmitVexByteZero(false /*is_two_byte*/);
+ uint8_t byte_one = EmitVexByte1(acc.NeedsRex(), false, mul_right.NeedsRex(), 2);
+ uint8_t byte_two = EmitVexByte2(true, 128, X86_64ManagedRegister::FromXmmRegister(mul_left.AsFloatRegister()), 1);
+ EmitUint8(byte_zero);
+ EmitUint8(byte_one);
+ EmitUint8(byte_two);
+ EmitUint8(0xB8);
+ EmitXmmRegisterOperand(acc.LowBits(), mul_right);
+}
+
+void X86_64Assembler::vfmsub231pd(XmmRegister acc, XmmRegister mul_left, XmmRegister mul_right) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ uint8_t byte_zero = EmitVexByteZero(false /*is_two_byte*/);
+ uint8_t byte_one = EmitVexByte1(acc.NeedsRex(), false, mul_right.NeedsRex(), 2);
+ uint8_t byte_two = EmitVexByte2(true, 128, X86_64ManagedRegister::FromXmmRegister(mul_left.AsFloatRegister()), 1);
+ EmitUint8(byte_zero);
+ EmitUint8(byte_one);
+ EmitUint8(byte_two);
+ EmitUint8(0xBA);
+ EmitXmmRegisterOperand(acc.LowBits(), mul_right);
+}
void X86_64Assembler::addps(XmmRegister dst, XmmRegister src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitOptionalRex32(dst, src);
@@ -3544,6 +3594,98 @@ void X86_64Assembler::EmitLabelLink(NearLabel* label) {
label->LinkTo(position);
}
+uint8_t X86_64Assembler::EmitVexByteZero(bool is_two_byte) {
+ uint8_t vex_zero = 0xC0;
+ if (!is_two_byte) {
+ vex_zero |= 0xC4;
+ } else {
+ vex_zero |= 0xC5;
+ }
+ return vex_zero;
+}
+
+uint8_t X86_64Assembler::EmitVexByte1(bool r, bool x, bool b, int mmmmm) {
+ // VEX Byte 1.
+ uint8_t vex_prefix = 0;
+ if (!r) {
+ vex_prefix |= 0x80; // VEX.R .
+ }
+ if (!x) {
+ vex_prefix |= 0x40; // VEX.X .
+ }
+ if (!b) {
+ vex_prefix |= 0x20; // VEX.B .
+ }
+
+ // VEX.mmmmm.
+ switch (mmmmm) {
+ case 1:
+ // Implied 0F leading opcode byte.
+ vex_prefix |= 0x01;
+ break;
+ case 2:
+ // Implied leading 0F 38 opcode byte.
+ vex_prefix |= 0x02;
+ break;
+ case 3:
+ // Implied leading OF 3A opcode byte.
+ vex_prefix |= 0x03;
+ break;
+ default:
+ LOG(FATAL) << "unknown opcode bytes";
+ }
+
+ return vex_prefix;
+}
+
+uint8_t X86_64Assembler::EmitVexByte2(bool w, int l, X86_64ManagedRegister operand, int pp) {
+ // VEX Byte 2.
+ uint8_t vex_prefix = 0;
+ if (w) {
+ vex_prefix |= 0x80;
+ }
+ // VEX.vvvv.
+ if (operand.IsXmmRegister()) {
+ XmmRegister vvvv = operand.AsXmmRegister();
+ int inverted_reg = 15-static_cast<int>(vvvv.AsFloatRegister());
+ uint8_t reg = static_cast<uint8_t>(inverted_reg);
+ vex_prefix |= ((reg & 0x0F) << 3);
+ } else if (operand.IsCpuRegister()) {
+ CpuRegister vvvv = operand.AsCpuRegister();
+ int inverted_reg = 15 - static_cast<int>(vvvv.AsRegister());
+ uint8_t reg = static_cast<uint8_t>(inverted_reg);
+ vex_prefix |= ((reg & 0x0F) << 3);
+ }
+
+ // VEX.L.
+ if (l == 256) {
+ vex_prefix |= 0x04;
+ }
+
+ // VEX.pp.
+ switch (pp) {
+ case 0:
+ // SIMD Pefix - None.
+ vex_prefix |= 0x00;
+ break;
+ case 1:
+ // SIMD Prefix - 66.
+ vex_prefix |= 0x01;
+ break;
+ case 2:
+ // SIMD Prefix - F3.
+ vex_prefix |= 0x02;
+ break;
+ case 3:
+ // SIMD Prefix - F2.
+ vex_prefix |= 0x03;
+ break;
+ default:
+ LOG(FATAL) << "unknown SIMD Prefix";
+ }
+
+ return vex_prefix;
+}
void X86_64Assembler::EmitGenericShift(bool wide,
int reg_or_opcode,
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index e4d72a7ba2..d5779aa786 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -436,6 +436,16 @@ class X86_64Assembler FINAL : public Assembler {
void divss(XmmRegister dst, XmmRegister src);
void divss(XmmRegister dst, const Address& src);
+ // Mac Instructions
+ // For reference look at the Instruction reference volume 2C.
+ // The below URL is broken down in two lines.
+ // https://www.intel.com/content/www/us/en/architecture-and-technology/
+ // 64-ia-32-architectures-software-developer-vol-2c-manual.html
+ void vfmadd231ps(XmmRegister acc, XmmRegister left, XmmRegister right);
+ void vfmadd231pd(XmmRegister acc, XmmRegister left, XmmRegister right);
+ void vfmsub231ps(XmmRegister acc, XmmRegister left, XmmRegister right);
+ void vfmsub231pd(XmmRegister acc, XmmRegister left, XmmRegister right);
+
void addps(XmmRegister dst, XmmRegister src); // no addr variant (for now)
void subps(XmmRegister dst, XmmRegister src);
void mulps(XmmRegister dst, XmmRegister src);
@@ -921,6 +931,11 @@ class X86_64Assembler FINAL : public Assembler {
void EmitLabelLink(Label* label);
void EmitLabelLink(NearLabel* label);
+ // Emit a 3 byte VEX Prefix.
+ uint8_t EmitVexByteZero(bool is_two_byte);
+ uint8_t EmitVexByte1(bool r, bool x, bool b, int mmmmm);
+ uint8_t EmitVexByte2(bool w , int l , X86_64ManagedRegister operand, int pp);
+
void EmitGenericShift(bool wide, int rm, CpuRegister reg, const Immediate& imm);
void EmitGenericShift(bool wide, int rm, CpuRegister operand, CpuRegister shifter);
diff --git a/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc b/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
index 9486cb44c5..f6b2f9df34 100644
--- a/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
+++ b/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
@@ -75,8 +75,7 @@ void X86_64JNIMacroAssembler::BuildFrame(size_t frame_size,
__ movq(Address(CpuRegister(RSP), 0), method_reg.AsX86_64().AsCpuRegister());
- for (size_t i = 0; i < entry_spills.size(); ++i) {
- ManagedRegisterSpill spill = entry_spills.at(i);
+ for (const ManagedRegisterSpill& spill : entry_spills) {
if (spill.AsX86_64().IsCpuRegister()) {
if (spill.getSize() == 8) {
__ movq(Address(CpuRegister(RSP), frame_size + spill.getSpillOffset()),
diff --git a/compiler/verifier_deps_test.cc b/compiler/verifier_deps_test.cc
index 3fe2ec0ac0..c223549710 100644
--- a/compiler/verifier_deps_test.cc
+++ b/compiler/verifier_deps_test.cc
@@ -129,7 +129,7 @@ class VerifierDepsTest : public CommonCompilerTest {
for (const DexFile* dex_file : dex_files_) {
compiler_driver_->GetVerificationResults()->AddDexFile(dex_file);
}
- compiler_driver_->SetDexFilesForOatFile(dex_files_);
+ SetDexFilesForOatFile(dex_files_);
}
void LoadDexFile(ScopedObjectAccess& soa) REQUIRES_SHARED(Locks::mutator_lock_) {
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 6b65aca943..cbc6424466 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -27,7 +27,6 @@
#include <sstream>
#include <string>
#include <type_traits>
-#include <unordered_set>
#include <vector>
#if defined(__linux__) && defined(__arm__)
@@ -291,7 +290,7 @@ NO_RETURN static void Usage(const char* fmt, ...) {
UsageError(" Default: default");
UsageError("");
UsageError(" --compile-pic: Force indirect use of code, methods, and classes");
- UsageError(" Default: disabled");
+ UsageError(" Default: disabled for apps (ignored for boot image which is always PIC)");
UsageError("");
UsageError(" --compiler-backend=(Quick|Optimizing): select compiler backend");
UsageError(" set.");
@@ -611,7 +610,6 @@ class Dex2Oat FINAL {
public:
explicit Dex2Oat(TimingLogger* timings) :
compiler_kind_(Compiler::kOptimizing),
- instruction_set_(kRuntimeISA == InstructionSet::kArm ? InstructionSet::kThumb2 : kRuntimeISA),
// Take the default set of instruction features from the build.
image_file_location_oat_checksum_(0),
image_file_location_oat_data_begin_(0),
@@ -643,7 +641,6 @@ class Dex2Oat FINAL {
driver_(nullptr),
opened_dex_files_maps_(),
opened_dex_files_(),
- no_inline_from_dex_files_(),
avoid_storing_invocation_(false),
swap_fd_(kInvalidFd),
app_image_fd_(kInvalidFd),
@@ -657,7 +654,7 @@ class Dex2Oat FINAL {
// the runtime.
LogCompletionTime();
- if (!kIsDebugBuild && !(RUNNING_ON_MEMORY_TOOL && kMemoryToolDetectsLeaks)) {
+ if (!kIsDebugBuild && !(kRunningOnMemoryTool && kMemoryToolDetectsLeaks)) {
// We want to just exit on non-debug builds, not bringing the runtime down
// in an orderly fashion. So release the following fields.
driver_.release();
@@ -698,35 +695,39 @@ class Dex2Oat FINAL {
}
bool VerifyProfileData() {
- return profile_compilation_info_->VerifyProfileData(dex_files_);
+ return profile_compilation_info_->VerifyProfileData(compiler_options_->dex_files_for_oat_file_);
}
void ParseInstructionSetVariant(const std::string& option, ParserOptions* parser_options) {
- instruction_set_features_ = InstructionSetFeatures::FromVariant(
- instruction_set_, option, &parser_options->error_msg);
- if (instruction_set_features_.get() == nullptr) {
+ compiler_options_->instruction_set_features_ = InstructionSetFeatures::FromVariant(
+ compiler_options_->instruction_set_, option, &parser_options->error_msg);
+ if (compiler_options_->instruction_set_features_ == nullptr) {
Usage("%s", parser_options->error_msg.c_str());
}
}
void ParseInstructionSetFeatures(const std::string& option, ParserOptions* parser_options) {
- if (instruction_set_features_ == nullptr) {
- instruction_set_features_ = InstructionSetFeatures::FromVariant(
- instruction_set_, "default", &parser_options->error_msg);
- if (instruction_set_features_.get() == nullptr) {
+ if (compiler_options_->instruction_set_features_ == nullptr) {
+ compiler_options_->instruction_set_features_ = InstructionSetFeatures::FromVariant(
+ compiler_options_->instruction_set_, "default", &parser_options->error_msg);
+ if (compiler_options_->instruction_set_features_ == nullptr) {
Usage("Problem initializing default instruction set features variant: %s",
parser_options->error_msg.c_str());
}
}
- instruction_set_features_ =
- instruction_set_features_->AddFeaturesFromString(option, &parser_options->error_msg);
- if (instruction_set_features_ == nullptr) {
+ compiler_options_->instruction_set_features_ =
+ compiler_options_->instruction_set_features_->AddFeaturesFromString(
+ option, &parser_options->error_msg);
+ if (compiler_options_->instruction_set_features_ == nullptr) {
Usage("Error parsing '%s': %s", option.c_str(), parser_options->error_msg.c_str());
}
}
void ProcessOptions(ParserOptions* parser_options) {
compiler_options_->boot_image_ = !image_filenames_.empty();
+ if (compiler_options_->boot_image_) {
+ compiler_options_->compile_pic_ = true;
+ }
compiler_options_->app_image_ = app_image_fd_ != -1 || !app_image_file_name_.empty();
if (IsBootImage() && image_filenames_.size() == 1) {
@@ -872,23 +873,23 @@ class Dex2Oat FINAL {
// If no instruction set feature was given, use the default one for the target
// instruction set.
- if (instruction_set_features_.get() == nullptr) {
- instruction_set_features_ = InstructionSetFeatures::FromVariant(
- instruction_set_, "default", &parser_options->error_msg);
- if (instruction_set_features_.get() == nullptr) {
+ if (compiler_options_->instruction_set_features_.get() == nullptr) {
+ compiler_options_->instruction_set_features_ = InstructionSetFeatures::FromVariant(
+ compiler_options_->instruction_set_, "default", &parser_options->error_msg);
+ if (compiler_options_->instruction_set_features_ == nullptr) {
Usage("Problem initializing default instruction set features variant: %s",
parser_options->error_msg.c_str());
}
}
- if (instruction_set_ == kRuntimeISA) {
+ if (compiler_options_->instruction_set_ == kRuntimeISA) {
std::unique_ptr<const InstructionSetFeatures> runtime_features(
InstructionSetFeatures::FromCppDefines());
- if (!instruction_set_features_->Equals(runtime_features.get())) {
+ if (!compiler_options_->GetInstructionSetFeatures()->Equals(runtime_features.get())) {
LOG(WARNING) << "Mismatch between dex2oat instruction set features ("
- << *instruction_set_features_ << ") and those of dex2oat executable ("
- << *runtime_features <<") for the command line:\n"
- << CommandLine();
+ << *compiler_options_->GetInstructionSetFeatures()
+ << ") and those of dex2oat executable (" << *runtime_features
+ << ") for the command line:\n" << CommandLine();
}
}
@@ -898,7 +899,7 @@ class Dex2Oat FINAL {
// Checks are all explicit until we know the architecture.
// Set the compilation target's implicit checks options.
- switch (instruction_set_) {
+ switch (compiler_options_->GetInstructionSet()) {
case InstructionSet::kArm:
case InstructionSet::kThumb2:
case InstructionSet::kArm64:
@@ -954,9 +955,9 @@ class Dex2Oat FINAL {
compiler_options_->force_determinism_ = force_determinism_;
if (passes_to_run_filename_ != nullptr) {
- passes_to_run_.reset(ReadCommentedInputFromFile<std::vector<std::string>>(
+ passes_to_run_ = ReadCommentedInputFromFile<std::vector<std::string>>(
passes_to_run_filename_,
- nullptr)); // No post-processing.
+ nullptr); // No post-processing.
if (passes_to_run_.get() == nullptr) {
Usage("Failed to read list of passes to run.");
}
@@ -1217,10 +1218,10 @@ class Dex2Oat FINAL {
AssignIfExists(args, M::Backend, &compiler_kind_);
parser_options->requested_specific_compiler = args.Exists(M::Backend);
- AssignIfExists(args, M::TargetInstructionSet, &instruction_set_);
+ AssignIfExists(args, M::TargetInstructionSet, &compiler_options_->instruction_set_);
// arm actually means thumb2.
- if (instruction_set_ == InstructionSet::kArm) {
- instruction_set_ = InstructionSet::kThumb2;
+ if (compiler_options_->instruction_set_ == InstructionSet::kArm) {
+ compiler_options_->instruction_set_ = InstructionSet::kThumb2;
}
AssignTrueIfExists(args, M::Host, &is_host_);
@@ -1490,23 +1491,21 @@ class Dex2Oat FINAL {
if (!IsImage()) {
return;
}
- // If we don't have a profile, treat it as an empty set of classes. b/77340429
- if (image_classes_ == nullptr) {
- // May be non-null when --image-classes is passed in, in that case avoid clearing the list.
- image_classes_.reset(new std::unordered_set<std::string>());
- }
if (profile_compilation_info_ != nullptr) {
+ // TODO: The following comment looks outdated or misplaced.
// Filter out class path classes since we don't want to include these in the image.
- image_classes_.reset(
- new std::unordered_set<std::string>(
- profile_compilation_info_->GetClassDescriptors(dex_files_)));
- VLOG(compiler) << "Loaded " << image_classes_->size()
+ HashSet<std::string> image_classes = profile_compilation_info_->GetClassDescriptors(
+ compiler_options_->dex_files_for_oat_file_);
+ VLOG(compiler) << "Loaded " << image_classes.size()
<< " image class descriptors from profile";
if (VLOG_IS_ON(compiler)) {
- for (const std::string& s : *image_classes_) {
+ for (const std::string& s : image_classes) {
LOG(INFO) << "Image class " << s;
}
}
+ // Note: If we have a profile, classes previously loaded for the --image-classes
+ // option are overwritten here.
+ compiler_options_->image_classes_.swap(image_classes);
}
}
@@ -1632,8 +1631,6 @@ class Dex2Oat FINAL {
if (!oat_writers_[i]->WriteAndOpenDexFiles(
vdex_files_[i].get(),
rodata_.back(),
- instruction_set_,
- instruction_set_features_.get(),
key_value_store_.get(),
verify,
update_input_vdex_,
@@ -1657,10 +1654,11 @@ class Dex2Oat FINAL {
}
}
- dex_files_ = MakeNonOwningPointerVector(opened_dex_files_);
+ compiler_options_->dex_files_for_oat_file_ = MakeNonOwningPointerVector(opened_dex_files_);
+ const std::vector<const DexFile*>& dex_files = compiler_options_->dex_files_for_oat_file_;
// If we need to downgrade the compiler-filter for size reasons.
- if (!IsBootImage() && IsVeryLarge(dex_files_)) {
+ if (!IsBootImage() && IsVeryLarge(dex_files)) {
// Disable app image to make sure dex2oat unloading is enabled.
compiler_options_->DisableAppImage();
@@ -1693,7 +1691,7 @@ class Dex2Oat FINAL {
CHECK(driver_ == nullptr);
// If we use a swap file, ensure we are above the threshold to make it necessary.
if (swap_fd_ != -1) {
- if (!UseSwap(IsBootImage(), dex_files_)) {
+ if (!UseSwap(IsBootImage(), dex_files)) {
close(swap_fd_);
swap_fd_ = -1;
VLOG(compiler) << "Decided to run without swap.";
@@ -1736,7 +1734,7 @@ class Dex2Oat FINAL {
// Verification results are only required for modes that have any compilation. Avoid
// adding the dex files if possible to prevent allocating large arrays.
if (verification_results_ != nullptr) {
- for (const auto& dex_file : dex_files_) {
+ for (const auto& dex_file : dex_files) {
// Pre-register dex files so that we can access verification results without locks during
// compilation and verification.
verification_results_->AddDexFile(dex_file);
@@ -1754,7 +1752,7 @@ class Dex2Oat FINAL {
// Doesn't return the class loader since it's not meant to be used for image compilation.
void CompileDexFilesIndividually() {
CHECK(!IsImage()) << "Not supported with image";
- for (const DexFile* dex_file : dex_files_) {
+ for (const DexFile* dex_file : compiler_options_->dex_files_for_oat_file_) {
std::vector<const DexFile*> dex_files(1u, dex_file);
VLOG(compiler) << "Compiling " << dex_file->GetLocation();
jobject class_loader = CompileDexFiles(dex_files);
@@ -1785,7 +1783,7 @@ class Dex2Oat FINAL {
// mode (to reduce RAM used by the compiler).
return !IsImage() &&
!update_input_vdex_ &&
- dex_files_.size() > 1 &&
+ compiler_options_->dex_files_for_oat_file_.size() > 1 &&
!CompilerFilter::IsAotCompilationEnabled(compiler_options_->GetCompilerFilter());
}
@@ -1812,10 +1810,12 @@ class Dex2Oat FINAL {
class_path_files = class_loader_context_->FlattenOpenedDexFiles();
}
- std::vector<const std::vector<const DexFile*>*> dex_file_vectors = {
+ const std::vector<const DexFile*>& dex_files = compiler_options_->dex_files_for_oat_file_;
+ std::vector<const DexFile*> no_inline_from_dex_files;
+ const std::vector<const DexFile*>* dex_file_vectors[] = {
&class_linker->GetBootClassPath(),
&class_path_files,
- &dex_files_
+ &dex_files
};
for (const std::vector<const DexFile*>* dex_file_vector : dex_file_vectors) {
for (const DexFile* dex_file : *dex_file_vector) {
@@ -1834,27 +1834,24 @@ class Dex2Oat FINAL {
if (android::base::StartsWith(dex_location, filter.c_str())) {
VLOG(compiler) << "Disabling inlining from " << dex_file->GetLocation();
- no_inline_from_dex_files_.push_back(dex_file);
+ no_inline_from_dex_files.push_back(dex_file);
break;
}
}
}
}
- if (!no_inline_from_dex_files_.empty()) {
- compiler_options_->no_inline_from_ = &no_inline_from_dex_files_;
+ if (!no_inline_from_dex_files.empty()) {
+ compiler_options_->no_inline_from_.swap(no_inline_from_dex_files);
}
}
driver_.reset(new CompilerDriver(compiler_options_.get(),
verification_results_.get(),
compiler_kind_,
- instruction_set_,
- instruction_set_features_.get(),
- image_classes_.release(),
+ &compiler_options_->image_classes_,
thread_count_,
swap_fd_,
profile_compilation_info_.get()));
- driver_->SetDexFilesForOatFile(dex_files_);
if (!IsBootImage()) {
driver_->SetClasspathDexFiles(class_loader_context_->FlattenOpenedDexFiles());
}
@@ -1870,9 +1867,10 @@ class Dex2Oat FINAL {
}
// Setup vdex for compilation.
+ const std::vector<const DexFile*>& dex_files = compiler_options_->dex_files_for_oat_file_;
if (!DoEagerUnquickeningOfVdex() && input_vdex_file_ != nullptr) {
callbacks_->SetVerifierDeps(
- new verifier::VerifierDeps(dex_files_, input_vdex_file_->GetVerifierDepsData()));
+ new verifier::VerifierDeps(dex_files, input_vdex_file_->GetVerifierDepsData()));
// TODO: we unquicken unconditionally, as we don't know
// if the boot image has changed. How exactly we'll know is under
@@ -1882,11 +1880,11 @@ class Dex2Oat FINAL {
// We do not decompile a RETURN_VOID_NO_BARRIER into a RETURN_VOID, as the quickening
// optimization does not depend on the boot image (the optimization relies on not
// having final fields in a class, which does not change for an app).
- input_vdex_file_->Unquicken(dex_files_, /* decompile_return_instruction */ false);
+ input_vdex_file_->Unquicken(dex_files, /* decompile_return_instruction */ false);
} else {
// Create the main VerifierDeps, here instead of in the compiler since we want to aggregate
// the results for all the dex files, not just the results for the current dex file.
- callbacks_->SetVerifierDeps(new verifier::VerifierDeps(dex_files_));
+ callbacks_->SetVerifierDeps(new verifier::VerifierDeps(dex_files));
}
// Invoke the compilation.
if (compile_individually) {
@@ -1894,7 +1892,7 @@ class Dex2Oat FINAL {
// Return a null classloader since we already freed released it.
return nullptr;
}
- return CompileDexFiles(dex_files_);
+ return CompileDexFiles(dex_files);
}
// Create the class loader, use it to compile, and return.
@@ -1903,7 +1901,8 @@ class Dex2Oat FINAL {
jobject class_loader = nullptr;
if (!IsBootImage()) {
- class_loader = class_loader_context_->CreateClassLoader(dex_files_);
+ class_loader =
+ class_loader_context_->CreateClassLoader(compiler_options_->dex_files_for_oat_file_);
callbacks_->SetDexFiles(&dex_files);
}
@@ -2021,7 +2020,7 @@ class Dex2Oat FINAL {
VLOG(compiler) << "App image base=" << reinterpret_cast<void*>(image_base_);
}
- image_writer_.reset(new linker::ImageWriter(*driver_,
+ image_writer_.reset(new linker::ImageWriter(*compiler_options_,
image_base_,
compiler_options_->GetCompilePic(),
IsAppImage(),
@@ -2076,8 +2075,8 @@ class Dex2Oat FINAL {
{
TimingLogger::ScopedTiming t2("dex2oat Write ELF", timings_);
- linker::MultiOatRelativePatcher patcher(instruction_set_,
- instruction_set_features_.get(),
+ linker::MultiOatRelativePatcher patcher(compiler_options_->GetInstructionSet(),
+ compiler_options_->GetInstructionSetFeatures(),
driver_->GetCompiledMethodStorage());
for (size_t i = 0, size = oat_files_.size(); i != size; ++i) {
std::unique_ptr<linker::ElfWriter>& elf_writer = elf_writers_[i];
@@ -2373,7 +2372,7 @@ class Dex2Oat FINAL {
return dex_files_size >= min_dex_file_cumulative_size_for_swap_;
}
- bool IsVeryLarge(std::vector<const DexFile*>& dex_files) {
+ bool IsVeryLarge(const std::vector<const DexFile*>& dex_files) {
size_t dex_files_size = 0;
for (const auto* dex_file : dex_files) {
dex_files_size += dex_file->GetHeader().file_size_;
@@ -2383,27 +2382,27 @@ class Dex2Oat FINAL {
bool PrepareImageClasses() {
// If --image-classes was specified, calculate the full list of classes to include in the image.
+ DCHECK(compiler_options_->image_classes_.empty());
if (image_classes_filename_ != nullptr) {
- image_classes_ =
+ std::unique_ptr<HashSet<std::string>> image_classes =
ReadClasses(image_classes_zip_filename_, image_classes_filename_, "image");
- if (image_classes_ == nullptr) {
+ if (image_classes == nullptr) {
return false;
}
- } else if (IsBootImage()) {
- image_classes_.reset(new std::unordered_set<std::string>);
+ compiler_options_->image_classes_.swap(*image_classes);
}
return true;
}
- static std::unique_ptr<std::unordered_set<std::string>> ReadClasses(const char* zip_filename,
- const char* classes_filename,
- const char* tag) {
- std::unique_ptr<std::unordered_set<std::string>> classes;
+ static std::unique_ptr<HashSet<std::string>> ReadClasses(const char* zip_filename,
+ const char* classes_filename,
+ const char* tag) {
+ std::unique_ptr<HashSet<std::string>> classes;
std::string error_msg;
if (zip_filename != nullptr) {
- classes.reset(ReadImageClassesFromZip(zip_filename, classes_filename, &error_msg));
+ classes = ReadImageClassesFromZip(zip_filename, classes_filename, &error_msg);
} else {
- classes.reset(ReadImageClassesFromFile(classes_filename));
+ classes = ReadImageClassesFromFile(classes_filename);
}
if (classes == nullptr) {
LOG(ERROR) << "Failed to create list of " << tag << " classes from '"
@@ -2414,9 +2413,9 @@ class Dex2Oat FINAL {
bool PrepareDirtyObjects() {
if (dirty_image_objects_filename_ != nullptr) {
- dirty_image_objects_.reset(ReadCommentedInputFromFile<std::unordered_set<std::string>>(
+ dirty_image_objects_ = ReadCommentedInputFromFile<HashSet<std::string>>(
dirty_image_objects_filename_,
- nullptr));
+ nullptr);
if (dirty_image_objects_ == nullptr) {
LOG(ERROR) << "Failed to create list of dirty objects from '"
<< dirty_image_objects_filename_ << "'";
@@ -2486,17 +2485,14 @@ class Dex2Oat FINAL {
elf_writers_.reserve(oat_files_.size());
oat_writers_.reserve(oat_files_.size());
for (const std::unique_ptr<File>& oat_file : oat_files_) {
- elf_writers_.emplace_back(linker::CreateElfWriterQuick(instruction_set_,
- instruction_set_features_.get(),
- compiler_options_.get(),
- oat_file.get()));
+ elf_writers_.emplace_back(linker::CreateElfWriterQuick(*compiler_options_, oat_file.get()));
elf_writers_.back()->Start();
bool do_oat_writer_layout = DoDexLayoutOptimizations() || DoOatLayoutOptimizations();
if (profile_compilation_info_ != nullptr && profile_compilation_info_->IsEmpty()) {
do_oat_writer_layout = false;
}
oat_writers_.emplace_back(new linker::OatWriter(
- IsBootImage(),
+ *compiler_options_,
timings_,
do_oat_writer_layout ? profile_compilation_info_.get() : nullptr,
compact_dex_level_));
@@ -2504,8 +2500,9 @@ class Dex2Oat FINAL {
}
void SaveDexInput() {
- for (size_t i = 0; i < dex_files_.size(); ++i) {
- const DexFile* dex_file = dex_files_[i];
+ const std::vector<const DexFile*>& dex_files = compiler_options_->dex_files_for_oat_file_;
+ for (size_t i = 0, size = dex_files.size(); i != size; ++i) {
+ const DexFile* dex_file = dex_files[i];
std::string tmp_file_name(StringPrintf("/data/local/tmp/dex2oat.%d.%zd.dex",
getpid(), i));
std::unique_ptr<File> tmp_file(OS::CreateEmptyFile(tmp_file_name.c_str()));
@@ -2543,7 +2540,8 @@ class Dex2Oat FINAL {
raw_options.push_back(std::make_pair("compilercallbacks", callbacks));
raw_options.push_back(
- std::make_pair("imageinstructionset", GetInstructionSetString(instruction_set_)));
+ std::make_pair("imageinstructionset",
+ GetInstructionSetString(compiler_options_->GetInstructionSet())));
// Only allow no boot image for the runtime if we're compiling one. When we compile an app,
// we don't want fallback mode, it will abort as we do not push a boot classpath (it might
@@ -2606,7 +2604,7 @@ class Dex2Oat FINAL {
SetThreadName(kIsDebugBuild ? "dex2oatd" : "dex2oat");
runtime_.reset(Runtime::Current());
- runtime_->SetInstructionSet(instruction_set_);
+ runtime_->SetInstructionSet(compiler_options_->GetInstructionSet());
for (uint32_t i = 0; i < static_cast<uint32_t>(CalleeSaveType::kLastCalleeSaveType); ++i) {
CalleeSaveType type = CalleeSaveType(i);
if (!runtime_->HasCalleeSaveMethod(type)) {
@@ -2618,11 +2616,11 @@ class Dex2Oat FINAL {
// set up.
interpreter::UnstartedRuntime::Initialize();
- runtime_->GetClassLinker()->RunRootClinits();
+ Thread* self = Thread::Current();
+ runtime_->RunRootClinits(self);
// Runtime::Create acquired the mutator_lock_ that is normally given away when we
// Runtime::Start, give it away now so that we don't starve GC.
- Thread* self = Thread::Current();
self->TransitionFromRunnableToSuspended(kNative);
return true;
@@ -2678,45 +2676,43 @@ class Dex2Oat FINAL {
}
// Reads the class names (java.lang.Object) and returns a set of descriptors (Ljava/lang/Object;)
- static std::unordered_set<std::string>* ReadImageClassesFromFile(
+ static std::unique_ptr<HashSet<std::string>> ReadImageClassesFromFile(
const char* image_classes_filename) {
std::function<std::string(const char*)> process = DotToDescriptor;
- return ReadCommentedInputFromFile<std::unordered_set<std::string>>(image_classes_filename,
- &process);
+ return ReadCommentedInputFromFile<HashSet<std::string>>(image_classes_filename, &process);
}
// Reads the class names (java.lang.Object) and returns a set of descriptors (Ljava/lang/Object;)
- static std::unordered_set<std::string>* ReadImageClassesFromZip(
+ static std::unique_ptr<HashSet<std::string>> ReadImageClassesFromZip(
const char* zip_filename,
const char* image_classes_filename,
std::string* error_msg) {
std::function<std::string(const char*)> process = DotToDescriptor;
- return ReadCommentedInputFromZip<std::unordered_set<std::string>>(zip_filename,
- image_classes_filename,
- &process,
- error_msg);
+ return ReadCommentedInputFromZip<HashSet<std::string>>(zip_filename,
+ image_classes_filename,
+ &process,
+ error_msg);
}
// Read lines from the given file, dropping comments and empty lines. Post-process each line with
// the given function.
template <typename T>
- static T* ReadCommentedInputFromFile(
+ static std::unique_ptr<T> ReadCommentedInputFromFile(
const char* input_filename, std::function<std::string(const char*)>* process) {
std::unique_ptr<std::ifstream> input_file(new std::ifstream(input_filename, std::ifstream::in));
if (input_file.get() == nullptr) {
LOG(ERROR) << "Failed to open input file " << input_filename;
return nullptr;
}
- std::unique_ptr<T> result(
- ReadCommentedInputStream<T>(*input_file, process));
+ std::unique_ptr<T> result = ReadCommentedInputStream<T>(*input_file, process);
input_file->close();
- return result.release();
+ return result;
}
// Read lines from the given file from the given zip file, dropping comments and empty lines.
// Post-process each line with the given function.
template <typename T>
- static T* ReadCommentedInputFromZip(
+ static std::unique_ptr<T> ReadCommentedInputFromZip(
const char* zip_filename,
const char* input_filename,
std::function<std::string(const char*)>* process,
@@ -2748,7 +2744,7 @@ class Dex2Oat FINAL {
// Read lines from the given stream, dropping comments and empty lines. Post-process each line
// with the given function.
template <typename T>
- static T* ReadCommentedInputStream(
+ static std::unique_ptr<T> ReadCommentedInputStream(
std::istream& in_stream,
std::function<std::string(const char*)>* process) {
std::unique_ptr<T> output(new T());
@@ -2765,7 +2761,7 @@ class Dex2Oat FINAL {
output->insert(output->end(), dot);
}
}
- return output.release();
+ return output;
}
void LogCompletionTime() {
@@ -2803,9 +2799,6 @@ class Dex2Oat FINAL {
std::unique_ptr<CompilerOptions> compiler_options_;
Compiler::Kind compiler_kind_;
- InstructionSet instruction_set_;
- std::unique_ptr<const InstructionSetFeatures> instruction_set_features_;
-
uint32_t image_file_location_oat_checksum_;
uintptr_t image_file_location_oat_data_begin_;
int32_t image_patch_delta_;
@@ -2854,16 +2847,11 @@ class Dex2Oat FINAL {
ImageHeader::StorageMode image_storage_mode_;
const char* passes_to_run_filename_;
const char* dirty_image_objects_filename_;
- std::unique_ptr<std::unordered_set<std::string>> image_classes_;
- std::unique_ptr<std::unordered_set<std::string>> compiled_classes_;
- std::unique_ptr<std::unordered_set<std::string>> compiled_methods_;
- std::unique_ptr<std::unordered_set<std::string>> dirty_image_objects_;
+ std::unique_ptr<HashSet<std::string>> dirty_image_objects_;
std::unique_ptr<std::vector<std::string>> passes_to_run_;
bool multi_image_;
bool is_host_;
std::string android_root_;
- // Dex files we are compiling, does not include the class path dex files.
- std::vector<const DexFile*> dex_files_;
std::string no_inline_from_string_;
CompactDexLevel compact_dex_level_ = kDefaultCompactDexLevel;
@@ -2877,9 +2865,6 @@ class Dex2Oat FINAL {
std::vector<std::unique_ptr<MemMap>> opened_dex_files_maps_;
std::vector<std::unique_ptr<const DexFile>> opened_dex_files_;
- // Note that this might contain pointers owned by class_loader_context_.
- std::vector<const DexFile*> no_inline_from_dex_files_;
-
bool avoid_storing_invocation_;
std::string swap_file_name_;
int swap_fd_;
@@ -2909,7 +2894,7 @@ class Dex2Oat FINAL {
// By default, copy the dex to the vdex file only if dex files are
// compressed in APK.
- CopyOption copy_dex_files_ = CopyOption::kOnlyIfCompressed;
+ linker::CopyOption copy_dex_files_ = linker::CopyOption::kOnlyIfCompressed;
// The reason for invoking the compiler.
std::string compilation_reason_;
@@ -3119,9 +3104,9 @@ static dex2oat::ReturnCode Dex2oat(int argc, char** argv) {
int main(int argc, char** argv) {
int result = static_cast<int>(art::Dex2oat(argc, argv));
// Everything was done, do an explicit exit here to avoid running Runtime destructors that take
- // time (bug 10645725) unless we're a debug or instrumented build or running on valgrind. Note:
- // The Dex2Oat class should not destruct the runtime in this case.
- if (!art::kIsDebugBuild && !art::kIsPGOInstrumentation && (RUNNING_ON_MEMORY_TOOL == 0)) {
+ // time (bug 10645725) unless we're a debug or instrumented build or running on a memory tool.
+ // Note: The Dex2Oat class should not destruct the runtime in this case.
+ if (!art::kIsDebugBuild && !art::kIsPGOInstrumentation && !art::kRunningOnMemoryTool) {
_exit(result);
}
return result;
diff --git a/dex2oat/dex2oat_image_test.cc b/dex2oat/dex2oat_image_test.cc
index 03664673c3..ae8e1b7597 100644
--- a/dex2oat/dex2oat_image_test.cc
+++ b/dex2oat/dex2oat_image_test.cc
@@ -232,6 +232,11 @@ class Dex2oatImageTest : public CommonRuntimeTest {
};
TEST_F(Dex2oatImageTest, TestModesAndFilters) {
+ // This test crashes on the gtest-heap-poisoning configuration
+ // (AddressSanitizer + CMS/RosAlloc + heap-poisoning); see b/111061592.
+ // Temporarily disable this test on this configuration to keep
+ // our automated build/testing green while we work on a fix.
+ TEST_DISABLED_FOR_MEMORY_TOOL_WITH_HEAP_POISONING_WITHOUT_READ_BARRIERS();
if (kIsTargetBuild) {
// This test is too slow for target builds.
return;
diff --git a/dex2oat/dex2oat_options.cc b/dex2oat/dex2oat_options.cc
index dbb00c22e9..bf9edf7384 100644
--- a/dex2oat/dex2oat_options.cc
+++ b/dex2oat/dex2oat_options.cc
@@ -222,10 +222,10 @@ static Parser CreateArgumentParser() {
.Define("--force-determinism")
.IntoKey(M::ForceDeterminism)
.Define("--copy-dex-files=_")
- .WithType<CopyOption>()
- .WithValueMap({{"true", CopyOption::kOnlyIfCompressed},
- {"false", CopyOption::kNever},
- {"always", CopyOption::kAlways}})
+ .WithType<linker::CopyOption>()
+ .WithValueMap({{"true", linker::CopyOption::kOnlyIfCompressed},
+ {"false", linker::CopyOption::kNever},
+ {"always", linker::CopyOption::kAlways}})
.IntoKey(M::CopyDexFiles)
.Define("--classpath-dir=_")
.WithType<std::string>()
diff --git a/dex2oat/dex2oat_options.def b/dex2oat/dex2oat_options.def
index 7be8e56501..fe5c4e69a7 100644
--- a/dex2oat/dex2oat_options.def
+++ b/dex2oat/dex2oat_options.def
@@ -70,7 +70,7 @@ DEX2OAT_OPTIONS_KEY (Unit, Host)
DEX2OAT_OPTIONS_KEY (Unit, DumpTiming)
DEX2OAT_OPTIONS_KEY (Unit, DumpPasses)
DEX2OAT_OPTIONS_KEY (Unit, DumpStats)
-DEX2OAT_OPTIONS_KEY (CopyOption, CopyDexFiles)
+DEX2OAT_OPTIONS_KEY (linker::CopyOption, CopyDexFiles)
DEX2OAT_OPTIONS_KEY (Unit, AvoidStoringInvocation)
DEX2OAT_OPTIONS_KEY (std::string, SwapFile)
DEX2OAT_OPTIONS_KEY (int, SwapFileFd)
diff --git a/dex2oat/dex2oat_options.h b/dex2oat/dex2oat_options.h
index cc124c1afa..27d3d25f2a 100644
--- a/dex2oat/dex2oat_options.h
+++ b/dex2oat/dex2oat_options.h
@@ -28,6 +28,7 @@
#include "dex/compact_dex_level.h"
#include "driver/compiler_options_map.h"
#include "image.h"
+#include "linker/oat_writer.h"
namespace art {
diff --git a/dex2oat/dex2oat_test.cc b/dex2oat/dex2oat_test.cc
index 96d7dba225..ad44624f76 100644
--- a/dex2oat/dex2oat_test.cc
+++ b/dex2oat/dex2oat_test.cc
@@ -472,8 +472,8 @@ class Dex2oatSwapUseTest : public Dex2oatSwapTest {
};
TEST_F(Dex2oatSwapUseTest, CheckSwapUsage) {
- // Native memory usage isn't correctly tracked under sanitization.
- TEST_DISABLED_FOR_MEMORY_TOOL_ASAN();
+ // Native memory usage isn't correctly tracked when running under ASan.
+ TEST_DISABLED_FOR_MEMORY_TOOL();
// The `native_alloc_2_ >= native_alloc_1_` assertion below may not
// hold true on some x86 systems; disable this test while we
@@ -1054,8 +1054,6 @@ TEST_F(Dex2oatWatchdogTest, TestWatchdogOK) {
}
TEST_F(Dex2oatWatchdogTest, TestWatchdogTrigger) {
- TEST_DISABLED_FOR_MEMORY_TOOL_VALGRIND(); // b/63052624
-
// The watchdog is independent of dex2oat and will not delete intermediates. It is possible
// that the compilation succeeds and the file is completely written by the time the watchdog
// kills dex2oat (but the dex2oat threads must have been scheduled pretty badly).
@@ -1770,7 +1768,7 @@ TEST_F(Dex2oatTest, CompactDexGenerationFailureMultiDex) {
writer.Finish();
ASSERT_EQ(apk_file.GetFile()->Flush(), 0);
}
- const std::string dex_location = apk_file.GetFilename();
+ const std::string& dex_location = apk_file.GetFilename();
const std::string odex_location = GetOdexDir() + "/output.odex";
GenerateOdexForTest(dex_location,
odex_location,
@@ -1976,7 +1974,7 @@ TEST_F(Dex2oatTest, QuickenedInput) {
<< "Failed to find candidate code item with only one code unit in last instruction.";
});
- std::string dex_location = temp_dex.GetFilename();
+ const std::string& dex_location = temp_dex.GetFilename();
std::string odex_location = GetOdexDir() + "/quickened.odex";
std::string vdex_location = GetOdexDir() + "/quickened.vdex";
std::unique_ptr<File> vdex_output(OS::CreateEmptyFile(vdex_location.c_str()));
@@ -2051,7 +2049,7 @@ TEST_F(Dex2oatTest, CompactDexInvalidSource) {
writer.Finish();
ASSERT_EQ(invalid_dex.GetFile()->Flush(), 0);
}
- const std::string dex_location = invalid_dex.GetFilename();
+ const std::string& dex_location = invalid_dex.GetFilename();
const std::string odex_location = GetOdexDir() + "/output.odex";
std::string error_msg;
int status = GenerateOdexForTestWithStatus(
diff --git a/dex2oat/linker/arm/relative_patcher_arm_base.cc b/dex2oat/linker/arm/relative_patcher_arm_base.cc
index 7cb8ae55c5..a2ba339278 100644
--- a/dex2oat/linker/arm/relative_patcher_arm_base.cc
+++ b/dex2oat/linker/arm/relative_patcher_arm_base.cc
@@ -251,7 +251,7 @@ std::vector<debug::MethodDebugInfo> ArmBaseRelativePatcher::GenerateThunkDebugIn
continue;
}
// Get the base name to use for the first occurrence of the thunk.
- std::string base_name = data.GetDebugName();
+ const std::string& base_name = data.GetDebugName();
for (size_t i = start, num = data.NumberOfThunks(); i != num; ++i) {
debug::MethodDebugInfo info = {};
if (i == 0u) {
diff --git a/dex2oat/linker/arm/relative_patcher_thumb2_test.cc b/dex2oat/linker/arm/relative_patcher_thumb2_test.cc
index 3fe97e146c..3d7277aab3 100644
--- a/dex2oat/linker/arm/relative_patcher_thumb2_test.cc
+++ b/dex2oat/linker/arm/relative_patcher_thumb2_test.cc
@@ -197,10 +197,7 @@ class Thumb2RelativePatcherTest : public RelativePatcherTest {
OptimizingUnitTestHelper helper;
HGraph* graph = helper.CreateGraph();
std::string error_msg;
- ArmFeaturesUniquePtr features =
- ArmInstructionSetFeatures::FromVariant("default", &error_msg);
- CompilerOptions options;
- arm::CodeGeneratorARMVIXL codegen(graph, *features, options);
+ arm::CodeGeneratorARMVIXL codegen(graph, *compiler_options_);
ArenaVector<uint8_t> code(helper.GetAllocator()->Adapter());
codegen.EmitThunkCode(patch, &code, debug_name);
return std::vector<uint8_t>(code.begin(), code.end());
diff --git a/dex2oat/linker/arm64/relative_patcher_arm64.cc b/dex2oat/linker/arm64/relative_patcher_arm64.cc
index 71d1287c87..dd0fcfe0be 100644
--- a/dex2oat/linker/arm64/relative_patcher_arm64.cc
+++ b/dex2oat/linker/arm64/relative_patcher_arm64.cc
@@ -61,6 +61,7 @@ inline bool IsAdrpPatch(const LinkerPatch& patch) {
case LinkerPatch::Type::kCallRelative:
case LinkerPatch::Type::kBakerReadBarrierBranch:
return false;
+ case LinkerPatch::Type::kIntrinsicReference:
case LinkerPatch::Type::kDataBimgRelRo:
case LinkerPatch::Type::kMethodRelative:
case LinkerPatch::Type::kMethodBssEntry:
@@ -258,12 +259,14 @@ void Arm64RelativePatcher::PatchPcRelativeReference(std::vector<uint8_t>* code,
if ((insn & 0xfffffc00) == 0x91000000) {
// ADD immediate, 64-bit with imm12 == 0 (unset).
if (!kEmitCompilerReadBarrier) {
- DCHECK(patch.GetType() == LinkerPatch::Type::kMethodRelative ||
+ DCHECK(patch.GetType() == LinkerPatch::Type::kIntrinsicReference ||
+ patch.GetType() == LinkerPatch::Type::kMethodRelative ||
patch.GetType() == LinkerPatch::Type::kTypeRelative ||
patch.GetType() == LinkerPatch::Type::kStringRelative) << patch.GetType();
} else {
// With the read barrier (non-Baker) enabled, it could be kStringBssEntry or kTypeBssEntry.
- DCHECK(patch.GetType() == LinkerPatch::Type::kMethodRelative ||
+ DCHECK(patch.GetType() == LinkerPatch::Type::kIntrinsicReference ||
+ patch.GetType() == LinkerPatch::Type::kMethodRelative ||
patch.GetType() == LinkerPatch::Type::kTypeRelative ||
patch.GetType() == LinkerPatch::Type::kStringRelative ||
patch.GetType() == LinkerPatch::Type::kTypeBssEntry ||
diff --git a/dex2oat/linker/arm64/relative_patcher_arm64_test.cc b/dex2oat/linker/arm64/relative_patcher_arm64_test.cc
index 393733dd0c..07e6860f9c 100644
--- a/dex2oat/linker/arm64/relative_patcher_arm64_test.cc
+++ b/dex2oat/linker/arm64/relative_patcher_arm64_test.cc
@@ -176,10 +176,7 @@ class Arm64RelativePatcherTest : public RelativePatcherTest {
OptimizingUnitTestHelper helper;
HGraph* graph = helper.CreateGraph();
std::string error_msg;
- Arm64FeaturesUniquePtr features =
- Arm64InstructionSetFeatures::FromVariant("default", &error_msg);
- CompilerOptions options;
- arm64::CodeGeneratorARM64 codegen(graph, *features, options);
+ arm64::CodeGeneratorARM64 codegen(graph, *compiler_options_);
ArenaVector<uint8_t> code(helper.GetAllocator()->Adapter());
codegen.EmitThunkCode(patch, &code, debug_name);
return std::vector<uint8_t>(code.begin(), code.end());
diff --git a/dex2oat/linker/elf_writer_quick.cc b/dex2oat/linker/elf_writer_quick.cc
index 58bd1b0f1f..8f6ff702cc 100644
--- a/dex2oat/linker/elf_writer_quick.cc
+++ b/dex2oat/linker/elf_writer_quick.cc
@@ -96,9 +96,7 @@ class DebugInfoTask : public Task {
template <typename ElfTypes>
class ElfWriterQuick FINAL : public ElfWriter {
public:
- ElfWriterQuick(InstructionSet instruction_set,
- const InstructionSetFeatures* features,
- const CompilerOptions* compiler_options,
+ ElfWriterQuick(const CompilerOptions& compiler_options,
File* elf_file);
~ElfWriterQuick();
@@ -129,8 +127,7 @@ class ElfWriterQuick FINAL : public ElfWriter {
std::vector<uint8_t>* buffer);
private:
- const InstructionSetFeatures* instruction_set_features_;
- const CompilerOptions* const compiler_options_;
+ const CompilerOptions& compiler_options_;
File* const elf_file_;
size_t rodata_size_;
size_t text_size_;
@@ -147,30 +144,18 @@ class ElfWriterQuick FINAL : public ElfWriter {
DISALLOW_IMPLICIT_CONSTRUCTORS(ElfWriterQuick);
};
-std::unique_ptr<ElfWriter> CreateElfWriterQuick(InstructionSet instruction_set,
- const InstructionSetFeatures* features,
- const CompilerOptions* compiler_options,
+std::unique_ptr<ElfWriter> CreateElfWriterQuick(const CompilerOptions& compiler_options,
File* elf_file) {
- if (Is64BitInstructionSet(instruction_set)) {
- return std::make_unique<ElfWriterQuick<ElfTypes64>>(instruction_set,
- features,
- compiler_options,
- elf_file);
+ if (Is64BitInstructionSet(compiler_options.GetInstructionSet())) {
+ return std::make_unique<ElfWriterQuick<ElfTypes64>>(compiler_options, elf_file);
} else {
- return std::make_unique<ElfWriterQuick<ElfTypes32>>(instruction_set,
- features,
- compiler_options,
- elf_file);
+ return std::make_unique<ElfWriterQuick<ElfTypes32>>(compiler_options, elf_file);
}
}
template <typename ElfTypes>
-ElfWriterQuick<ElfTypes>::ElfWriterQuick(InstructionSet instruction_set,
- const InstructionSetFeatures* features,
- const CompilerOptions* compiler_options,
- File* elf_file)
+ElfWriterQuick<ElfTypes>::ElfWriterQuick(const CompilerOptions& compiler_options, File* elf_file)
: ElfWriter(),
- instruction_set_features_(features),
compiler_options_(compiler_options),
elf_file_(elf_file),
rodata_size_(0u),
@@ -180,7 +165,9 @@ ElfWriterQuick<ElfTypes>::ElfWriterQuick(InstructionSet instruction_set,
dex_section_size_(0u),
output_stream_(
std::make_unique<BufferedOutputStream>(std::make_unique<FileOutputStream>(elf_file))),
- builder_(new ElfBuilder<ElfTypes>(instruction_set, features, output_stream_.get())) {}
+ builder_(new ElfBuilder<ElfTypes>(compiler_options_.GetInstructionSet(),
+ compiler_options_.GetInstructionSetFeatures(),
+ output_stream_.get())) {}
template <typename ElfTypes>
ElfWriterQuick<ElfTypes>::~ElfWriterQuick() {}
@@ -188,7 +175,7 @@ ElfWriterQuick<ElfTypes>::~ElfWriterQuick() {}
template <typename ElfTypes>
void ElfWriterQuick<ElfTypes>::Start() {
builder_->Start();
- if (compiler_options_->GetGenerateBuildId()) {
+ if (compiler_options_.GetGenerateBuildId()) {
builder_->GetBuildId()->AllocateVirtualMemory(builder_->GetBuildId()->GetSize());
builder_->WriteBuildIdSection();
}
@@ -272,12 +259,12 @@ void ElfWriterQuick<ElfTypes>::WriteDynamicSection() {
template <typename ElfTypes>
void ElfWriterQuick<ElfTypes>::PrepareDebugInfo(const debug::DebugInfo& debug_info) {
- if (!debug_info.Empty() && compiler_options_->GetGenerateMiniDebugInfo()) {
+ if (!debug_info.Empty() && compiler_options_.GetGenerateMiniDebugInfo()) {
// Prepare the mini-debug-info in background while we do other I/O.
Thread* self = Thread::Current();
debug_info_task_ = std::unique_ptr<DebugInfoTask>(
new DebugInfoTask(builder_->GetIsa(),
- instruction_set_features_,
+ compiler_options_.GetInstructionSetFeatures(),
builder_->GetText()->GetAddress(),
text_size_,
builder_->GetDex()->Exists() ? builder_->GetDex()->GetAddress() : 0,
@@ -293,11 +280,11 @@ void ElfWriterQuick<ElfTypes>::PrepareDebugInfo(const debug::DebugInfo& debug_in
template <typename ElfTypes>
void ElfWriterQuick<ElfTypes>::WriteDebugInfo(const debug::DebugInfo& debug_info) {
if (!debug_info.Empty()) {
- if (compiler_options_->GetGenerateDebugInfo()) {
+ if (compiler_options_.GetGenerateDebugInfo()) {
// Generate all the debug information we can.
debug::WriteDebugInfo(builder_.get(), debug_info, kCFIFormat, true /* write_oat_patches */);
}
- if (compiler_options_->GetGenerateMiniDebugInfo()) {
+ if (compiler_options_.GetGenerateMiniDebugInfo()) {
// Wait for the mini-debug-info generation to finish and write it to disk.
Thread* self = Thread::Current();
DCHECK(debug_info_thread_pool_ != nullptr);
@@ -310,7 +297,7 @@ void ElfWriterQuick<ElfTypes>::WriteDebugInfo(const debug::DebugInfo& debug_info
template <typename ElfTypes>
bool ElfWriterQuick<ElfTypes>::End() {
builder_->End();
- if (compiler_options_->GetGenerateBuildId()) {
+ if (compiler_options_.GetGenerateBuildId()) {
uint8_t build_id[ElfBuilder<ElfTypes>::kBuildIdLen];
ComputeFileBuildId(&build_id);
builder_->WriteBuildId(build_id);
diff --git a/dex2oat/linker/elf_writer_quick.h b/dex2oat/linker/elf_writer_quick.h
index 274d18b858..333c6e3b06 100644
--- a/dex2oat/linker/elf_writer_quick.h
+++ b/dex2oat/linker/elf_writer_quick.h
@@ -30,9 +30,7 @@ class InstructionSetFeatures;
namespace linker {
-std::unique_ptr<ElfWriter> CreateElfWriterQuick(InstructionSet instruction_set,
- const InstructionSetFeatures* features,
- const CompilerOptions* compiler_options,
+std::unique_ptr<ElfWriter> CreateElfWriterQuick(const CompilerOptions& compiler_options,
File* elf_file);
} // namespace linker
diff --git a/dex2oat/linker/image_test.h b/dex2oat/linker/image_test.h
index f0daf69850..fa8c7784f5 100644
--- a/dex2oat/linker/image_test.h
+++ b/dex2oat/linker/image_test.h
@@ -27,6 +27,7 @@
#include "art_method-inl.h"
#include "base/file_utils.h"
+#include "base/hash_set.h"
#include "base/unix_file/fd_file.h"
#include "base/utils.h"
#include "class_linker-inl.h"
@@ -34,6 +35,7 @@
#include "compiler_callbacks.h"
#include "debug/method_debug_info.h"
#include "dex/quick_compiler_callbacks.h"
+#include "driver/compiler_driver.h"
#include "driver/compiler_options.h"
#include "gc/space/image_space.h"
#include "image_writer.h"
@@ -62,9 +64,6 @@ struct CompilationHelper {
std::vector<ScratchFile> vdex_files;
std::string image_dir;
- void Compile(CompilerDriver* driver,
- ImageHeader::StorageMode storage_mode);
-
std::vector<size_t> GetImageObjectSectionSizes();
~CompilationHelper();
@@ -80,7 +79,7 @@ class ImageTest : public CommonCompilerTest {
void TestWriteRead(ImageHeader::StorageMode storage_mode);
void Compile(ImageHeader::StorageMode storage_mode,
- CompilationHelper& out_helper,
+ /*out*/ CompilationHelper& out_helper,
const std::string& extra_dex = "",
const std::initializer_list<std::string>& image_classes = {});
@@ -93,8 +92,8 @@ class ImageTest : public CommonCompilerTest {
options->push_back(std::make_pair("compilercallbacks", callbacks_.get()));
}
- std::unordered_set<std::string>* GetImageClasses() OVERRIDE {
- return new std::unordered_set<std::string>(image_classes_);
+ std::unique_ptr<HashSet<std::string>> GetImageClasses() OVERRIDE {
+ return std::make_unique<HashSet<std::string>>(image_classes_);
}
ArtMethod* FindCopiedMethod(ArtMethod* origin, ObjPtr<mirror::Class> klass)
@@ -110,7 +109,9 @@ class ImageTest : public CommonCompilerTest {
}
private:
- std::unordered_set<std::string> image_classes_;
+ void DoCompile(ImageHeader::StorageMode storage_mode, /*out*/ CompilationHelper& out_helper);
+
+ HashSet<std::string> image_classes_;
};
inline CompilationHelper::~CompilationHelper() {
@@ -140,12 +141,13 @@ inline std::vector<size_t> CompilationHelper::GetImageObjectSectionSizes() {
return ret;
}
-inline void CompilationHelper::Compile(CompilerDriver* driver,
- ImageHeader::StorageMode storage_mode) {
+inline void ImageTest::DoCompile(ImageHeader::StorageMode storage_mode,
+ /*out*/ CompilationHelper& out_helper) {
+ CompilerDriver* driver = compiler_driver_.get();
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
std::vector<const DexFile*> class_path = class_linker->GetBootClassPath();
- for (const std::unique_ptr<const DexFile>& dex_file : extra_dex_files) {
+ for (const std::unique_ptr<const DexFile>& dex_file : out_helper.extra_dex_files) {
{
ScopedObjectAccess soa(Thread::Current());
// Inject in boot class path so that the compiler driver can see it.
@@ -156,7 +158,7 @@ inline void CompilationHelper::Compile(CompilerDriver* driver,
// Enable write for dex2dex.
for (const DexFile* dex_file : class_path) {
- dex_file_locations.push_back(dex_file->GetLocation());
+ out_helper.dex_file_locations.push_back(dex_file->GetLocation());
if (dex_file->IsReadOnly()) {
dex_file->EnableWrite();
}
@@ -167,31 +169,31 @@ inline void CompilationHelper::Compile(CompilerDriver* driver,
for (int i = 0; i < static_cast<int>(class_path.size()); ++i) {
std::string cur_location =
android::base::StringPrintf("%s-%d.art", location.GetFilename().c_str(), i);
- image_locations.push_back(ScratchFile(cur_location));
+ out_helper.image_locations.push_back(ScratchFile(cur_location));
}
}
std::vector<std::string> image_filenames;
- for (ScratchFile& file : image_locations) {
+ for (ScratchFile& file : out_helper.image_locations) {
std::string image_filename(GetSystemImageFilename(file.GetFilename().c_str(), kRuntimeISA));
image_filenames.push_back(image_filename);
size_t pos = image_filename.rfind('/');
CHECK_NE(pos, std::string::npos) << image_filename;
- if (image_dir.empty()) {
- image_dir = image_filename.substr(0, pos);
- int mkdir_result = mkdir(image_dir.c_str(), 0700);
- CHECK_EQ(0, mkdir_result) << image_dir;
+ if (out_helper.image_dir.empty()) {
+ out_helper.image_dir = image_filename.substr(0, pos);
+ int mkdir_result = mkdir(out_helper.image_dir.c_str(), 0700);
+ CHECK_EQ(0, mkdir_result) << out_helper.image_dir;
}
- image_files.push_back(ScratchFile(OS::CreateEmptyFile(image_filename.c_str())));
+ out_helper.image_files.push_back(ScratchFile(OS::CreateEmptyFile(image_filename.c_str())));
}
std::vector<std::string> oat_filenames;
std::vector<std::string> vdex_filenames;
for (const std::string& image_filename : image_filenames) {
std::string oat_filename = ReplaceFileExtension(image_filename, "oat");
- oat_files.push_back(ScratchFile(OS::CreateEmptyFile(oat_filename.c_str())));
+ out_helper.oat_files.push_back(ScratchFile(OS::CreateEmptyFile(oat_filename.c_str())));
oat_filenames.push_back(oat_filename);
std::string vdex_filename = ReplaceFileExtension(image_filename, "vdex");
- vdex_files.push_back(ScratchFile(OS::CreateEmptyFile(vdex_filename.c_str())));
+ out_helper.vdex_files.push_back(ScratchFile(OS::CreateEmptyFile(vdex_filename.c_str())));
vdex_filenames.push_back(vdex_filename);
}
@@ -210,7 +212,7 @@ inline void CompilationHelper::Compile(CompilerDriver* driver,
++image_idx;
}
// TODO: compile_pic should be a test argument.
- std::unique_ptr<ImageWriter> writer(new ImageWriter(*driver,
+ std::unique_ptr<ImageWriter> writer(new ImageWriter(*compiler_options_,
kRequestedImageBase,
/*compile_pic*/false,
/*compile_app_image*/false,
@@ -223,7 +225,7 @@ inline void CompilationHelper::Compile(CompilerDriver* driver,
jobject class_loader = nullptr;
TimingLogger timings("ImageTest::WriteRead", false, false);
TimingLogger::ScopedTiming t("CompileAll", &timings);
- driver->SetDexFilesForOatFile(class_path);
+ SetDexFilesForOatFile(class_path);
driver->CompileAll(class_loader, class_path, &timings);
t.NewTiming("WriteElf");
@@ -240,13 +242,10 @@ inline void CompilationHelper::Compile(CompilerDriver* driver,
std::vector<std::unique_ptr<ElfWriter>> elf_writers;
std::vector<std::unique_ptr<OatWriter>> oat_writers;
- for (ScratchFile& oat_file : oat_files) {
- elf_writers.emplace_back(CreateElfWriterQuick(driver->GetInstructionSet(),
- driver->GetInstructionSetFeatures(),
- &driver->GetCompilerOptions(),
- oat_file.GetFile()));
+ for (ScratchFile& oat_file : out_helper.oat_files) {
+ elf_writers.emplace_back(CreateElfWriterQuick(*compiler_options_, oat_file.GetFile()));
elf_writers.back()->Start();
- oat_writers.emplace_back(new OatWriter(/*compiling_boot_image*/true,
+ oat_writers.emplace_back(new OatWriter(*compiler_options_,
&timings,
/*profile_compilation_info*/nullptr,
CompactDexLevel::kCompactDexLevelNone));
@@ -269,10 +268,8 @@ inline void CompilationHelper::Compile(CompilerDriver* driver,
std::vector<std::unique_ptr<MemMap>> cur_opened_dex_files_maps;
std::vector<std::unique_ptr<const DexFile>> cur_opened_dex_files;
bool dex_files_ok = oat_writers[i]->WriteAndOpenDexFiles(
- vdex_files[i].GetFile(),
+ out_helper.vdex_files[i].GetFile(),
rodata.back(),
- driver->GetInstructionSet(),
- driver->GetInstructionSetFeatures(),
&key_value_store,
/* verify */ false, // Dex files may be dex-to-dex-ed, don't verify.
/* update_input_vdex */ false,
@@ -296,10 +293,10 @@ inline void CompilationHelper::Compile(CompilerDriver* driver,
bool image_space_ok = writer->PrepareImageAddressSpace(&timings);
ASSERT_TRUE(image_space_ok);
- DCHECK_EQ(vdex_files.size(), oat_files.size());
- for (size_t i = 0, size = oat_files.size(); i != size; ++i) {
- MultiOatRelativePatcher patcher(driver->GetInstructionSet(),
- driver->GetInstructionSetFeatures(),
+ DCHECK_EQ(out_helper.vdex_files.size(), out_helper.oat_files.size());
+ for (size_t i = 0, size = out_helper.oat_files.size(); i != size; ++i) {
+ MultiOatRelativePatcher patcher(compiler_options_->GetInstructionSet(),
+ compiler_options_->GetInstructionSetFeatures(),
driver->GetCompiledMethodStorage());
OatWriter* const oat_writer = oat_writers[i].get();
ElfWriter* const elf_writer = elf_writers[i].get();
@@ -308,7 +305,7 @@ inline void CompilationHelper::Compile(CompilerDriver* driver,
std::unique_ptr<BufferedOutputStream> vdex_out =
std::make_unique<BufferedOutputStream>(
- std::make_unique<FileOutputStream>(vdex_files[i].GetFile()));
+ std::make_unique<FileOutputStream>(out_helper.vdex_files[i].GetFile()));
oat_writer->WriteVerifierDeps(vdex_out.get(), nullptr);
oat_writer->WriteQuickeningInfo(vdex_out.get());
oat_writer->WriteChecksumsAndVdexHeader(vdex_out.get());
@@ -365,8 +362,7 @@ inline void CompilationHelper::Compile(CompilerDriver* driver,
const char* oat_filename = oat_filenames[i].c_str();
std::unique_ptr<File> oat_file(OS::OpenFileReadWrite(oat_filename));
ASSERT_TRUE(oat_file != nullptr);
- bool success_fixup = ElfWriter::Fixup(oat_file.get(),
- writer->GetOatDataBegin(i));
+ bool success_fixup = ElfWriter::Fixup(oat_file.get(), writer->GetOatDataBegin(i));
ASSERT_TRUE(success_fixup);
ASSERT_EQ(oat_file->FlushCloseOrErase(), 0) << "Could not flush and close oat file "
<< oat_filename;
@@ -381,14 +377,15 @@ inline void ImageTest::Compile(ImageHeader::StorageMode storage_mode,
for (const std::string& image_class : image_classes) {
image_classes_.insert(image_class);
}
- CreateCompilerDriver(Compiler::kOptimizing, kRuntimeISA, kIsTargetBuild ? 2U : 16U);
+ number_of_threads_ = kIsTargetBuild ? 2U : 16U;
+ CreateCompilerDriver();
// Set inline filter values.
compiler_options_->SetInlineMaxCodeUnits(CompilerOptions::kDefaultInlineMaxCodeUnits);
image_classes_.clear();
if (!extra_dex.empty()) {
helper.extra_dex_files = OpenTestDexFiles(extra_dex.c_str());
}
- helper.Compile(compiler_driver_.get(), storage_mode);
+ DoCompile(storage_mode, helper);
if (image_classes.begin() != image_classes.end()) {
// Make sure the class got initialized.
ScopedObjectAccess soa(Thread::Current());
@@ -425,9 +422,6 @@ inline void ImageTest::TestWriteRead(ImageHeader::StorageMode storage_mode) {
image_file_sizes.push_back(file->GetLength());
}
- ASSERT_TRUE(compiler_driver_->GetImageClasses() != nullptr);
- std::unordered_set<std::string> image_classes(*compiler_driver_->GetImageClasses());
-
// Need to delete the compiler since it has worker threads which are attached to runtime.
compiler_driver_.reset();
@@ -468,6 +462,7 @@ inline void ImageTest::TestWriteRead(ImageHeader::StorageMode storage_mode) {
// We loaded the runtime with an explicit image, so it must exist.
ASSERT_EQ(heap->GetBootImageSpaces().size(), image_file_sizes.size());
+ const HashSet<std::string>& image_classes = compiler_options_->GetImageClasses();
for (size_t i = 0; i < helper.dex_file_locations.size(); ++i) {
std::unique_ptr<const DexFile> dex(
LoadExpectSingleDexFile(helper.dex_file_locations[i].c_str()));
@@ -496,7 +491,7 @@ inline void ImageTest::TestWriteRead(ImageHeader::StorageMode storage_mode) {
ObjPtr<mirror::Class> klass = class_linker_->FindSystemClass(soa.Self(), descriptor);
EXPECT_TRUE(klass != nullptr) << descriptor;
uint8_t* raw_klass = reinterpret_cast<uint8_t*>(klass.Ptr());
- if (image_classes.find(descriptor) == image_classes.end()) {
+ if (image_classes.find(StringPiece(descriptor)) == image_classes.end()) {
EXPECT_TRUE(raw_klass >= image_end || raw_klass < image_begin) << descriptor;
} else {
// Image classes should be located inside the image.
diff --git a/dex2oat/linker/image_writer.cc b/dex2oat/linker/image_writer.cc
index dc0709013c..de9c3d831d 100644
--- a/dex2oat/linker/image_writer.cc
+++ b/dex2oat/linker/image_writer.cc
@@ -37,7 +37,7 @@
#include "compiled_method.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_file_types.h"
-#include "driver/compiler_driver.h"
+#include "driver/compiler_options.h"
#include "elf_file.h"
#include "elf_utils.h"
#include "gc/accounting/card_table-inl.h"
@@ -52,7 +52,6 @@
#include "handle_scope-inl.h"
#include "image.h"
#include "imt_conflict_table.h"
-#include "subtype_check.h"
#include "jni/jni_internal.h"
#include "linear_alloc.h"
#include "lock_word.h"
@@ -71,8 +70,10 @@
#include "oat.h"
#include "oat_file.h"
#include "oat_file_manager.h"
+#include "optimizing/intrinsic_objects.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
+#include "subtype_check.h"
#include "utils/dex_cache_arrays_layout-inl.h"
#include "well_known_classes.h"
@@ -135,7 +136,7 @@ static void ClearDexFileCookies() REQUIRES_SHARED(Locks::mutator_lock_) {
}
bool ImageWriter::PrepareImageAddressSpace(TimingLogger* timings) {
- target_ptr_size_ = InstructionSetPointerSize(compiler_driver_.GetInstructionSet());
+ target_ptr_size_ = InstructionSetPointerSize(compiler_options_.GetInstructionSet());
gc::Heap* const heap = Runtime::Current()->GetHeap();
{
ScopedObjectAccess soa(Thread::Current());
@@ -437,10 +438,10 @@ void ImageWriter::SetImageBinSlot(mirror::Object* object, BinSlot bin_slot) {
}
void ImageWriter::PrepareDexCacheArraySlots() {
- // Prepare dex cache array starts based on the ordering specified in the CompilerDriver.
+ // Prepare dex cache array starts based on the ordering specified in the CompilerOptions.
// Set the slot size early to avoid DCHECK() failures in IsImageBinSlotAssigned()
// when AssignImageBinSlot() assigns their indexes out or order.
- for (const DexFile* dex_file : compiler_driver_.GetDexFilesForOatFile()) {
+ for (const DexFile* dex_file : compiler_options_.GetDexFilesForOatFile()) {
auto it = dex_file_oat_index_map_.find(dex_file);
DCHECK(it != dex_file_oat_index_map_.end()) << dex_file->GetLocation();
ImageInfo& image_info = GetImageInfo(it->second);
@@ -850,7 +851,7 @@ bool ImageWriter::PruneAppImageClassInternal(
std::string temp;
// Prune if not an image class, this handles any broken sets of image classes such as having a
// class in the set but not it's superclass.
- result = result || !compiler_driver_.IsImageClass(klass->GetDescriptor(&temp));
+ result = result || !compiler_options_.IsImageClass(klass->GetDescriptor(&temp));
bool my_early_exit = false; // Only for ourselves, ignore caller.
// Remove classes that failed to verify since we don't want to have java.lang.VerifyError in the
// app image.
@@ -940,7 +941,7 @@ bool ImageWriter::KeepClass(ObjPtr<mirror::Class> klass) {
return true;
}
std::string temp;
- if (!compiler_driver_.IsImageClass(klass->GetDescriptor(&temp))) {
+ if (!compiler_options_.IsImageClass(klass->GetDescriptor(&temp))) {
return false;
}
if (compile_app_image_) {
@@ -1211,27 +1212,22 @@ void ImageWriter::PruneNonImageClasses() {
}
void ImageWriter::CheckNonImageClassesRemoved() {
- if (compiler_driver_.GetImageClasses() != nullptr) {
- auto visitor = [&](Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
- if (obj->IsClass() && !IsInBootImage(obj)) {
- Class* klass = obj->AsClass();
- if (!KeepClass(klass)) {
- DumpImageClasses();
- std::string temp;
- CHECK(KeepClass(klass))
- << Runtime::Current()->GetHeap()->GetVerification()->FirstPathFromRootSet(klass);
- }
+ auto visitor = [&](Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (obj->IsClass() && !IsInBootImage(obj)) {
+ Class* klass = obj->AsClass();
+ if (!KeepClass(klass)) {
+ DumpImageClasses();
+ CHECK(KeepClass(klass))
+ << Runtime::Current()->GetHeap()->GetVerification()->FirstPathFromRootSet(klass);
}
- };
- gc::Heap* heap = Runtime::Current()->GetHeap();
- heap->VisitObjects(visitor);
- }
+ }
+ };
+ gc::Heap* heap = Runtime::Current()->GetHeap();
+ heap->VisitObjects(visitor);
}
void ImageWriter::DumpImageClasses() {
- auto image_classes = compiler_driver_.GetImageClasses();
- CHECK(image_classes != nullptr);
- for (const std::string& image_class : *image_classes) {
+ for (const std::string& image_class : compiler_options_.GetImageClasses()) {
LOG(INFO) << " " << image_class;
}
}
@@ -1261,15 +1257,8 @@ mirror::String* ImageWriter::FindInternedString(mirror::String* string) {
return nullptr;
}
-
-ObjectArray<Object>* ImageWriter::CreateImageRoots(size_t oat_index) const {
- Runtime* runtime = Runtime::Current();
- ClassLinker* class_linker = runtime->GetClassLinker();
- Thread* self = Thread::Current();
- StackHandleScope<3> hs(self);
- Handle<Class> object_array_class(hs.NewHandle(
- class_linker->FindSystemClass(self, "[Ljava/lang/Object;")));
-
+ObjPtr<mirror::ObjectArray<mirror::Object>> ImageWriter::CollectDexCaches(Thread* self,
+ size_t oat_index) const {
std::unordered_set<const DexFile*> image_dex_files;
for (auto& pair : dex_file_oat_index_map_) {
const DexFile* image_dex_file = pair.first;
@@ -1284,6 +1273,7 @@ ObjectArray<Object>* ImageWriter::CreateImageRoots(size_t oat_index) const {
// ObjectArray, we lock the dex lock twice, first to get the number
// of dex caches first and then lock it again to copy the dex
// caches. We check that the number of dex caches does not change.
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
size_t dex_cache_count = 0;
{
ReaderMutexLock mu(self, *Locks::dex_lock_);
@@ -1300,8 +1290,8 @@ ObjectArray<Object>* ImageWriter::CreateImageRoots(size_t oat_index) const {
}
}
}
- Handle<ObjectArray<Object>> dex_caches(
- hs.NewHandle(ObjectArray<Object>::Alloc(self, object_array_class.Get(), dex_cache_count)));
+ ObjPtr<ObjectArray<Object>> dex_caches = ObjectArray<Object>::Alloc(
+ self, GetClassRoot<ObjectArray<Object>>(class_linker), dex_cache_count);
CHECK(dex_caches != nullptr) << "Failed to allocate a dex cache array.";
{
ReaderMutexLock mu(self, *Locks::dex_lock_);
@@ -1335,11 +1325,23 @@ ObjectArray<Object>* ImageWriter::CreateImageRoots(size_t oat_index) const {
}
}
}
+ return dex_caches;
+}
+
+ObjPtr<ObjectArray<Object>> ImageWriter::CreateImageRoots(
+ size_t oat_index,
+ Handle<mirror::ObjectArray<mirror::Object>> boot_image_live_objects) const {
+ Runtime* runtime = Runtime::Current();
+ ClassLinker* class_linker = runtime->GetClassLinker();
+ Thread* self = Thread::Current();
+ StackHandleScope<2> hs(self);
+
+ Handle<ObjectArray<Object>> dex_caches(hs.NewHandle(CollectDexCaches(self, oat_index)));
// build an Object[] of the roots needed to restore the runtime
int32_t image_roots_size = ImageHeader::NumberOfImageRoots(compile_app_image_);
- auto image_roots(hs.NewHandle(
- ObjectArray<Object>::Alloc(self, object_array_class.Get(), image_roots_size)));
+ Handle<ObjectArray<Object>> image_roots(hs.NewHandle(ObjectArray<Object>::Alloc(
+ self, GetClassRoot<ObjectArray<Object>>(class_linker), image_roots_size)));
image_roots->Set<false>(ImageHeader::kDexCaches, dex_caches.Get());
image_roots->Set<false>(ImageHeader::kClassRoots, class_linker->GetClassRoots());
image_roots->Set<false>(ImageHeader::kOomeWhenThrowingException,
@@ -1350,10 +1352,17 @@ ObjectArray<Object>* ImageWriter::CreateImageRoots(size_t oat_index) const {
runtime->GetPreAllocatedOutOfMemoryErrorWhenHandlingStackOverflow());
image_roots->Set<false>(ImageHeader::kNoClassDefFoundError,
runtime->GetPreAllocatedNoClassDefFoundError());
- // image_roots[ImageHeader::kClassLoader] will be set later for app image.
- static_assert(ImageHeader::kClassLoader + 1u == ImageHeader::kImageRootsMax,
- "Class loader should be the last image root.");
- for (int32_t i = 0; i < ImageHeader::kImageRootsMax - 1; ++i) {
+ if (!compile_app_image_) {
+ DCHECK(boot_image_live_objects != nullptr);
+ image_roots->Set<false>(ImageHeader::kBootImageLiveObjects, boot_image_live_objects.Get());
+ } else {
+ DCHECK(boot_image_live_objects == nullptr);
+ }
+ for (int32_t i = 0, num = ImageHeader::NumberOfImageRoots(compile_app_image_); i != num; ++i) {
+ if (compile_app_image_ && i == ImageHeader::kAppImageClassLoader) {
+ // image_roots[ImageHeader::kAppImageClassLoader] will be set later for app image.
+ continue;
+ }
CHECK(image_roots->Get(i) != nullptr);
}
return image_roots.Get();
@@ -1676,13 +1685,17 @@ void ImageWriter::ProcessWorkStack(WorkStack* work_stack) {
void ImageWriter::CalculateNewObjectOffsets() {
Thread* const self = Thread::Current();
+ Runtime* const runtime = Runtime::Current();
VariableSizedHandleScope handles(self);
+ MutableHandle<ObjectArray<Object>> boot_image_live_objects = handles.NewHandle(
+ compile_app_image_
+ ? nullptr
+ : IntrinsicObjects::AllocateBootImageLiveObjects(self, runtime->GetClassLinker()));
std::vector<Handle<ObjectArray<Object>>> image_roots;
for (size_t i = 0, size = oat_filenames_.size(); i != size; ++i) {
- image_roots.push_back(handles.NewHandle(CreateImageRoots(i)));
+ image_roots.push_back(handles.NewHandle(CreateImageRoots(i, boot_image_live_objects)));
}
- Runtime* const runtime = Runtime::Current();
gc::Heap* const heap = runtime->GetHeap();
// Leave space for the header, but do not write it yet, we need to
@@ -1725,12 +1738,15 @@ void ImageWriter::CalculateNewObjectOffsets() {
heap->VisitObjects(deflate_monitor);
}
+ // From this point on, there shall be no GC anymore and no objects shall be allocated.
+ // We can now assign a BitSlot to each object and store it in its lockword.
+
// Work list of <object, oat_index> for objects. Everything on the stack must already be
// assigned a bin slot.
WorkStack work_stack;
// Special case interned strings to put them in the image they are likely to be resolved from.
- for (const DexFile* dex_file : compiler_driver_.GetDexFilesForOatFile()) {
+ for (const DexFile* dex_file : compiler_options_.GetDexFilesForOatFile()) {
auto it = dex_file_oat_index_map_.find(dex_file);
DCHECK(it != dex_file_oat_index_map_.end()) << dex_file->GetLocation();
const size_t oat_index = it->second;
@@ -1781,7 +1797,7 @@ void ImageWriter::CalculateNewObjectOffsets() {
CHECK_EQ(class_loaders_.size(), 1u);
CHECK_EQ(image_roots.size(), 1u);
CHECK(*class_loaders_.begin() != nullptr);
- image_roots[0]->Set<false>(ImageHeader::kClassLoader, *class_loaders_.begin());
+ image_roots[0]->Set<false>(ImageHeader::kAppImageClassLoader, *class_loaders_.begin());
}
// Verify that all objects have assigned image bin slots.
@@ -1881,6 +1897,9 @@ void ImageWriter::CalculateNewObjectOffsets() {
ImageInfo& image_info = GetImageInfo(relocation.oat_index);
relocation.offset += image_info.GetBinSlotOffset(bin_type);
}
+
+ // Remember the boot image live objects as raw pointer. No GC can happen anymore.
+ boot_image_live_objects_ = boot_image_live_objects.Get();
}
size_t ImageWriter::ImageInfo::CreateImageSections(ImageSection* out_sections) const {
@@ -2007,6 +2026,28 @@ ArtMethod* ImageWriter::GetImageMethodAddress(ArtMethod* method) {
return reinterpret_cast<ArtMethod*>(image_info.image_begin_ + it->second.offset);
}
+const void* ImageWriter::GetIntrinsicReferenceAddress(uint32_t intrinsic_data) {
+ DCHECK(!compile_app_image_);
+ switch (IntrinsicObjects::DecodePatchType(intrinsic_data)) {
+ case IntrinsicObjects::PatchType::kIntegerValueOfArray: {
+ const uint8_t* base_address =
+ reinterpret_cast<const uint8_t*>(GetImageAddress(boot_image_live_objects_));
+ MemberOffset data_offset =
+ IntrinsicObjects::GetIntegerValueOfArrayDataOffset(boot_image_live_objects_);
+ return base_address + data_offset.Uint32Value();
+ }
+ case IntrinsicObjects::PatchType::kIntegerValueOfObject: {
+ uint32_t index = IntrinsicObjects::DecodePatchIndex(intrinsic_data);
+ ObjPtr<mirror::Object> value =
+ IntrinsicObjects::GetIntegerValueOfObject(boot_image_live_objects_, index);
+ return GetImageAddress(value.Ptr());
+ }
+ }
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
+}
+
+
class ImageWriter::FixupRootVisitor : public RootVisitor {
public:
explicit FixupRootVisitor(ImageWriter* image_writer) : image_writer_(image_writer) {
@@ -2094,7 +2135,8 @@ void ImageWriter::CopyAndFixupNativeData(size_t oat_index) {
size_t size = ArtMethod::Size(target_ptr_size_);
size_t alignment = ArtMethod::Alignment(target_ptr_size_);
memcpy(dest, pair.first, LengthPrefixedArray<ArtMethod>::ComputeSize(0, size, alignment));
- // Clear padding to avoid non-deterministic data in the image (and placate valgrind).
+ // Clear padding to avoid non-deterministic data in the image.
+ // Historical note: We also did that to placate Valgrind.
reinterpret_cast<LengthPrefixedArray<ArtMethod>*>(dest)->ClearPadding(size, alignment);
break;
}
@@ -2175,12 +2217,7 @@ void ImageWriter::CopyAndFixupObjects() {
CopyAndFixupObject(obj);
};
Runtime::Current()->GetHeap()->VisitObjects(visitor);
- // Fix up the object previously had hash codes.
- for (const auto& hash_pair : saved_hashcode_map_) {
- Object* obj = hash_pair.first;
- DCHECK_EQ(obj->GetLockWord<kVerifyNone>(false).ReadBarrierState(), 0U);
- obj->SetLockWord<kVerifyNone>(LockWord::FromHashCode(hash_pair.second, 0U), false);
- }
+ // We no longer need the hashcode map, values have already been copied to target objects.
saved_hashcode_map_.clear();
}
@@ -2810,23 +2847,24 @@ void ImageWriter::UpdateOatFileHeader(size_t oat_index, const OatHeader& oat_hea
}
ImageWriter::ImageWriter(
- const CompilerDriver& compiler_driver,
+ const CompilerOptions& compiler_options,
uintptr_t image_begin,
bool compile_pic,
bool compile_app_image,
ImageHeader::StorageMode image_storage_mode,
const std::vector<const char*>& oat_filenames,
const std::unordered_map<const DexFile*, size_t>& dex_file_oat_index_map,
- const std::unordered_set<std::string>* dirty_image_objects)
- : compiler_driver_(compiler_driver),
+ const HashSet<std::string>* dirty_image_objects)
+ : compiler_options_(compiler_options),
global_image_begin_(reinterpret_cast<uint8_t*>(image_begin)),
image_objects_offset_begin_(0),
compile_pic_(compile_pic),
compile_app_image_(compile_app_image),
- target_ptr_size_(InstructionSetPointerSize(compiler_driver_.GetInstructionSet())),
+ target_ptr_size_(InstructionSetPointerSize(compiler_options.GetInstructionSet())),
image_infos_(oat_filenames.size()),
dirty_methods_(0u),
clean_methods_(0u),
+ boot_image_live_objects_(nullptr),
image_storage_mode_(image_storage_mode),
oat_filenames_(oat_filenames),
dex_file_oat_index_map_(dex_file_oat_index_map),
diff --git a/dex2oat/linker/image_writer.h b/dex2oat/linker/image_writer.h
index 960d698689..9097cc90c6 100644
--- a/dex2oat/linker/image_writer.h
+++ b/dex2oat/linker/image_writer.h
@@ -31,6 +31,7 @@
#include "base/bit_utils.h"
#include "base/dchecked_vector.h"
#include "base/enums.h"
+#include "base/hash_set.h"
#include "base/length_prefixed_array.h"
#include "base/macros.h"
#include "base/mem_map.h"
@@ -38,7 +39,6 @@
#include "base/safe_map.h"
#include "base/utils.h"
#include "class_table.h"
-#include "driver/compiler_driver.h"
#include "image.h"
#include "intern_table.h"
#include "lock_word.h"
@@ -62,6 +62,8 @@ class ClassLoader;
} // namespace mirror
class ClassLoaderVisitor;
+class CompilerOptions;
+template<class T> class Handle;
class ImTable;
class ImtConflictTable;
class TimingLogger;
@@ -73,14 +75,14 @@ namespace linker {
// Write a Space built during compilation for use during execution.
class ImageWriter FINAL {
public:
- ImageWriter(const CompilerDriver& compiler_driver,
+ ImageWriter(const CompilerOptions& compiler_options,
uintptr_t image_begin,
bool compile_pic,
bool compile_app_image,
ImageHeader::StorageMode image_storage_mode,
const std::vector<const char*>& oat_filenames,
const std::unordered_map<const DexFile*, size_t>& dex_file_oat_index_map,
- const std::unordered_set<std::string>* dirty_image_objects);
+ const HashSet<std::string>* dirty_image_objects);
bool PrepareImageAddressSpace(TimingLogger* timings);
@@ -111,6 +113,8 @@ class ImageWriter FINAL {
}
ArtMethod* GetImageMethodAddress(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
+ const void* GetIntrinsicReferenceAddress(uint32_t intrinsic_data)
+ REQUIRES_SHARED(Locks::mutator_lock_);
size_t GetOatFileOffset(size_t oat_index) const {
return GetImageInfo(oat_index).oat_offset_;
@@ -450,7 +454,11 @@ class ImageWriter FINAL {
REQUIRES_SHARED(Locks::mutator_lock_);
void CreateHeader(size_t oat_index)
REQUIRES_SHARED(Locks::mutator_lock_);
- mirror::ObjectArray<mirror::Object>* CreateImageRoots(size_t oat_index) const
+ ObjPtr<mirror::ObjectArray<mirror::Object>> CollectDexCaches(Thread* self, size_t oat_index) const
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ ObjPtr<mirror::ObjectArray<mirror::Object>> CreateImageRoots(
+ size_t oat_index,
+ Handle<mirror::ObjectArray<mirror::Object>> boot_image_live_objects) const
REQUIRES_SHARED(Locks::mutator_lock_);
void CalculateObjectBinSlots(mirror::Object* obj)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -508,9 +516,8 @@ class ImageWriter FINAL {
// classes since we do not want any boot class loader classes in the image. This means that
// we also cannot have any classes which refer to these boot class loader non image classes.
// PruneAppImageClass also prunes if klass depends on a non-image class according to the compiler
- // driver.
- bool PruneAppImageClass(ObjPtr<mirror::Class> klass)
- REQUIRES_SHARED(Locks::mutator_lock_);
+ // options.
+ bool PruneAppImageClass(ObjPtr<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_);
// early_exit is true if we had a cyclic dependency anywhere down the chain.
bool PruneAppImageClassInternal(ObjPtr<mirror::Class> klass,
@@ -572,7 +579,7 @@ class ImageWriter FINAL {
void CopyAndFixupPointer(void** target, void* value);
- const CompilerDriver& compiler_driver_;
+ const CompilerOptions& compiler_options_;
// Beginning target image address for the first image.
uint8_t* global_image_begin_;
@@ -632,6 +639,9 @@ class ImageWriter FINAL {
// null is a valid entry.
std::unordered_set<mirror::ClassLoader*> class_loaders_;
+ // Boot image live objects, null for app image.
+ mirror::ObjectArray<mirror::Object>* boot_image_live_objects_;
+
// Which mode the image is stored as, see image.h
const ImageHeader::StorageMode image_storage_mode_;
@@ -642,7 +652,7 @@ class ImageWriter FINAL {
const std::unordered_map<const DexFile*, size_t>& dex_file_oat_index_map_;
// Set of objects known to be dirty in the image. Can be nullptr if there are none.
- const std::unordered_set<std::string>* dirty_image_objects_;
+ const HashSet<std::string>* dirty_image_objects_;
class ComputeLazyFieldsForClassesVisitor;
class FixupClassVisitor;
diff --git a/dex2oat/linker/oat_writer.cc b/dex2oat/linker/oat_writer.cc
index 99516684e8..09a0d376e0 100644
--- a/dex2oat/linker/oat_writer.cc
+++ b/dex2oat/linker/oat_writer.cc
@@ -355,7 +355,7 @@ class OatWriter::OatDexFile {
DCHECK_EQ(static_cast<off_t>(file_offset + offset_), out->Seek(0, kSeekCurrent)) \
<< "file_offset=" << file_offset << " offset_=" << offset_
-OatWriter::OatWriter(bool compiling_boot_image,
+OatWriter::OatWriter(const CompilerOptions& compiler_options,
TimingLogger* timings,
ProfileCompilationInfo* info,
CompactDexLevel compact_dex_level)
@@ -366,8 +366,8 @@ OatWriter::OatWriter(bool compiling_boot_image,
zipped_dex_files_(),
zipped_dex_file_locations_(),
compiler_driver_(nullptr),
+ compiler_options_(compiler_options),
image_writer_(nullptr),
- compiling_boot_image_(compiling_boot_image),
extract_dex_files_into_vdex_(true),
dex_files_(nullptr),
vdex_size_(0u),
@@ -642,15 +642,12 @@ dchecked_vector<std::string> OatWriter::GetSourceLocations() const {
}
bool OatWriter::MayHaveCompiledMethods() const {
- return CompilerFilter::IsAnyCompilationEnabled(
- GetCompilerDriver()->GetCompilerOptions().GetCompilerFilter());
+ return GetCompilerOptions().IsAnyCompilationEnabled();
}
bool OatWriter::WriteAndOpenDexFiles(
File* vdex_file,
OutputStream* oat_rodata,
- InstructionSet instruction_set,
- const InstructionSetFeatures* instruction_set_features,
SafeMap<std::string, std::string>* key_value_store,
bool verify,
bool update_input_vdex,
@@ -672,9 +669,7 @@ bool OatWriter::WriteAndOpenDexFiles(
// Reserve space for Vdex header and checksums.
vdex_size_ = sizeof(VdexFile::VerifierDepsHeader) +
oat_dex_files_.size() * sizeof(VdexFile::VdexChecksum);
- oat_size_ = InitOatHeader(instruction_set,
- instruction_set_features,
- dchecked_integral_cast<uint32_t>(oat_dex_files_.size()),
+ oat_size_ = InitOatHeader(dchecked_integral_cast<uint32_t>(oat_dex_files_.size()),
key_value_store);
ChecksumUpdatingOutputStream checksum_updating_rodata(oat_rodata, oat_header_.get());
@@ -703,16 +698,25 @@ bool OatWriter::WriteAndOpenDexFiles(
return true;
}
+// Initialize the writer with the given parameters.
+void OatWriter::Initialize(const CompilerDriver* compiler_driver,
+ ImageWriter* image_writer,
+ const std::vector<const DexFile*>& dex_files) {
+ compiler_driver_ = compiler_driver;
+ image_writer_ = image_writer;
+ dex_files_ = &dex_files;
+}
+
void OatWriter::PrepareLayout(MultiOatRelativePatcher* relative_patcher) {
CHECK(write_state_ == WriteState::kPrepareLayout);
relative_patcher_ = relative_patcher;
SetMultiOatRelativePatcherAdjustment();
- if (compiling_boot_image_) {
+ if (GetCompilerOptions().IsBootImage()) {
CHECK(image_writer_ != nullptr);
}
- InstructionSet instruction_set = compiler_driver_->GetInstructionSet();
+ InstructionSet instruction_set = compiler_options_.GetInstructionSet();
CHECK_EQ(instruction_set, oat_header_->GetInstructionSet());
{
@@ -759,7 +763,7 @@ void OatWriter::PrepareLayout(MultiOatRelativePatcher* relative_patcher) {
bss_start_ = (bss_size_ != 0u) ? RoundUp(oat_size_, kPageSize) : 0u;
CHECK_EQ(dex_files_->size(), oat_dex_files_.size());
- if (compiling_boot_image_) {
+ if (GetCompilerOptions().IsBootImage()) {
CHECK_EQ(image_writer_ != nullptr,
oat_header_->GetStoreValueByKey(OatHeader::kImageLocationKey) == nullptr);
}
@@ -1157,7 +1161,7 @@ class OatWriter::LayoutCodeMethodVisitor : public OatDexMethodVisitor {
size_t debug_info_idx = OrderedMethodData::kDebugInfoIdxInvalid;
{
- const CompilerOptions& compiler_options = writer_->compiler_driver_->GetCompilerOptions();
+ const CompilerOptions& compiler_options = writer_->GetCompilerOptions();
ArrayRef<const uint8_t> quick_code = compiled_method->GetQuickCode();
uint32_t code_size = quick_code.size() * sizeof(uint8_t);
@@ -1238,7 +1242,7 @@ class OatWriter::LayoutReserveOffsetCodeMethodVisitor : public OrderedMethodVisi
OrderedMethodList ordered_methods)
: LayoutReserveOffsetCodeMethodVisitor(writer,
offset,
- writer->GetCompilerDriver()->GetCompilerOptions(),
+ writer->GetCompilerOptions(),
std::move(ordered_methods)) {
}
@@ -1541,7 +1545,7 @@ class OatWriter::InitImageMethodVisitor : public OatDexMethodVisitor {
size_t offset,
const std::vector<const DexFile*>* dex_files)
: OatDexMethodVisitor(writer, offset),
- pointer_size_(GetInstructionSetPointerSize(writer_->compiler_driver_->GetInstructionSet())),
+ pointer_size_(GetInstructionSetPointerSize(writer_->compiler_options_.GetInstructionSet())),
class_loader_(writer->HasImage() ? writer->image_writer_->GetClassLoader() : nullptr),
dex_files_(dex_files),
class_linker_(Runtime::Current()->GetClassLinker()) {}
@@ -1609,7 +1613,7 @@ class OatWriter::InitImageMethodVisitor : public OatDexMethodVisitor {
Thread* self = Thread::Current();
ObjPtr<mirror::DexCache> dex_cache = class_linker_->FindDexCache(self, *dex_file_);
ArtMethod* method;
- if (writer_->HasBootImage()) {
+ if (writer_->GetCompilerOptions().IsBootImage()) {
const InvokeType invoke_type = it.GetMethodInvokeType(
dex_file_->GetClassDef(class_def_index_));
// Unchecked as we hold mutator_lock_ on entry.
@@ -1651,7 +1655,7 @@ class OatWriter::InitImageMethodVisitor : public OatDexMethodVisitor {
const DexFile::TypeId& type_id =
dex_file_->GetTypeId(dex_file_->GetClassDef(class_def_index_).class_idx_);
const char* class_descriptor = dex_file_->GetTypeDescriptor(type_id);
- return writer_->GetCompilerDriver()->IsImageClass(class_descriptor);
+ return writer_->GetCompilerOptions().IsImageClass(class_descriptor);
}
// Check whether specified dex file is in the compiled oat file.
@@ -1692,7 +1696,7 @@ class OatWriter::WriteCodeMethodVisitor : public OrderedMethodVisitor {
writer_(writer),
offset_(relative_offset),
dex_file_(nullptr),
- pointer_size_(GetInstructionSetPointerSize(writer_->compiler_driver_->GetInstructionSet())),
+ pointer_size_(GetInstructionSetPointerSize(writer_->compiler_options_.GetInstructionSet())),
class_loader_(writer->HasImage() ? writer->image_writer_->GetClassLoader() : nullptr),
out_(out),
file_offset_(file_offset),
@@ -1700,7 +1704,7 @@ class OatWriter::WriteCodeMethodVisitor : public OrderedMethodVisitor {
dex_cache_(nullptr),
no_thread_suspension_("OatWriter patching") {
patched_code_.reserve(16 * KB);
- if (writer_->HasBootImage()) {
+ if (writer_->GetCompilerOptions().IsBootImage()) {
// If we're creating the image, the address space must be ready so that we can apply patches.
CHECK(writer_->image_writer_->IsImageAddressSpaceReady());
}
@@ -1717,7 +1721,7 @@ class OatWriter::WriteCodeMethodVisitor : public OrderedMethodVisitor {
// Ordered method visiting is only for compiled methods.
DCHECK(writer_->MayHaveCompiledMethods());
- if (writer_->GetCompilerDriver()->GetCompilerOptions().IsAotCompilationEnabled()) {
+ if (writer_->GetCompilerOptions().IsAotCompilationEnabled()) {
// Only need to set the dex cache if we have compilation. Other modes might have unloaded it.
if (dex_cache_ == nullptr || dex_cache_->GetDexFile() != dex_file) {
dex_cache_ = class_linker_->FindDexCache(Thread::Current(), *dex_file);
@@ -1793,6 +1797,14 @@ class OatWriter::WriteCodeMethodVisitor : public OrderedMethodVisitor {
for (const LinkerPatch& patch : compiled_method->GetPatches()) {
uint32_t literal_offset = patch.LiteralOffset();
switch (patch.GetType()) {
+ case LinkerPatch::Type::kIntrinsicReference: {
+ uint32_t target_offset = GetTargetIntrinsicReferenceOffset(patch);
+ writer_->relative_patcher_->PatchPcRelativeReference(&patched_code_,
+ patch,
+ offset_ + literal_offset,
+ target_offset);
+ break;
+ }
case LinkerPatch::Type::kDataBimgRelRo: {
uint32_t target_offset =
writer_->data_bimg_rel_ro_start_ +
@@ -1948,7 +1960,7 @@ class OatWriter::WriteCodeMethodVisitor : public OrderedMethodVisitor {
const void* oat_code_offset =
target->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size_);
if (oat_code_offset != 0) {
- DCHECK(!writer_->HasBootImage());
+ DCHECK(!writer_->GetCompilerOptions().IsBootImage());
DCHECK(!Runtime::Current()->GetClassLinker()->IsQuickResolutionStub(oat_code_offset));
DCHECK(!Runtime::Current()->GetClassLinker()->IsQuickToInterpreterBridge(oat_code_offset));
DCHECK(!Runtime::Current()->GetClassLinker()->IsQuickGenericJniStub(oat_code_offset));
@@ -1985,13 +1997,24 @@ class OatWriter::WriteCodeMethodVisitor : public OrderedMethodVisitor {
ObjPtr<mirror::String> string =
linker->LookupString(patch.TargetStringIndex(), GetDexCache(patch.TargetStringDexFile()));
DCHECK(string != nullptr);
- DCHECK(writer_->HasBootImage() ||
+ DCHECK(writer_->GetCompilerOptions().IsBootImage() ||
Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(string));
return string;
}
+ uint32_t GetTargetIntrinsicReferenceOffset(const LinkerPatch& patch)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK(writer_->GetCompilerOptions().IsBootImage());
+ const void* address =
+ writer_->image_writer_->GetIntrinsicReferenceAddress(patch.IntrinsicData());
+ size_t oat_index = writer_->image_writer_->GetOatIndexForDexFile(dex_file_);
+ uintptr_t oat_data_begin = writer_->image_writer_->GetOatDataBegin(oat_index);
+ // TODO: Clean up offset types. The target offset must be treated as signed.
+ return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(address) - oat_data_begin);
+ }
+
uint32_t GetTargetMethodOffset(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) {
- DCHECK(writer_->HasBootImage());
+ DCHECK(writer_->GetCompilerOptions().IsBootImage());
method = writer_->image_writer_->GetImageMethodAddress(method);
size_t oat_index = writer_->image_writer_->GetOatIndexForDexFile(dex_file_);
uintptr_t oat_data_begin = writer_->image_writer_->GetOatDataBegin(oat_index);
@@ -2001,7 +2024,7 @@ class OatWriter::WriteCodeMethodVisitor : public OrderedMethodVisitor {
uint32_t GetTargetObjectOffset(ObjPtr<mirror::Object> object)
REQUIRES_SHARED(Locks::mutator_lock_) {
- DCHECK(writer_->HasBootImage());
+ DCHECK(writer_->GetCompilerOptions().IsBootImage());
object = writer_->image_writer_->GetImageAddress(object.Ptr());
size_t oat_index = writer_->image_writer_->GetOatIndexForDexFile(dex_file_);
uintptr_t oat_data_begin = writer_->image_writer_->GetOatDataBegin(oat_index);
@@ -2011,7 +2034,7 @@ class OatWriter::WriteCodeMethodVisitor : public OrderedMethodVisitor {
void PatchObjectAddress(std::vector<uint8_t>* code, uint32_t offset, mirror::Object* object)
REQUIRES_SHARED(Locks::mutator_lock_) {
- if (writer_->HasBootImage()) {
+ if (writer_->GetCompilerOptions().IsBootImage()) {
object = writer_->image_writer_->GetImageAddress(object);
} else {
// NOTE: We're using linker patches for app->boot references when the image can
@@ -2032,7 +2055,7 @@ class OatWriter::WriteCodeMethodVisitor : public OrderedMethodVisitor {
void PatchCodeAddress(std::vector<uint8_t>* code, uint32_t offset, uint32_t target_offset)
REQUIRES_SHARED(Locks::mutator_lock_) {
uint32_t address = target_offset;
- if (writer_->HasBootImage()) {
+ if (writer_->GetCompilerOptions().IsBootImage()) {
size_t oat_index = writer_->image_writer_->GetOatIndexForDexCache(dex_cache_);
// TODO: Clean up offset types.
// The target_offset must be treated as signed for cross-oat patching.
@@ -2202,13 +2225,11 @@ bool OatWriter::VisitDexMethods(DexMethodVisitor* visitor) {
return true;
}
-size_t OatWriter::InitOatHeader(InstructionSet instruction_set,
- const InstructionSetFeatures* instruction_set_features,
- uint32_t num_dex_files,
+size_t OatWriter::InitOatHeader(uint32_t num_dex_files,
SafeMap<std::string, std::string>* key_value_store) {
TimingLogger::ScopedTiming split("InitOatHeader", timings_);
- oat_header_.reset(OatHeader::Create(instruction_set,
- instruction_set_features,
+ oat_header_.reset(OatHeader::Create(GetCompilerOptions().GetInstructionSet(),
+ GetCompilerOptions().GetInstructionSetFeatures(),
num_dex_files,
key_value_store));
size_oat_header_ += sizeof(OatHeader);
@@ -2388,9 +2409,9 @@ size_t OatWriter::InitOatCode(size_t offset) {
// TODO: Remove unused trampoline offsets from the OatHeader (requires oat version change).
oat_header_->SetInterpreterToInterpreterBridgeOffset(0);
oat_header_->SetInterpreterToCompiledCodeBridgeOffset(0);
- if (compiler_driver_->GetCompilerOptions().IsBootImage()) {
- InstructionSet instruction_set = compiler_driver_->GetInstructionSet();
- const bool generate_debug_info = compiler_driver_->GetCompilerOptions().GenerateAnyDebugInfo();
+ if (GetCompilerOptions().IsBootImage()) {
+ InstructionSet instruction_set = compiler_options_.GetInstructionSet();
+ const bool generate_debug_info = GetCompilerOptions().GenerateAnyDebugInfo();
size_t adjusted_offset = offset;
#define DO_TRAMPOLINE(field, fn_name) \
@@ -2428,7 +2449,7 @@ size_t OatWriter::InitOatCode(size_t offset) {
}
size_t OatWriter::InitOatCodeDexFiles(size_t offset) {
- if (!compiler_driver_->GetCompilerOptions().IsAnyCompilationEnabled()) {
+ if (!GetCompilerOptions().IsAnyCompilationEnabled()) {
if (kOatWriterDebugOatCodeLayout) {
LOG(INFO) << "InitOatCodeDexFiles: OatWriter("
<< this << "), "
@@ -2512,7 +2533,7 @@ void OatWriter::InitBssLayout(InstructionSet instruction_set) {
}
DCHECK_EQ(bss_size_, 0u);
- if (HasBootImage()) {
+ if (GetCompilerOptions().IsBootImage()) {
DCHECK(bss_string_entries_.empty());
}
if (bss_method_entries_.empty() &&
@@ -2741,7 +2762,7 @@ bool OatWriter::WriteQuickeningInfo(OutputStream* vdex_out) {
}
size_t current_offset = start_offset;
- if (compiler_driver_->GetCompilerOptions().IsQuickeningCompilationEnabled()) {
+ if (GetCompilerOptions().IsQuickeningCompilationEnabled()) {
std::vector<uint32_t> dex_files_indices;
WriteQuickeningInfoMethodVisitor write_quicken_info_visitor(this, vdex_out);
if (!write_quicken_info_visitor.VisitDexMethods(*dex_files_)) {
@@ -3019,7 +3040,7 @@ bool OatWriter::WriteHeader(OutputStream* out,
oat_header_->SetImageFileLocationOatChecksum(image_file_location_oat_checksum);
oat_header_->SetImageFileLocationOatDataBegin(image_file_location_oat_begin);
- if (compiler_driver_->GetCompilerOptions().IsBootImage()) {
+ if (GetCompilerOptions().IsBootImage()) {
CHECK_EQ(image_patch_delta, 0);
CHECK_EQ(oat_header_->GetImagePatchDelta(), 0);
} else {
@@ -3283,8 +3304,8 @@ size_t OatWriter::WriteOatDexFiles(OutputStream* out, size_t file_offset, size_t
}
size_t OatWriter::WriteCode(OutputStream* out, size_t file_offset, size_t relative_offset) {
- if (compiler_driver_->GetCompilerOptions().IsBootImage()) {
- InstructionSet instruction_set = compiler_driver_->GetInstructionSet();
+ if (GetCompilerOptions().IsBootImage()) {
+ InstructionSet instruction_set = compiler_options_.GetInstructionSet();
#define DO_TRAMPOLINE(field) \
do { \
@@ -3314,7 +3335,7 @@ size_t OatWriter::WriteCode(OutputStream* out, size_t file_offset, size_t relati
size_t OatWriter::WriteCodeDexFiles(OutputStream* out,
size_t file_offset,
size_t relative_offset) {
- if (!compiler_driver_->GetCompilerOptions().IsAnyCompilationEnabled()) {
+ if (!GetCompilerOptions().IsAnyCompilationEnabled()) {
// As with InitOatCodeDexFiles, also skip the writer if
// compilation was disabled.
if (kOatWriterDebugOatCodeLayout) {
@@ -3642,7 +3663,7 @@ bool OatWriter::LayoutAndWriteDexFile(OutputStream* out, OatDexFile* oat_dex_fil
dex_file = dex_file_loader.Open(location,
zip_entry->GetCrc32(),
std::move(mem_map),
- /* verify */ !compiling_boot_image_,
+ /* verify */ !GetCompilerOptions().IsBootImage(),
/* verify_checksum */ true,
&error_msg);
} else if (oat_dex_file->source_.IsRawFile()) {
@@ -3654,7 +3675,7 @@ bool OatWriter::LayoutAndWriteDexFile(OutputStream* out, OatDexFile* oat_dex_fil
}
TimingLogger::ScopedTiming extract("Open", timings_);
dex_file = dex_file_loader.OpenDex(dup_fd, location,
- /* verify */ !compiling_boot_image_,
+ /* verify */ !GetCompilerOptions().IsBootImage(),
/* verify_checksum */ true,
/* mmap_shared */ false,
&error_msg);
diff --git a/dex2oat/linker/oat_writer.h b/dex2oat/linker/oat_writer.h
index 619743ef14..298859bb38 100644
--- a/dex2oat/linker/oat_writer.h
+++ b/dex2oat/linker/oat_writer.h
@@ -27,7 +27,6 @@
#include "base/os.h"
#include "base/mem_map.h"
#include "base/safe_map.h"
-#include "compiler.h"
#include "debug/debug_info.h"
#include "dex/compact_dex_level.h"
#include "dex/method_reference.h"
@@ -42,6 +41,7 @@ namespace art {
class BitVector;
class CompiledMethod;
class CompilerDriver;
+class CompilerOptions;
class DexContainer;
class ProfileCompilationInfo;
class TimingLogger;
@@ -63,6 +63,12 @@ class ImageWriter;
class MultiOatRelativePatcher;
class OutputStream;
+enum class CopyOption {
+ kNever,
+ kAlways,
+ kOnlyIfCompressed
+};
+
// OatHeader variable length with count of D OatDexFiles
//
// TypeLookupTable[0] one descriptor to class def index hash table for each OatDexFile.
@@ -118,7 +124,7 @@ class OatWriter {
kDefault = kCreate
};
- OatWriter(bool compiling_boot_image,
+ OatWriter(const CompilerOptions& compiler_options,
TimingLogger* timings,
ProfileCompilationInfo* info,
CompactDexLevel compact_dex_level);
@@ -172,25 +178,19 @@ class OatWriter {
// and the compiler will just re-use the existing vdex file.
bool WriteAndOpenDexFiles(File* vdex_file,
OutputStream* oat_rodata,
- InstructionSet instruction_set,
- const InstructionSetFeatures* instruction_set_features,
SafeMap<std::string, std::string>* key_value_store,
bool verify,
bool update_input_vdex,
CopyOption copy_dex_files,
/*out*/ std::vector<std::unique_ptr<MemMap>>* opened_dex_files_map,
/*out*/ std::vector<std::unique_ptr<const DexFile>>* opened_dex_files);
+ // Initialize the writer with the given parameters.
+ void Initialize(const CompilerDriver* compiler_driver,
+ ImageWriter* image_writer,
+ const std::vector<const DexFile*>& dex_files);
bool WriteQuickeningInfo(OutputStream* vdex_out);
bool WriteVerifierDeps(OutputStream* vdex_out, verifier::VerifierDeps* verifier_deps);
bool WriteChecksumsAndVdexHeader(OutputStream* vdex_out);
- // Initialize the writer with the given parameters.
- void Initialize(const CompilerDriver* compiler,
- ImageWriter* image_writer,
- const std::vector<const DexFile*>& dex_files) {
- compiler_driver_ = compiler;
- image_writer_ = image_writer;
- dex_files_ = &dex_files;
- }
// Prepare layout of remaining data.
void PrepareLayout(MultiOatRelativePatcher* relative_patcher);
@@ -215,10 +215,6 @@ class OatWriter {
return image_writer_ != nullptr;
}
- bool HasBootImage() const {
- return compiling_boot_image_;
- }
-
const OatHeader& GetOatHeader() const {
return *oat_header_;
}
@@ -263,6 +259,10 @@ class OatWriter {
return compiler_driver_;
}
+ const CompilerOptions& GetCompilerOptions() const {
+ return compiler_options_;
+ }
+
private:
class DexFileSource;
class OatClassHeader;
@@ -325,10 +325,7 @@ class OatWriter {
/*out*/ std::vector<std::unique_ptr<MemMap>>* opened_dex_files_map,
/*out*/ std::vector<std::unique_ptr<const DexFile>>* opened_dex_files);
- size_t InitOatHeader(InstructionSet instruction_set,
- const InstructionSetFeatures* instruction_set_features,
- uint32_t num_dex_files,
- SafeMap<std::string, std::string>* key_value_store);
+ size_t InitOatHeader(uint32_t num_dex_files, SafeMap<std::string, std::string>* key_value_store);
size_t InitClassOffsets(size_t offset);
size_t InitOatClasses(size_t offset);
size_t InitOatMaps(size_t offset);
@@ -388,8 +385,8 @@ class OatWriter {
dchecked_vector<debug::MethodDebugInfo> method_info_;
const CompilerDriver* compiler_driver_;
+ const CompilerOptions& compiler_options_;
ImageWriter* image_writer_;
- const bool compiling_boot_image_;
// Whether the dex files being compiled are going to be extracted to the vdex.
bool extract_dex_files_into_vdex_;
diff --git a/dex2oat/linker/oat_writer_test.cc b/dex2oat/linker/oat_writer_test.cc
index d0a6eb9ff2..7aa1ebb98e 100644
--- a/dex2oat/linker/oat_writer_test.cc
+++ b/dex2oat/linker/oat_writer_test.cc
@@ -86,33 +86,17 @@ class OatTest : public CommonCompilerTest {
}
}
- void SetupCompiler(Compiler::Kind compiler_kind,
- InstructionSet insn_set,
- const std::vector<std::string>& compiler_options,
- /*out*/std::string* error_msg) {
- ASSERT_TRUE(error_msg != nullptr);
- insn_features_ = InstructionSetFeatures::FromVariant(insn_set, "default", error_msg);
- ASSERT_TRUE(insn_features_ != nullptr) << *error_msg;
- compiler_options_.reset(new CompilerOptions);
+ void SetupCompiler(const std::vector<std::string>& compiler_options) {
+ std::string error_msg;
if (!compiler_options_->ParseCompilerOptions(compiler_options,
false /* ignore_unrecognized */,
- error_msg)) {
- LOG(FATAL) << *error_msg;
+ &error_msg)) {
+ LOG(FATAL) << error_msg;
UNREACHABLE();
}
- verification_results_.reset(new VerificationResults(compiler_options_.get()));
callbacks_.reset(new QuickCompilerCallbacks(CompilerCallbacks::CallbackMode::kCompileApp));
callbacks_->SetVerificationResults(verification_results_.get());
Runtime::Current()->SetCompilerCallbacks(callbacks_.get());
- compiler_driver_.reset(new CompilerDriver(compiler_options_.get(),
- verification_results_.get(),
- compiler_kind,
- insn_set,
- insn_features_.get(),
- /* image_classes */ nullptr,
- /* thread_count */ 2,
- /* swap_fd */ -1,
- /* profile_compilation_info */ nullptr));
}
bool WriteElf(File* vdex_file,
@@ -121,7 +105,8 @@ class OatTest : public CommonCompilerTest {
SafeMap<std::string, std::string>& key_value_store,
bool verify) {
TimingLogger timings("WriteElf", false, false);
- OatWriter oat_writer(/*compiling_boot_image*/false,
+ ClearBootImageOption();
+ OatWriter oat_writer(*compiler_options_,
&timings,
/*profile_compilation_info*/nullptr,
CompactDexLevel::kCompactDexLevelNone);
@@ -145,7 +130,8 @@ class OatTest : public CommonCompilerTest {
bool verify,
ProfileCompilationInfo* profile_compilation_info) {
TimingLogger timings("WriteElf", false, false);
- OatWriter oat_writer(/*compiling_boot_image*/false,
+ ClearBootImageOption();
+ OatWriter oat_writer(*compiler_options_,
&timings,
profile_compilation_info,
CompactDexLevel::kCompactDexLevelNone);
@@ -164,7 +150,8 @@ class OatTest : public CommonCompilerTest {
SafeMap<std::string, std::string>& key_value_store,
bool verify) {
TimingLogger timings("WriteElf", false, false);
- OatWriter oat_writer(/*compiling_boot_image*/false,
+ ClearBootImageOption();
+ OatWriter oat_writer(*compiler_options_,
&timings,
/*profile_compilation_info*/nullptr,
CompactDexLevel::kCompactDexLevelNone);
@@ -180,9 +167,7 @@ class OatTest : public CommonCompilerTest {
SafeMap<std::string, std::string>& key_value_store,
bool verify) {
std::unique_ptr<ElfWriter> elf_writer = CreateElfWriterQuick(
- compiler_driver_->GetInstructionSet(),
- compiler_driver_->GetInstructionSetFeatures(),
- &compiler_driver_->GetCompilerOptions(),
+ compiler_driver_->GetCompilerOptions(),
oat_file);
elf_writer->Start();
OutputStream* oat_rodata = elf_writer->StartRoData();
@@ -191,8 +176,6 @@ class OatTest : public CommonCompilerTest {
if (!oat_writer.WriteAndOpenDexFiles(
vdex_file,
oat_rodata,
- compiler_driver_->GetInstructionSet(),
- compiler_driver_->GetInstructionSetFeatures(),
&key_value_store,
verify,
/* update_input_vdex */ false,
@@ -210,8 +193,8 @@ class OatTest : public CommonCompilerTest {
ScopedObjectAccess soa(Thread::Current());
class_linker->RegisterDexFile(*dex_file, nullptr);
}
- MultiOatRelativePatcher patcher(compiler_driver_->GetInstructionSet(),
- instruction_set_features_.get(),
+ MultiOatRelativePatcher patcher(compiler_options_->GetInstructionSet(),
+ compiler_options_->GetInstructionSetFeatures(),
compiler_driver_->GetCompiledMethodStorage());
oat_writer.Initialize(compiler_driver_.get(), nullptr, dex_files);
oat_writer.PrepareLayout(&patcher);
@@ -278,7 +261,6 @@ class OatTest : public CommonCompilerTest {
void TestZipFileInput(bool verify);
void TestZipFileInputWithEmptyDex();
- std::unique_ptr<const InstructionSetFeatures> insn_features_;
std::unique_ptr<QuickCompilerCallbacks> callbacks_;
std::vector<std::unique_ptr<MemMap>> opened_dex_files_maps_;
@@ -400,16 +382,13 @@ TEST_F(OatTest, WriteRead) {
TimingLogger timings("OatTest::WriteRead", false, false);
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- // TODO: make selectable.
- Compiler::Kind compiler_kind = Compiler::kQuick;
- InstructionSet insn_set = kIsTargetBuild ? InstructionSet::kThumb2 : InstructionSet::kX86;
std::string error_msg;
- SetupCompiler(compiler_kind, insn_set, std::vector<std::string>(), /*out*/ &error_msg);
+ SetupCompiler(std::vector<std::string>());
jobject class_loader = nullptr;
if (kCompile) {
TimingLogger timings2("OatTest::WriteRead", false, false);
- compiler_driver_->SetDexFilesForOatFile(class_linker->GetBootClassPath());
+ SetDexFilesForOatFile(class_linker->GetBootClassPath());
compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), &timings2);
}
@@ -446,8 +425,8 @@ TEST_F(OatTest, WriteRead) {
ASSERT_TRUE(java_lang_dex_file_ != nullptr);
const DexFile& dex_file = *java_lang_dex_file_;
uint32_t dex_file_checksum = dex_file.GetLocationChecksum();
- const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(dex_file.GetLocation().c_str(),
- &dex_file_checksum);
+ const OatDexFile* oat_dex_file = oat_file->GetOatDexFile(dex_file.GetLocation().c_str(),
+ &dex_file_checksum);
ASSERT_TRUE(oat_dex_file != nullptr);
CHECK_EQ(dex_file.GetLocationChecksum(), oat_dex_file->GetDexFileLocationChecksum());
ScopedObjectAccess soa(Thread::Current());
@@ -497,7 +476,7 @@ TEST_F(OatTest, OatHeaderSizeCheck) {
EXPECT_EQ(76U, sizeof(OatHeader));
EXPECT_EQ(4U, sizeof(OatMethodOffsets));
EXPECT_EQ(24U, sizeof(OatQuickMethodHeader));
- EXPECT_EQ(164 * static_cast<size_t>(GetInstructionSetPointerSize(kRuntimeISA)),
+ EXPECT_EQ(165 * static_cast<size_t>(GetInstructionSetPointerSize(kRuntimeISA)),
sizeof(QuickEntryPoints));
}
@@ -524,14 +503,9 @@ TEST_F(OatTest, OatHeaderIsValid) {
TEST_F(OatTest, EmptyTextSection) {
TimingLogger timings("OatTest::EmptyTextSection", false, false);
- // TODO: make selectable.
- Compiler::Kind compiler_kind = Compiler::kQuick;
- InstructionSet insn_set = kRuntimeISA;
- if (insn_set == InstructionSet::kArm) insn_set = InstructionSet::kThumb2;
- std::string error_msg;
std::vector<std::string> compiler_options;
compiler_options.push_back("--compiler-filter=extract");
- SetupCompiler(compiler_kind, insn_set, compiler_options, /*out*/ &error_msg);
+ SetupCompiler(compiler_options);
jobject class_loader;
{
@@ -547,7 +521,7 @@ TEST_F(OatTest, EmptyTextSection) {
ScopedObjectAccess soa(Thread::Current());
class_linker->RegisterDexFile(*dex_file, soa.Decode<mirror::ClassLoader>(class_loader));
}
- compiler_driver_->SetDexFilesForOatFile(dex_files);
+ SetDexFilesForOatFile(dex_files);
compiler_driver_->CompileAll(class_loader, dex_files, &timings);
ScratchFile tmp_base, tmp_oat(tmp_base, ".oat"), tmp_vdex(tmp_base, ".vdex");
@@ -560,6 +534,7 @@ TEST_F(OatTest, EmptyTextSection) {
/* verify */ false);
ASSERT_TRUE(success);
+ std::string error_msg;
std::unique_ptr<OatFile> oat_file(OatFile::Open(/* zip_fd */ -1,
tmp_oat.GetFilename(),
tmp_oat.GetFilename(),
diff --git a/dex2oat/linker/relative_patcher_test.h b/dex2oat/linker/relative_patcher_test.h
index b4123eea3e..ae58b54863 100644
--- a/dex2oat/linker/relative_patcher_test.h
+++ b/dex2oat/linker/relative_patcher_test.h
@@ -22,6 +22,7 @@
#include "base/array_ref.h"
#include "base/globals.h"
#include "base/macros.h"
+#include "common_compiler_test.h"
#include "compiled_method-inl.h"
#include "dex/verification_results.h"
#include "dex/method_reference.h"
@@ -38,38 +39,40 @@ namespace art {
namespace linker {
// Base class providing infrastructure for architecture-specific tests.
-class RelativePatcherTest : public testing::Test {
+class RelativePatcherTest : public CommonCompilerTest {
protected:
RelativePatcherTest(InstructionSet instruction_set, const std::string& variant)
- : compiler_options_(),
- verification_results_(&compiler_options_),
- driver_(&compiler_options_,
- &verification_results_,
- Compiler::kQuick,
- instruction_set,
- /* instruction_set_features*/ nullptr,
- /* image_classes */ nullptr,
- /* thread_count */ 1u,
- /* swap_fd */ -1,
- /* profile_compilation_info */ nullptr),
- error_msg_(),
- instruction_set_(instruction_set),
- features_(InstructionSetFeatures::FromVariant(instruction_set, variant, &error_msg_)),
+ : variant_(variant),
method_offset_map_(),
- patcher_(RelativePatcher::Create(instruction_set,
- features_.get(),
- &thunk_provider_,
- &method_offset_map_)),
+ patcher_(nullptr),
bss_begin_(0u),
compiled_method_refs_(),
compiled_methods_(),
patched_code_(),
output_(),
out_("test output stream", &output_) {
- CHECK(error_msg_.empty()) << instruction_set << "/" << variant;
+ // Override CommonCompilerTest's defaults.
+ instruction_set_ = instruction_set;
+ number_of_threads_ = 1u;
patched_code_.reserve(16 * KB);
}
+ void SetUp() OVERRIDE {
+ OverrideInstructionSetFeatures(instruction_set_, variant_);
+ CommonCompilerTest::SetUp();
+
+ patcher_ = RelativePatcher::Create(compiler_options_->GetInstructionSet(),
+ compiler_options_->GetInstructionSetFeatures(),
+ &thunk_provider_,
+ &method_offset_map_);
+ }
+
+ void TearDown() OVERRIDE {
+ compiled_methods_.clear();
+ patcher_.reset();
+ CommonCompilerTest::TearDown();
+ }
+
MethodReference MethodRef(uint32_t method_idx) {
CHECK_NE(method_idx, 0u);
return MethodReference(nullptr, method_idx);
@@ -81,7 +84,7 @@ class RelativePatcherTest : public testing::Test {
const ArrayRef<const LinkerPatch>& patches = ArrayRef<const LinkerPatch>()) {
compiled_method_refs_.push_back(method_ref);
compiled_methods_.emplace_back(new CompiledMethod(
- &driver_,
+ compiler_driver_.get(),
instruction_set_,
code,
/* frame_size_in_bytes */ 0u,
@@ -333,12 +336,7 @@ class RelativePatcherTest : public testing::Test {
static const uint32_t kTrampolineSize = 4u;
static const uint32_t kTrampolineOffset = 0u;
- CompilerOptions compiler_options_;
- VerificationResults verification_results_;
- CompilerDriver driver_; // Needed for constructing CompiledMethod.
- std::string error_msg_;
- InstructionSet instruction_set_;
- std::unique_ptr<const InstructionSetFeatures> features_;
+ std::string variant_;
ThunkProvider thunk_provider_;
MethodOffsetMap method_offset_map_;
std::unique_ptr<RelativePatcher> patcher_;
diff --git a/dexdump/dexdump.cc b/dexdump/dexdump.cc
index 85778b6411..82610353b4 100644
--- a/dexdump/dexdump.cc
+++ b/dexdump/dexdump.cc
@@ -612,7 +612,7 @@ static void dumpClassDef(const DexFile* pDexFile, int idx) {
pClassDef.class_data_off_, pClassDef.class_data_off_);
// Fields and methods.
- ClassAccessor accessor(*pDexFile, pClassDef);
+ ClassAccessor accessor(*pDexFile, idx);
fprintf(gOutFile, "static_fields_size : %d\n", accessor.NumStaticFields());
fprintf(gOutFile, "instance_fields_size: %d\n", accessor.NumInstanceFields());
fprintf(gOutFile, "direct_methods_size : %d\n", accessor.NumDirectMethods());
diff --git a/dexdump/dexdump_cfg.cc b/dexdump/dexdump_cfg.cc
index 69ee0682a3..7e534ed359 100644
--- a/dexdump/dexdump_cfg.cc
+++ b/dexdump/dexdump_cfg.cc
@@ -120,7 +120,7 @@ static void dumpMethodCFGImpl(const DexFile* dex_file,
os << inst_str.substr(cur_start, next_escape - cur_start);
// Escape all necessary characters.
while (next_escape < inst_str.size()) {
- char c = inst_str.at(next_escape);
+ char c = inst_str[next_escape];
if (c == '"' || c == '{' || c == '}' || c == '<' || c == '>') {
os << '\\' << c;
} else {
diff --git a/dexlayout/compact_dex_writer.cc b/dexlayout/compact_dex_writer.cc
index 2b4144c611..00fb0af710 100644
--- a/dexlayout/compact_dex_writer.cc
+++ b/dexlayout/compact_dex_writer.cc
@@ -40,9 +40,8 @@ CompactDexWriter::Container::Container(bool dedupe_code_items)
uint32_t CompactDexWriter::WriteDebugInfoOffsetTable(Stream* stream) {
const uint32_t start_offset = stream->Tell();
- const dex_ir::Collections& collections = header_->GetCollections();
// Debug offsets for method indexes. 0 means no debug info.
- std::vector<uint32_t> debug_info_offsets(collections.MethodIdsSize(), 0u);
+ std::vector<uint32_t> debug_info_offsets(header_->MethodIds().Size(), 0u);
static constexpr InvokeType invoke_types[] = {
kDirect,
@@ -50,7 +49,7 @@ uint32_t CompactDexWriter::WriteDebugInfoOffsetTable(Stream* stream) {
};
for (InvokeType invoke_type : invoke_types) {
- for (const std::unique_ptr<dex_ir::ClassDef>& class_def : collections.ClassDefs()) {
+ for (auto& class_def : header_->ClassDefs()) {
// Skip classes that are not defined in this dex file.
dex_ir::ClassData* class_data = class_def->GetClassData();
if (class_data == nullptr) {
@@ -59,8 +58,8 @@ uint32_t CompactDexWriter::WriteDebugInfoOffsetTable(Stream* stream) {
for (auto& method : *(invoke_type == InvokeType::kDirect
? class_data->DirectMethods()
: class_data->VirtualMethods())) {
- const dex_ir::MethodId* method_id = method->GetMethodId();
- dex_ir::CodeItem* code_item = method->GetCodeItem();
+ const dex_ir::MethodId* method_id = method.GetMethodId();
+ dex_ir::CodeItem* code_item = method.GetCodeItem();
if (code_item != nullptr && code_item->DebugInfo() != nullptr) {
const uint32_t debug_info_offset = code_item->DebugInfo()->GetOffset();
const uint32_t method_idx = method_id->GetIndex();
@@ -232,14 +231,13 @@ uint32_t CompactDexWriter::Deduper::Dedupe(uint32_t data_start,
}
void CompactDexWriter::SortDebugInfosByMethodIndex() {
- dex_ir::Collections& collections = header_->GetCollections();
static constexpr InvokeType invoke_types[] = {
kDirect,
kVirtual
};
std::map<const dex_ir::DebugInfoItem*, uint32_t> method_idx_map;
for (InvokeType invoke_type : invoke_types) {
- for (std::unique_ptr<dex_ir::ClassDef>& class_def : collections.ClassDefs()) {
+ for (auto& class_def : header_->ClassDefs()) {
// Skip classes that are not defined in this dex file.
dex_ir::ClassData* class_data = class_def->GetClassData();
if (class_data == nullptr) {
@@ -248,8 +246,8 @@ void CompactDexWriter::SortDebugInfosByMethodIndex() {
for (auto& method : *(invoke_type == InvokeType::kDirect
? class_data->DirectMethods()
: class_data->VirtualMethods())) {
- const dex_ir::MethodId* method_id = method->GetMethodId();
- dex_ir::CodeItem* code_item = method->GetCodeItem();
+ const dex_ir::MethodId* method_id = method.GetMethodId();
+ dex_ir::CodeItem* code_item = method.GetCodeItem();
if (code_item != nullptr && code_item->DebugInfo() != nullptr) {
const dex_ir::DebugInfoItem* debug_item = code_item->DebugInfo();
method_idx_map.insert(std::make_pair(debug_item, method_id->GetIndex()));
@@ -257,8 +255,8 @@ void CompactDexWriter::SortDebugInfosByMethodIndex() {
}
}
}
- std::sort(collections.DebugInfoItems().begin(),
- collections.DebugInfoItems().end(),
+ std::sort(header_->DebugInfoItems().begin(),
+ header_->DebugInfoItems().end(),
[&](const std::unique_ptr<dex_ir::DebugInfoItem>& a,
const std::unique_ptr<dex_ir::DebugInfoItem>& b) {
auto it_a = method_idx_map.find(a.get());
@@ -282,20 +280,19 @@ void CompactDexWriter::WriteHeader(Stream* stream) {
header.endian_tag_ = header_->EndianTag();
header.link_size_ = header_->LinkSize();
header.link_off_ = header_->LinkOffset();
- const dex_ir::Collections& collections = header_->GetCollections();
- header.map_off_ = collections.MapListOffset();
- header.string_ids_size_ = collections.StringIdsSize();
- header.string_ids_off_ = collections.StringIdsOffset();
- header.type_ids_size_ = collections.TypeIdsSize();
- header.type_ids_off_ = collections.TypeIdsOffset();
- header.proto_ids_size_ = collections.ProtoIdsSize();
- header.proto_ids_off_ = collections.ProtoIdsOffset();
- header.field_ids_size_ = collections.FieldIdsSize();
- header.field_ids_off_ = collections.FieldIdsOffset();
- header.method_ids_size_ = collections.MethodIdsSize();
- header.method_ids_off_ = collections.MethodIdsOffset();
- header.class_defs_size_ = collections.ClassDefsSize();
- header.class_defs_off_ = collections.ClassDefsOffset();
+ header.map_off_ = header_->MapListOffset();
+ header.string_ids_size_ = header_->StringIds().Size();
+ header.string_ids_off_ = header_->StringIds().GetOffset();
+ header.type_ids_size_ = header_->TypeIds().Size();
+ header.type_ids_off_ = header_->TypeIds().GetOffset();
+ header.proto_ids_size_ = header_->ProtoIds().Size();
+ header.proto_ids_off_ = header_->ProtoIds().GetOffset();
+ header.field_ids_size_ = header_->FieldIds().Size();
+ header.field_ids_off_ = header_->FieldIds().GetOffset();
+ header.method_ids_size_ = header_->MethodIds().Size();
+ header.method_ids_off_ = header_->MethodIds().GetOffset();
+ header.class_defs_size_ = header_->ClassDefs().Size();
+ header.class_defs_off_ = header_->ClassDefs().GetOffset();
header.data_size_ = header_->DataSize();
header.data_off_ = header_->DataOffset();
header.owned_data_begin_ = owned_data_begin_;
@@ -332,16 +329,15 @@ void CompactDexWriter::WriteStringData(Stream* stream, dex_ir::StringData* strin
}
bool CompactDexWriter::CanGenerateCompactDex(std::string* error_msg) {
- dex_ir::Collections& collections = header_->GetCollections();
static constexpr InvokeType invoke_types[] = {
kDirect,
kVirtual
};
- std::vector<bool> saw_method_id(collections.MethodIdsSize(), false);
- std::vector<dex_ir::CodeItem*> method_id_code_item(collections.MethodIdsSize(), nullptr);
- std::vector<dex_ir::DebugInfoItem*> method_id_debug_info(collections.MethodIdsSize(), nullptr);
+ std::vector<bool> saw_method_id(header_->MethodIds().Size(), false);
+ std::vector<dex_ir::CodeItem*> method_id_code_item(header_->MethodIds().Size(), nullptr);
+ std::vector<dex_ir::DebugInfoItem*> method_id_debug_info(header_->MethodIds().Size(), nullptr);
for (InvokeType invoke_type : invoke_types) {
- for (std::unique_ptr<dex_ir::ClassDef>& class_def : collections.ClassDefs()) {
+ for (auto& class_def : header_->ClassDefs()) {
// Skip classes that are not defined in this dex file.
dex_ir::ClassData* class_data = class_def->GetClassData();
if (class_data == nullptr) {
@@ -350,8 +346,8 @@ bool CompactDexWriter::CanGenerateCompactDex(std::string* error_msg) {
for (auto& method : *(invoke_type == InvokeType::kDirect
? class_data->DirectMethods()
: class_data->VirtualMethods())) {
- const uint32_t idx = method->GetMethodId()->GetIndex();
- dex_ir::CodeItem* code_item = method->GetCodeItem();
+ const uint32_t idx = method.GetMethodId()->GetIndex();
+ dex_ir::CodeItem* code_item = method.GetCodeItem();
dex_ir:: DebugInfoItem* debug_info_item = nullptr;
if (code_item != nullptr) {
debug_info_item = code_item->DebugInfo();
@@ -407,8 +403,6 @@ bool CompactDexWriter::Write(DexContainer* output, std::string* error_msg) {
// Starting offset is right after the header.
main_stream->Seek(GetHeaderSize());
- dex_ir::Collections& collection = header_->GetCollections();
-
// Based on: https://source.android.com/devices/tech/dalvik/dex-format
// Since the offsets may not be calculated already, the writing must be done in the correct order.
const uint32_t string_ids_offset = main_stream->Tell();
@@ -469,16 +463,16 @@ bool CompactDexWriter::Write(DexContainer* output, std::string* error_msg) {
// Write the map list.
if (compute_offsets_) {
data_stream->AlignTo(SectionAlignment(DexFile::kDexTypeMapList));
- collection.SetMapListOffset(data_stream->Tell());
+ header_->SetMapListOffset(data_stream->Tell());
} else {
- data_stream->Seek(collection.MapListOffset());
+ data_stream->Seek(header_->MapListOffset());
}
// Map items are included in the data section.
GenerateAndWriteMapItems(data_stream);
// Write link data if it exists.
- const std::vector<uint8_t>& link_data = collection.LinkData();
+ const std::vector<uint8_t>& link_data = header_->LinkData();
if (link_data.size() > 0) {
CHECK_EQ(header_->LinkSize(), static_cast<uint32_t>(link_data.size()));
if (compute_offsets_) {
diff --git a/dexlayout/compact_dex_writer.h b/dexlayout/compact_dex_writer.h
index 4b142a85bb..e7d5ed953d 100644
--- a/dexlayout/compact_dex_writer.h
+++ b/dexlayout/compact_dex_writer.h
@@ -22,7 +22,7 @@
#include <memory> // For unique_ptr
#include <unordered_map>
-#include "base/utils.h"
+#include "base/data_hash.h"
#include "dex_writer.h"
namespace art {
diff --git a/dexlayout/dex_ir.cc b/dexlayout/dex_ir.cc
index b7d9db6da5..3917847ea7 100644
--- a/dexlayout/dex_ir.cc
+++ b/dexlayout/dex_ir.cc
@@ -30,825 +30,11 @@
namespace art {
namespace dex_ir {
-static uint64_t ReadVarWidth(const uint8_t** data, uint8_t length, bool sign_extend) {
- uint64_t value = 0;
- for (uint32_t i = 0; i <= length; i++) {
- value |= static_cast<uint64_t>(*(*data)++) << (i * 8);
- }
- if (sign_extend) {
- int shift = (7 - length) * 8;
- return (static_cast<int64_t>(value) << shift) >> shift;
- }
- return value;
-}
-
-static uint32_t GetDebugInfoStreamSize(const uint8_t* debug_info_stream) {
- const uint8_t* stream = debug_info_stream;
- DecodeUnsignedLeb128(&stream); // line_start
- uint32_t parameters_size = DecodeUnsignedLeb128(&stream);
- for (uint32_t i = 0; i < parameters_size; ++i) {
- DecodeUnsignedLeb128P1(&stream); // Parameter name.
- }
-
- for (;;) {
- uint8_t opcode = *stream++;
- switch (opcode) {
- case DexFile::DBG_END_SEQUENCE:
- return stream - debug_info_stream; // end of stream.
- case DexFile::DBG_ADVANCE_PC:
- DecodeUnsignedLeb128(&stream); // addr_diff
- break;
- case DexFile::DBG_ADVANCE_LINE:
- DecodeSignedLeb128(&stream); // line_diff
- break;
- case DexFile::DBG_START_LOCAL:
- DecodeUnsignedLeb128(&stream); // register_num
- DecodeUnsignedLeb128P1(&stream); // name_idx
- DecodeUnsignedLeb128P1(&stream); // type_idx
- break;
- case DexFile::DBG_START_LOCAL_EXTENDED:
- DecodeUnsignedLeb128(&stream); // register_num
- DecodeUnsignedLeb128P1(&stream); // name_idx
- DecodeUnsignedLeb128P1(&stream); // type_idx
- DecodeUnsignedLeb128P1(&stream); // sig_idx
- break;
- case DexFile::DBG_END_LOCAL:
- case DexFile::DBG_RESTART_LOCAL:
- DecodeUnsignedLeb128(&stream); // register_num
- break;
- case DexFile::DBG_SET_PROLOGUE_END:
- case DexFile::DBG_SET_EPILOGUE_BEGIN:
- break;
- case DexFile::DBG_SET_FILE: {
- DecodeUnsignedLeb128P1(&stream); // name_idx
- break;
- }
- default: {
- break;
- }
- }
- }
-}
-
-static bool GetIdFromInstruction(Collections& collections,
- const Instruction* dec_insn,
- std::vector<TypeId*>* type_ids,
- std::vector<StringId*>* string_ids,
- std::vector<MethodId*>* method_ids,
- std::vector<FieldId*>* field_ids) {
- // Determine index and width of the string.
- uint32_t index = 0;
- switch (Instruction::FormatOf(dec_insn->Opcode())) {
- // SOME NOT SUPPORTED:
- // case Instruction::k20bc:
- case Instruction::k21c:
- case Instruction::k35c:
- // case Instruction::k35ms:
- case Instruction::k3rc:
- // case Instruction::k3rms:
- // case Instruction::k35mi:
- // case Instruction::k3rmi:
- case Instruction::k45cc:
- case Instruction::k4rcc:
- index = dec_insn->VRegB();
- break;
- case Instruction::k31c:
- index = dec_insn->VRegB();
- break;
- case Instruction::k22c:
- // case Instruction::k22cs:
- index = dec_insn->VRegC();
- break;
- default:
- break;
- } // switch
-
- // Determine index type, and add reference to the appropriate collection.
- switch (Instruction::IndexTypeOf(dec_insn->Opcode())) {
- case Instruction::kIndexTypeRef:
- if (index < collections.TypeIdsSize()) {
- type_ids->push_back(collections.GetTypeId(index));
- return true;
- }
- break;
- case Instruction::kIndexStringRef:
- if (index < collections.StringIdsSize()) {
- string_ids->push_back(collections.GetStringId(index));
- return true;
- }
- break;
- case Instruction::kIndexMethodRef:
- case Instruction::kIndexMethodAndProtoRef:
- if (index < collections.MethodIdsSize()) {
- method_ids->push_back(collections.GetMethodId(index));
- return true;
- }
- break;
- case Instruction::kIndexFieldRef:
- if (index < collections.FieldIdsSize()) {
- field_ids->push_back(collections.GetFieldId(index));
- return true;
- }
- break;
- case Instruction::kIndexUnknown:
- case Instruction::kIndexNone:
- case Instruction::kIndexVtableOffset:
- case Instruction::kIndexFieldOffset:
- default:
- break;
- } // switch
- return false;
-}
-
-/*
- * Get all the types, strings, methods, and fields referred to from bytecode.
- */
-static bool GetIdsFromByteCode(Collections& collections,
- const CodeItem* code,
- std::vector<TypeId*>* type_ids,
- std::vector<StringId*>* string_ids,
- std::vector<MethodId*>* method_ids,
- std::vector<FieldId*>* field_ids) {
- bool has_id = false;
- IterationRange<DexInstructionIterator> instructions = code->Instructions();
- SafeDexInstructionIterator it(instructions.begin(), instructions.end());
- for (; !it.IsErrorState() && it < instructions.end(); ++it) {
- // In case the instruction goes past the end of the code item, make sure to not process it.
- SafeDexInstructionIterator next = it;
- ++next;
- if (next.IsErrorState()) {
- break;
- }
- has_id |= GetIdFromInstruction(collections,
- &it.Inst(),
- type_ids,
- string_ids,
- method_ids,
- field_ids);
- } // for
- return has_id;
-}
-
-EncodedValue* Collections::ReadEncodedValue(const DexFile& dex_file, const uint8_t** data) {
- const uint8_t encoded_value = *(*data)++;
- const uint8_t type = encoded_value & 0x1f;
- EncodedValue* item = new EncodedValue(type);
- ReadEncodedValue(dex_file, data, type, encoded_value >> 5, item);
- return item;
-}
-
-EncodedValue* Collections::ReadEncodedValue(const DexFile& dex_file,
- const uint8_t** data,
- uint8_t type,
- uint8_t length) {
- EncodedValue* item = new EncodedValue(type);
- ReadEncodedValue(dex_file, data, type, length, item);
- return item;
-}
-
-void Collections::ReadEncodedValue(const DexFile& dex_file,
- const uint8_t** data,
- uint8_t type,
- uint8_t length,
- EncodedValue* item) {
- switch (type) {
- case DexFile::kDexAnnotationByte:
- item->SetByte(static_cast<int8_t>(ReadVarWidth(data, length, false)));
- break;
- case DexFile::kDexAnnotationShort:
- item->SetShort(static_cast<int16_t>(ReadVarWidth(data, length, true)));
- break;
- case DexFile::kDexAnnotationChar:
- item->SetChar(static_cast<uint16_t>(ReadVarWidth(data, length, false)));
- break;
- case DexFile::kDexAnnotationInt:
- item->SetInt(static_cast<int32_t>(ReadVarWidth(data, length, true)));
- break;
- case DexFile::kDexAnnotationLong:
- item->SetLong(static_cast<int64_t>(ReadVarWidth(data, length, true)));
- break;
- case DexFile::kDexAnnotationFloat: {
- // Fill on right.
- union {
- float f;
- uint32_t data;
- } conv;
- conv.data = static_cast<uint32_t>(ReadVarWidth(data, length, false)) << (3 - length) * 8;
- item->SetFloat(conv.f);
- break;
- }
- case DexFile::kDexAnnotationDouble: {
- // Fill on right.
- union {
- double d;
- uint64_t data;
- } conv;
- conv.data = ReadVarWidth(data, length, false) << (7 - length) * 8;
- item->SetDouble(conv.d);
- break;
- }
- case DexFile::kDexAnnotationMethodType: {
- const uint32_t proto_index = static_cast<uint32_t>(ReadVarWidth(data, length, false));
- item->SetProtoId(GetProtoId(proto_index));
- break;
- }
- case DexFile::kDexAnnotationMethodHandle: {
- const uint32_t method_handle_index = static_cast<uint32_t>(ReadVarWidth(data, length, false));
- item->SetMethodHandle(GetMethodHandle(method_handle_index));
- break;
- }
- case DexFile::kDexAnnotationString: {
- const uint32_t string_index = static_cast<uint32_t>(ReadVarWidth(data, length, false));
- item->SetStringId(GetStringId(string_index));
- break;
- }
- case DexFile::kDexAnnotationType: {
- const uint32_t string_index = static_cast<uint32_t>(ReadVarWidth(data, length, false));
- item->SetTypeId(GetTypeId(string_index));
- break;
- }
- case DexFile::kDexAnnotationField:
- case DexFile::kDexAnnotationEnum: {
- const uint32_t field_index = static_cast<uint32_t>(ReadVarWidth(data, length, false));
- item->SetFieldId(GetFieldId(field_index));
- break;
- }
- case DexFile::kDexAnnotationMethod: {
- const uint32_t method_index = static_cast<uint32_t>(ReadVarWidth(data, length, false));
- item->SetMethodId(GetMethodId(method_index));
- break;
- }
- case DexFile::kDexAnnotationArray: {
- EncodedValueVector* values = new EncodedValueVector();
- const uint32_t offset = *data - dex_file.DataBegin();
- const uint32_t size = DecodeUnsignedLeb128(data);
- // Decode all elements.
- for (uint32_t i = 0; i < size; i++) {
- values->push_back(std::unique_ptr<EncodedValue>(ReadEncodedValue(dex_file, data)));
- }
- EncodedArrayItem* array_item = new EncodedArrayItem(values);
- if (eagerly_assign_offsets_) {
- array_item->SetOffset(offset);
- }
- item->SetEncodedArray(array_item);
- break;
- }
- case DexFile::kDexAnnotationAnnotation: {
- AnnotationElementVector* elements = new AnnotationElementVector();
- const uint32_t type_idx = DecodeUnsignedLeb128(data);
- const uint32_t size = DecodeUnsignedLeb128(data);
- // Decode all name=value pairs.
- for (uint32_t i = 0; i < size; i++) {
- const uint32_t name_index = DecodeUnsignedLeb128(data);
- elements->push_back(std::unique_ptr<AnnotationElement>(
- new AnnotationElement(GetStringId(name_index), ReadEncodedValue(dex_file, data))));
- }
- item->SetEncodedAnnotation(new EncodedAnnotation(GetTypeId(type_idx), elements));
- break;
- }
- case DexFile::kDexAnnotationNull:
- break;
- case DexFile::kDexAnnotationBoolean:
- item->SetBoolean(length != 0);
- break;
- default:
- break;
- }
-}
-
-void Collections::CreateStringId(const DexFile& dex_file, uint32_t i) {
- const DexFile::StringId& disk_string_id = dex_file.GetStringId(dex::StringIndex(i));
- StringData* string_data = new StringData(dex_file.GetStringData(disk_string_id));
- AddItem(string_datas_map_, string_datas_, string_data, disk_string_id.string_data_off_);
-
- StringId* string_id = new StringId(string_data);
- AddIndexedItem(string_ids_, string_id, StringIdsOffset() + i * StringId::ItemSize(), i);
-}
-
-void Collections::CreateTypeId(const DexFile& dex_file, uint32_t i) {
- const DexFile::TypeId& disk_type_id = dex_file.GetTypeId(dex::TypeIndex(i));
- TypeId* type_id = new TypeId(GetStringId(disk_type_id.descriptor_idx_.index_));
- AddIndexedItem(type_ids_, type_id, TypeIdsOffset() + i * TypeId::ItemSize(), i);
-}
-
-void Collections::CreateProtoId(const DexFile& dex_file, uint32_t i) {
- const DexFile::ProtoId& disk_proto_id = dex_file.GetProtoId(dex::ProtoIndex(i));
- const DexFile::TypeList* type_list = dex_file.GetProtoParameters(disk_proto_id);
- TypeList* parameter_type_list = CreateTypeList(type_list, disk_proto_id.parameters_off_);
-
- ProtoId* proto_id = new ProtoId(GetStringId(disk_proto_id.shorty_idx_.index_),
- GetTypeId(disk_proto_id.return_type_idx_.index_),
- parameter_type_list);
- AddIndexedItem(proto_ids_, proto_id, ProtoIdsOffset() + i * ProtoId::ItemSize(), i);
-}
-
-void Collections::CreateFieldId(const DexFile& dex_file, uint32_t i) {
- const DexFile::FieldId& disk_field_id = dex_file.GetFieldId(i);
- FieldId* field_id = new FieldId(GetTypeId(disk_field_id.class_idx_.index_),
- GetTypeId(disk_field_id.type_idx_.index_),
- GetStringId(disk_field_id.name_idx_.index_));
- AddIndexedItem(field_ids_, field_id, FieldIdsOffset() + i * FieldId::ItemSize(), i);
-}
-
-void Collections::CreateMethodId(const DexFile& dex_file, uint32_t i) {
- const DexFile::MethodId& disk_method_id = dex_file.GetMethodId(i);
- MethodId* method_id = new MethodId(GetTypeId(disk_method_id.class_idx_.index_),
- GetProtoId(disk_method_id.proto_idx_.index_),
- GetStringId(disk_method_id.name_idx_.index_));
- AddIndexedItem(method_ids_, method_id, MethodIdsOffset() + i * MethodId::ItemSize(), i);
-}
-
-void Collections::CreateClassDef(const DexFile& dex_file, uint32_t i) {
- const DexFile::ClassDef& disk_class_def = dex_file.GetClassDef(i);
- const TypeId* class_type = GetTypeId(disk_class_def.class_idx_.index_);
- uint32_t access_flags = disk_class_def.access_flags_;
- const TypeId* superclass = GetTypeIdOrNullPtr(disk_class_def.superclass_idx_.index_);
-
- const DexFile::TypeList* type_list = dex_file.GetInterfacesList(disk_class_def);
- TypeList* interfaces_type_list = CreateTypeList(type_list, disk_class_def.interfaces_off_);
-
- const StringId* source_file = GetStringIdOrNullPtr(disk_class_def.source_file_idx_.index_);
- // Annotations.
- AnnotationsDirectoryItem* annotations = nullptr;
- const DexFile::AnnotationsDirectoryItem* disk_annotations_directory_item =
- dex_file.GetAnnotationsDirectory(disk_class_def);
- if (disk_annotations_directory_item != nullptr) {
- annotations = CreateAnnotationsDirectoryItem(
- dex_file, disk_annotations_directory_item, disk_class_def.annotations_off_);
- }
- // Static field initializers.
- const uint8_t* static_data = dex_file.GetEncodedStaticFieldValuesArray(disk_class_def);
- EncodedArrayItem* static_values =
- CreateEncodedArrayItem(dex_file, static_data, disk_class_def.static_values_off_);
- ClassData* class_data = CreateClassData(
- dex_file, dex_file.GetClassData(disk_class_def), disk_class_def.class_data_off_);
- ClassDef* class_def = new ClassDef(class_type, access_flags, superclass, interfaces_type_list,
- source_file, annotations, static_values, class_data);
- AddIndexedItem(class_defs_, class_def, ClassDefsOffset() + i * ClassDef::ItemSize(), i);
-}
-
-TypeList* Collections::CreateTypeList(const DexFile::TypeList* dex_type_list, uint32_t offset) {
- if (dex_type_list == nullptr) {
- return nullptr;
- }
- TypeList* type_list = type_lists_map_.GetExistingObject(offset);
- if (type_list == nullptr) {
- TypeIdVector* type_vector = new TypeIdVector();
- uint32_t size = dex_type_list->Size();
- for (uint32_t index = 0; index < size; ++index) {
- type_vector->push_back(GetTypeId(dex_type_list->GetTypeItem(index).type_idx_.index_));
- }
- type_list = new TypeList(type_vector);
- AddItem(type_lists_map_, type_lists_, type_list, offset);
- }
- return type_list;
-}
-
-EncodedArrayItem* Collections::CreateEncodedArrayItem(const DexFile& dex_file,
- const uint8_t* static_data,
- uint32_t offset) {
- if (static_data == nullptr) {
- return nullptr;
- }
- EncodedArrayItem* encoded_array_item = encoded_array_items_map_.GetExistingObject(offset);
- if (encoded_array_item == nullptr) {
- uint32_t size = DecodeUnsignedLeb128(&static_data);
- EncodedValueVector* values = new EncodedValueVector();
- for (uint32_t i = 0; i < size; ++i) {
- values->push_back(std::unique_ptr<EncodedValue>(ReadEncodedValue(dex_file, &static_data)));
- }
- // TODO: Calculate the size of the encoded array.
- encoded_array_item = new EncodedArrayItem(values);
- AddItem(encoded_array_items_map_, encoded_array_items_, encoded_array_item, offset);
- }
- return encoded_array_item;
-}
-
-void Collections::AddAnnotationsFromMapListSection(const DexFile& dex_file,
- uint32_t start_offset,
- uint32_t count) {
- uint32_t current_offset = start_offset;
- for (size_t i = 0; i < count; ++i) {
- // Annotation that we didn't process already, add it to the set.
- const DexFile::AnnotationItem* annotation = dex_file.GetAnnotationItemAtOffset(current_offset);
- AnnotationItem* annotation_item = CreateAnnotationItem(dex_file, annotation);
- DCHECK(annotation_item != nullptr);
- current_offset += annotation_item->GetSize();
- }
-}
-
-AnnotationItem* Collections::CreateAnnotationItem(const DexFile& dex_file,
- const DexFile::AnnotationItem* annotation) {
- const uint8_t* const start_data = reinterpret_cast<const uint8_t*>(annotation);
- const uint32_t offset = start_data - dex_file.DataBegin();
- AnnotationItem* annotation_item = annotation_items_map_.GetExistingObject(offset);
- if (annotation_item == nullptr) {
- uint8_t visibility = annotation->visibility_;
- const uint8_t* annotation_data = annotation->annotation_;
- std::unique_ptr<EncodedValue> encoded_value(
- ReadEncodedValue(dex_file, &annotation_data, DexFile::kDexAnnotationAnnotation, 0));
- annotation_item = new AnnotationItem(visibility, encoded_value->ReleaseEncodedAnnotation());
- annotation_item->SetSize(annotation_data - start_data);
- AddItem(annotation_items_map_, annotation_items_, annotation_item, offset);
- }
- return annotation_item;
-}
-
-
-AnnotationSetItem* Collections::CreateAnnotationSetItem(const DexFile& dex_file,
- const DexFile::AnnotationSetItem* disk_annotations_item, uint32_t offset) {
- if (disk_annotations_item == nullptr || (disk_annotations_item->size_ == 0 && offset == 0)) {
- return nullptr;
- }
- AnnotationSetItem* annotation_set_item = annotation_set_items_map_.GetExistingObject(offset);
- if (annotation_set_item == nullptr) {
- std::vector<AnnotationItem*>* items = new std::vector<AnnotationItem*>();
- for (uint32_t i = 0; i < disk_annotations_item->size_; ++i) {
- const DexFile::AnnotationItem* annotation =
- dex_file.GetAnnotationItem(disk_annotations_item, i);
- if (annotation == nullptr) {
- continue;
- }
- AnnotationItem* annotation_item = CreateAnnotationItem(dex_file, annotation);
- items->push_back(annotation_item);
- }
- annotation_set_item = new AnnotationSetItem(items);
- AddItem(annotation_set_items_map_, annotation_set_items_, annotation_set_item, offset);
- }
- return annotation_set_item;
-}
-
-AnnotationsDirectoryItem* Collections::CreateAnnotationsDirectoryItem(const DexFile& dex_file,
- const DexFile::AnnotationsDirectoryItem* disk_annotations_item, uint32_t offset) {
- AnnotationsDirectoryItem* annotations_directory_item =
- annotations_directory_items_map_.GetExistingObject(offset);
- if (annotations_directory_item != nullptr) {
- return annotations_directory_item;
- }
- const DexFile::AnnotationSetItem* class_set_item =
- dex_file.GetClassAnnotationSet(disk_annotations_item);
- AnnotationSetItem* class_annotation = nullptr;
- if (class_set_item != nullptr) {
- uint32_t item_offset = disk_annotations_item->class_annotations_off_;
- class_annotation = CreateAnnotationSetItem(dex_file, class_set_item, item_offset);
- }
- const DexFile::FieldAnnotationsItem* fields =
- dex_file.GetFieldAnnotations(disk_annotations_item);
- FieldAnnotationVector* field_annotations = nullptr;
- if (fields != nullptr) {
- field_annotations = new FieldAnnotationVector();
- for (uint32_t i = 0; i < disk_annotations_item->fields_size_; ++i) {
- FieldId* field_id = GetFieldId(fields[i].field_idx_);
- const DexFile::AnnotationSetItem* field_set_item =
- dex_file.GetFieldAnnotationSetItem(fields[i]);
- uint32_t annotation_set_offset = fields[i].annotations_off_;
- AnnotationSetItem* annotation_set_item =
- CreateAnnotationSetItem(dex_file, field_set_item, annotation_set_offset);
- field_annotations->push_back(std::unique_ptr<FieldAnnotation>(
- new FieldAnnotation(field_id, annotation_set_item)));
- }
- }
- const DexFile::MethodAnnotationsItem* methods =
- dex_file.GetMethodAnnotations(disk_annotations_item);
- MethodAnnotationVector* method_annotations = nullptr;
- if (methods != nullptr) {
- method_annotations = new MethodAnnotationVector();
- for (uint32_t i = 0; i < disk_annotations_item->methods_size_; ++i) {
- MethodId* method_id = GetMethodId(methods[i].method_idx_);
- const DexFile::AnnotationSetItem* method_set_item =
- dex_file.GetMethodAnnotationSetItem(methods[i]);
- uint32_t annotation_set_offset = methods[i].annotations_off_;
- AnnotationSetItem* annotation_set_item =
- CreateAnnotationSetItem(dex_file, method_set_item, annotation_set_offset);
- method_annotations->push_back(std::unique_ptr<MethodAnnotation>(
- new MethodAnnotation(method_id, annotation_set_item)));
- }
- }
- const DexFile::ParameterAnnotationsItem* parameters =
- dex_file.GetParameterAnnotations(disk_annotations_item);
- ParameterAnnotationVector* parameter_annotations = nullptr;
- if (parameters != nullptr) {
- parameter_annotations = new ParameterAnnotationVector();
- for (uint32_t i = 0; i < disk_annotations_item->parameters_size_; ++i) {
- MethodId* method_id = GetMethodId(parameters[i].method_idx_);
- const DexFile::AnnotationSetRefList* list =
- dex_file.GetParameterAnnotationSetRefList(&parameters[i]);
- parameter_annotations->push_back(std::unique_ptr<ParameterAnnotation>(
- GenerateParameterAnnotation(dex_file, method_id, list, parameters[i].annotations_off_)));
- }
- }
- // TODO: Calculate the size of the annotations directory.
-annotations_directory_item = new AnnotationsDirectoryItem(
- class_annotation, field_annotations, method_annotations, parameter_annotations);
- AddItem(annotations_directory_items_map_,
- annotations_directory_items_,
- annotations_directory_item,
- offset);
- return annotations_directory_item;
-}
-
-ParameterAnnotation* Collections::GenerateParameterAnnotation(
- const DexFile& dex_file, MethodId* method_id,
- const DexFile::AnnotationSetRefList* annotation_set_ref_list, uint32_t offset) {
- AnnotationSetRefList* set_ref_list = annotation_set_ref_lists_map_.GetExistingObject(offset);
- if (set_ref_list == nullptr) {
- std::vector<AnnotationSetItem*>* annotations = new std::vector<AnnotationSetItem*>();
- for (uint32_t i = 0; i < annotation_set_ref_list->size_; ++i) {
- const DexFile::AnnotationSetItem* annotation_set_item =
- dex_file.GetSetRefItemItem(&annotation_set_ref_list->list_[i]);
- uint32_t set_offset = annotation_set_ref_list->list_[i].annotations_off_;
- annotations->push_back(CreateAnnotationSetItem(dex_file, annotation_set_item, set_offset));
- }
- set_ref_list = new AnnotationSetRefList(annotations);
- AddItem(annotation_set_ref_lists_map_, annotation_set_ref_lists_, set_ref_list, offset);
- }
- return new ParameterAnnotation(method_id, set_ref_list);
-}
-
-CodeItem* Collections::DedupeOrCreateCodeItem(const DexFile& dex_file,
- const DexFile::CodeItem* disk_code_item,
- uint32_t offset,
- uint32_t dex_method_index) {
- if (disk_code_item == nullptr) {
- return nullptr;
- }
- CodeItemDebugInfoAccessor accessor(dex_file, disk_code_item, dex_method_index);
- const uint32_t debug_info_offset = accessor.DebugInfoOffset();
-
- // Create the offsets pair and dedupe based on it.
- std::pair<uint32_t, uint32_t> offsets_pair(offset, debug_info_offset);
- auto existing = code_items_map_.find(offsets_pair);
- if (existing != code_items_map_.end()) {
- return existing->second;
- }
-
- const uint8_t* debug_info_stream = dex_file.GetDebugInfoStream(debug_info_offset);
- DebugInfoItem* debug_info = nullptr;
- if (debug_info_stream != nullptr) {
- debug_info = debug_info_items_map_.GetExistingObject(debug_info_offset);
- if (debug_info == nullptr) {
- uint32_t debug_info_size = GetDebugInfoStreamSize(debug_info_stream);
- uint8_t* debug_info_buffer = new uint8_t[debug_info_size];
- memcpy(debug_info_buffer, debug_info_stream, debug_info_size);
- debug_info = new DebugInfoItem(debug_info_size, debug_info_buffer);
- AddItem(debug_info_items_map_, debug_info_items_, debug_info, debug_info_offset);
- }
- }
-
- uint32_t insns_size = accessor.InsnsSizeInCodeUnits();
- uint16_t* insns = new uint16_t[insns_size];
- memcpy(insns, accessor.Insns(), insns_size * sizeof(uint16_t));
-
- TryItemVector* tries = nullptr;
- CatchHandlerVector* handler_list = nullptr;
- if (accessor.TriesSize() > 0) {
- tries = new TryItemVector();
- handler_list = new CatchHandlerVector();
- for (const DexFile::TryItem& disk_try_item : accessor.TryItems()) {
- uint32_t start_addr = disk_try_item.start_addr_;
- uint16_t insn_count = disk_try_item.insn_count_;
- uint16_t handler_off = disk_try_item.handler_off_;
- const CatchHandler* handlers = nullptr;
- for (std::unique_ptr<const CatchHandler>& existing_handlers : *handler_list) {
- if (handler_off == existing_handlers->GetListOffset()) {
- handlers = existing_handlers.get();
- break;
- }
- }
- if (handlers == nullptr) {
- bool catch_all = false;
- TypeAddrPairVector* addr_pairs = new TypeAddrPairVector();
- for (CatchHandlerIterator it(accessor, disk_try_item); it.HasNext(); it.Next()) {
- const dex::TypeIndex type_index = it.GetHandlerTypeIndex();
- const TypeId* type_id = GetTypeIdOrNullPtr(type_index.index_);
- catch_all |= type_id == nullptr;
- addr_pairs->push_back(std::unique_ptr<const TypeAddrPair>(
- new TypeAddrPair(type_id, it.GetHandlerAddress())));
- }
- handlers = new CatchHandler(catch_all, handler_off, addr_pairs);
- handler_list->push_back(std::unique_ptr<const CatchHandler>(handlers));
- }
- TryItem* try_item = new TryItem(start_addr, insn_count, handlers);
- tries->push_back(std::unique_ptr<const TryItem>(try_item));
- }
- // Manually walk catch handlers list and add any missing handlers unreferenced by try items.
- const uint8_t* handlers_base = accessor.GetCatchHandlerData();
- const uint8_t* handlers_data = handlers_base;
- uint32_t handlers_size = DecodeUnsignedLeb128(&handlers_data);
- while (handlers_size > handler_list->size()) {
- bool already_added = false;
- uint16_t handler_off = handlers_data - handlers_base;
- for (std::unique_ptr<const CatchHandler>& existing_handlers : *handler_list) {
- if (handler_off == existing_handlers->GetListOffset()) {
- already_added = true;
- break;
- }
- }
- int32_t size = DecodeSignedLeb128(&handlers_data);
- bool has_catch_all = size <= 0;
- if (has_catch_all) {
- size = -size;
- }
- if (already_added) {
- for (int32_t i = 0; i < size; i++) {
- DecodeUnsignedLeb128(&handlers_data);
- DecodeUnsignedLeb128(&handlers_data);
- }
- if (has_catch_all) {
- DecodeUnsignedLeb128(&handlers_data);
- }
- continue;
- }
- TypeAddrPairVector* addr_pairs = new TypeAddrPairVector();
- for (int32_t i = 0; i < size; i++) {
- const TypeId* type_id = GetTypeIdOrNullPtr(DecodeUnsignedLeb128(&handlers_data));
- uint32_t addr = DecodeUnsignedLeb128(&handlers_data);
- addr_pairs->push_back(
- std::unique_ptr<const TypeAddrPair>(new TypeAddrPair(type_id, addr)));
- }
- if (has_catch_all) {
- uint32_t addr = DecodeUnsignedLeb128(&handlers_data);
- addr_pairs->push_back(
- std::unique_ptr<const TypeAddrPair>(new TypeAddrPair(nullptr, addr)));
- }
- const CatchHandler* handler = new CatchHandler(has_catch_all, handler_off, addr_pairs);
- handler_list->push_back(std::unique_ptr<const CatchHandler>(handler));
- }
- }
-
- uint32_t size = dex_file.GetCodeItemSize(*disk_code_item);
- CodeItem* code_item = new CodeItem(accessor.RegistersSize(),
- accessor.InsSize(),
- accessor.OutsSize(),
- debug_info,
- insns_size,
- insns,
- tries,
- handler_list);
- code_item->SetSize(size);
-
- // Add the code item to the map.
- DCHECK(!code_item->OffsetAssigned());
- if (eagerly_assign_offsets_) {
- code_item->SetOffset(offset);
- }
- code_items_map_.emplace(offsets_pair, code_item);
- code_items_.AddItem(code_item);
-
- // Add "fixup" references to types, strings, methods, and fields.
- // This is temporary, as we will probably want more detailed parsing of the
- // instructions here.
- std::vector<TypeId*> type_ids;
- std::vector<StringId*> string_ids;
- std::vector<MethodId*> method_ids;
- std::vector<FieldId*> field_ids;
- if (GetIdsFromByteCode(*this,
- code_item,
- /*out*/ &type_ids,
- /*out*/ &string_ids,
- /*out*/ &method_ids,
- /*out*/ &field_ids)) {
- CodeFixups* fixups = new CodeFixups(std::move(type_ids),
- std::move(string_ids),
- std::move(method_ids),
- std::move(field_ids));
- code_item->SetCodeFixups(fixups);
- }
-
- return code_item;
-}
-
-MethodItem* Collections::GenerateMethodItem(const DexFile& dex_file, ClassDataItemIterator& cdii) {
- MethodId* method_id = GetMethodId(cdii.GetMemberIndex());
- uint32_t access_flags = cdii.GetRawMemberAccessFlags();
- const DexFile::CodeItem* disk_code_item = cdii.GetMethodCodeItem();
- // Temporary hack to prevent incorrectly deduping code items if they have the same offset since
- // they may have different debug info streams.
- CodeItem* code_item = DedupeOrCreateCodeItem(dex_file,
- disk_code_item,
- cdii.GetMethodCodeItemOffset(),
- cdii.GetMemberIndex());
- return new MethodItem(access_flags, method_id, code_item);
-}
-
-ClassData* Collections::CreateClassData(
- const DexFile& dex_file, const uint8_t* encoded_data, uint32_t offset) {
- // Read the fields and methods defined by the class, resolving the circular reference from those
- // to classes by setting class at the same time.
- ClassData* class_data = class_datas_map_.GetExistingObject(offset);
- if (class_data == nullptr && encoded_data != nullptr) {
- ClassDataItemIterator cdii(dex_file, encoded_data);
- // Static fields.
- FieldItemVector* static_fields = new FieldItemVector();
- for (; cdii.HasNextStaticField(); cdii.Next()) {
- FieldId* field_item = GetFieldId(cdii.GetMemberIndex());
- uint32_t access_flags = cdii.GetRawMemberAccessFlags();
- static_fields->push_back(std::unique_ptr<FieldItem>(new FieldItem(access_flags, field_item)));
- }
- // Instance fields.
- FieldItemVector* instance_fields = new FieldItemVector();
- for (; cdii.HasNextInstanceField(); cdii.Next()) {
- FieldId* field_item = GetFieldId(cdii.GetMemberIndex());
- uint32_t access_flags = cdii.GetRawMemberAccessFlags();
- instance_fields->push_back(
- std::unique_ptr<FieldItem>(new FieldItem(access_flags, field_item)));
- }
- // Direct methods.
- MethodItemVector* direct_methods = new MethodItemVector();
- for (; cdii.HasNextDirectMethod(); cdii.Next()) {
- direct_methods->push_back(std::unique_ptr<MethodItem>(GenerateMethodItem(dex_file, cdii)));
- }
- // Virtual methods.
- MethodItemVector* virtual_methods = new MethodItemVector();
- for (; cdii.HasNextVirtualMethod(); cdii.Next()) {
- virtual_methods->push_back(std::unique_ptr<MethodItem>(GenerateMethodItem(dex_file, cdii)));
- }
- class_data = new ClassData(static_fields, instance_fields, direct_methods, virtual_methods);
- class_data->SetSize(cdii.EndDataPointer() - encoded_data);
- AddItem(class_datas_map_, class_datas_, class_data, offset);
- }
- return class_data;
-}
-
-void Collections::CreateCallSitesAndMethodHandles(const DexFile& dex_file) {
- // Iterate through the map list and set the offset of the CallSiteIds and MethodHandleItems.
- const DexFile::MapList* map = dex_file.GetMapList();
- for (uint32_t i = 0; i < map->size_; ++i) {
- const DexFile::MapItem* item = map->list_ + i;
- switch (item->type_) {
- case DexFile::kDexTypeCallSiteIdItem:
- SetCallSiteIdsOffset(item->offset_);
- break;
- case DexFile::kDexTypeMethodHandleItem:
- SetMethodHandleItemsOffset(item->offset_);
- break;
- default:
- break;
- }
- }
- // Populate MethodHandleItems first (CallSiteIds may depend on them).
- for (uint32_t i = 0; i < dex_file.NumMethodHandles(); i++) {
- CreateMethodHandleItem(dex_file, i);
- }
- // Populate CallSiteIds.
- for (uint32_t i = 0; i < dex_file.NumCallSiteIds(); i++) {
- CreateCallSiteId(dex_file, i);
- }
-}
-
-void Collections::CreateCallSiteId(const DexFile& dex_file, uint32_t i) {
- const DexFile::CallSiteIdItem& disk_call_site_id = dex_file.GetCallSiteId(i);
- const uint8_t* disk_call_item_ptr = dex_file.DataBegin() + disk_call_site_id.data_off_;
- EncodedArrayItem* call_site_item =
- CreateEncodedArrayItem(dex_file, disk_call_item_ptr, disk_call_site_id.data_off_);
-
- CallSiteId* call_site_id = new CallSiteId(call_site_item);
- AddIndexedItem(call_site_ids_, call_site_id, CallSiteIdsOffset() + i * CallSiteId::ItemSize(), i);
-}
-
-void Collections::CreateMethodHandleItem(const DexFile& dex_file, uint32_t i) {
- const DexFile::MethodHandleItem& disk_method_handle = dex_file.GetMethodHandle(i);
- uint16_t index = disk_method_handle.field_or_method_idx_;
- DexFile::MethodHandleType type =
- static_cast<DexFile::MethodHandleType>(disk_method_handle.method_handle_type_);
- bool is_invoke = type == DexFile::MethodHandleType::kInvokeStatic ||
- type == DexFile::MethodHandleType::kInvokeInstance ||
- type == DexFile::MethodHandleType::kInvokeConstructor ||
- type == DexFile::MethodHandleType::kInvokeDirect ||
- type == DexFile::MethodHandleType::kInvokeInterface;
- static_assert(DexFile::MethodHandleType::kLast == DexFile::MethodHandleType::kInvokeInterface,
- "Unexpected method handle types.");
- IndexedItem* field_or_method_id;
- if (is_invoke) {
- field_or_method_id = GetMethodId(index);
- } else {
- field_or_method_id = GetFieldId(index);
- }
- MethodHandleItem* method_handle = new MethodHandleItem(type, field_or_method_id);
- AddIndexedItem(method_handle_items_,
- method_handle,
- MethodHandleItemsOffset() + i * MethodHandleItem::ItemSize(),
- i);
-}
-
-void Collections::SortVectorsByMapOrder() {
- string_datas_.SortByMapOrder(string_datas_map_.Collection());
- type_lists_.SortByMapOrder(type_lists_map_.Collection());
- encoded_array_items_.SortByMapOrder(encoded_array_items_map_.Collection());
- annotation_items_.SortByMapOrder(annotation_items_map_.Collection());
- annotation_set_items_.SortByMapOrder(annotation_set_items_map_.Collection());
- annotation_set_ref_lists_.SortByMapOrder(annotation_set_ref_lists_map_.Collection());
- annotations_directory_items_.SortByMapOrder(annotations_directory_items_map_.Collection());
- debug_info_items_.SortByMapOrder(debug_info_items_map_.Collection());
- code_items_.SortByMapOrder(code_items_map_);
- class_datas_.SortByMapOrder(class_datas_map_.Collection());
-}
-
-static uint32_t HeaderOffset(const dex_ir::Collections& collections ATTRIBUTE_UNUSED) {
+static uint32_t HeaderOffset(const dex_ir::Header* header ATTRIBUTE_UNUSED) {
return 0;
}
-static uint32_t HeaderSize(const dex_ir::Collections& collections ATTRIBUTE_UNUSED) {
+static uint32_t HeaderSize(const dex_ir::Header* header ATTRIBUTE_UNUSED) {
// Size is in elements, so there is only one header.
return 1;
}
@@ -859,9 +45,9 @@ struct FileSectionDescriptor {
std::string name;
uint16_t type;
// A function that when applied to a collection object, gives the size of the section.
- std::function<uint32_t(const dex_ir::Collections&)> size_fn;
+ std::function<uint32_t(dex_ir::Header*)> size_fn;
// A function that when applied to a collection object, gives the offset of the section.
- std::function<uint32_t(const dex_ir::Collections&)> offset_fn;
+ std::function<uint32_t(dex_ir::Header*)> offset_fn;
};
static const FileSectionDescriptor kFileSectionDescriptors[] = {
@@ -873,106 +59,105 @@ static const FileSectionDescriptor kFileSectionDescriptors[] = {
}, {
"StringId",
DexFile::kDexTypeStringIdItem,
- &dex_ir::Collections::StringIdsSize,
- &dex_ir::Collections::StringIdsOffset
+ [](const dex_ir::Header* h) { return h->StringIds().Size(); },
+ [](const dex_ir::Header* h) { return h->StringIds().GetOffset(); }
}, {
"TypeId",
DexFile::kDexTypeTypeIdItem,
- &dex_ir::Collections::TypeIdsSize,
- &dex_ir::Collections::TypeIdsOffset
+ [](const dex_ir::Header* h) { return h->TypeIds().Size(); },
+ [](const dex_ir::Header* h) { return h->TypeIds().GetOffset(); }
}, {
"ProtoId",
DexFile::kDexTypeProtoIdItem,
- &dex_ir::Collections::ProtoIdsSize,
- &dex_ir::Collections::ProtoIdsOffset
+ [](const dex_ir::Header* h) { return h->ProtoIds().Size(); },
+ [](const dex_ir::Header* h) { return h->ProtoIds().GetOffset(); }
}, {
"FieldId",
DexFile::kDexTypeFieldIdItem,
- &dex_ir::Collections::FieldIdsSize,
- &dex_ir::Collections::FieldIdsOffset
+ [](const dex_ir::Header* h) { return h->FieldIds().Size(); },
+ [](const dex_ir::Header* h) { return h->FieldIds().GetOffset(); }
}, {
"MethodId",
DexFile::kDexTypeMethodIdItem,
- &dex_ir::Collections::MethodIdsSize,
- &dex_ir::Collections::MethodIdsOffset
+ [](const dex_ir::Header* h) { return h->MethodIds().Size(); },
+ [](const dex_ir::Header* h) { return h->MethodIds().GetOffset(); }
}, {
"ClassDef",
DexFile::kDexTypeClassDefItem,
- &dex_ir::Collections::ClassDefsSize,
- &dex_ir::Collections::ClassDefsOffset
+ [](const dex_ir::Header* h) { return h->ClassDefs().Size(); },
+ [](const dex_ir::Header* h) { return h->ClassDefs().GetOffset(); }
}, {
"CallSiteId",
DexFile::kDexTypeCallSiteIdItem,
- &dex_ir::Collections::CallSiteIdsSize,
- &dex_ir::Collections::CallSiteIdsOffset
+ [](const dex_ir::Header* h) { return h->CallSiteIds().Size(); },
+ [](const dex_ir::Header* h) { return h->CallSiteIds().GetOffset(); }
}, {
"MethodHandle",
DexFile::kDexTypeMethodHandleItem,
- &dex_ir::Collections::MethodHandleItemsSize,
- &dex_ir::Collections::MethodHandleItemsOffset
+ [](const dex_ir::Header* h) { return h->MethodHandleItems().Size(); },
+ [](const dex_ir::Header* h) { return h->MethodHandleItems().GetOffset(); }
}, {
"StringData",
DexFile::kDexTypeStringDataItem,
- &dex_ir::Collections::StringDatasSize,
- &dex_ir::Collections::StringDatasOffset
+ [](const dex_ir::Header* h) { return h->StringDatas().Size(); },
+ [](const dex_ir::Header* h) { return h->StringDatas().GetOffset(); }
}, {
"TypeList",
DexFile::kDexTypeTypeList,
- &dex_ir::Collections::TypeListsSize,
- &dex_ir::Collections::TypeListsOffset
+ [](const dex_ir::Header* h) { return h->TypeLists().Size(); },
+ [](const dex_ir::Header* h) { return h->TypeLists().GetOffset(); }
}, {
"EncArr",
DexFile::kDexTypeEncodedArrayItem,
- &dex_ir::Collections::EncodedArrayItemsSize,
- &dex_ir::Collections::EncodedArrayItemsOffset
+ [](const dex_ir::Header* h) { return h->EncodedArrayItems().Size(); },
+ [](const dex_ir::Header* h) { return h->EncodedArrayItems().GetOffset(); }
}, {
"Annotation",
DexFile::kDexTypeAnnotationItem,
- &dex_ir::Collections::AnnotationItemsSize,
- &dex_ir::Collections::AnnotationItemsOffset
+ [](const dex_ir::Header* h) { return h->AnnotationItems().Size(); },
+ [](const dex_ir::Header* h) { return h->AnnotationItems().GetOffset(); }
}, {
"AnnoSet",
DexFile::kDexTypeAnnotationSetItem,
- &dex_ir::Collections::AnnotationSetItemsSize,
- &dex_ir::Collections::AnnotationSetItemsOffset
+ [](const dex_ir::Header* h) { return h->AnnotationSetItems().Size(); },
+ [](const dex_ir::Header* h) { return h->AnnotationSetItems().GetOffset(); }
}, {
"AnnoSetRL",
DexFile::kDexTypeAnnotationSetRefList,
- &dex_ir::Collections::AnnotationSetRefListsSize,
- &dex_ir::Collections::AnnotationSetRefListsOffset
+ [](const dex_ir::Header* h) { return h->AnnotationSetRefLists().Size(); },
+ [](const dex_ir::Header* h) { return h->AnnotationSetRefLists().GetOffset(); }
}, {
"AnnoDir",
DexFile::kDexTypeAnnotationsDirectoryItem,
- &dex_ir::Collections::AnnotationsDirectoryItemsSize,
- &dex_ir::Collections::AnnotationsDirectoryItemsOffset
+ [](const dex_ir::Header* h) { return h->AnnotationsDirectoryItems().Size(); },
+ [](const dex_ir::Header* h) { return h->AnnotationsDirectoryItems().GetOffset(); }
}, {
"DebugInfo",
DexFile::kDexTypeDebugInfoItem,
- &dex_ir::Collections::DebugInfoItemsSize,
- &dex_ir::Collections::DebugInfoItemsOffset
+ [](const dex_ir::Header* h) { return h->DebugInfoItems().Size(); },
+ [](const dex_ir::Header* h) { return h->DebugInfoItems().GetOffset(); }
}, {
"CodeItem",
DexFile::kDexTypeCodeItem,
- &dex_ir::Collections::CodeItemsSize,
- &dex_ir::Collections::CodeItemsOffset
+ [](const dex_ir::Header* h) { return h->CodeItems().Size(); },
+ [](const dex_ir::Header* h) { return h->CodeItems().GetOffset(); }
}, {
"ClassData",
DexFile::kDexTypeClassDataItem,
- &dex_ir::Collections::ClassDatasSize,
- &dex_ir::Collections::ClassDatasOffset
+ [](const dex_ir::Header* h) { return h->ClassDatas().Size(); },
+ [](const dex_ir::Header* h) { return h->ClassDatas().GetOffset(); }
}
};
std::vector<dex_ir::DexFileSection> GetSortedDexFileSections(dex_ir::Header* header,
dex_ir::SortDirection direction) {
- const dex_ir::Collections& collections = header->GetCollections();
std::vector<dex_ir::DexFileSection> sorted_sections;
// Build the table that will map from offset to color
for (const FileSectionDescriptor& s : kFileSectionDescriptors) {
sorted_sections.push_back(dex_ir::DexFileSection(s.name,
s.type,
- s.size_fn(collections),
- s.offset_fn(collections)));
+ s.size_fn(header),
+ s.offset_fn(header)));
}
// Sort by offset.
std::sort(sorted_sections.begin(),
diff --git a/dexlayout/dex_ir.h b/dexlayout/dex_ir.h
index 5ecad2bf87..9f355ba9e8 100644
--- a/dexlayout/dex_ir.h
+++ b/dexlayout/dex_ir.h
@@ -24,6 +24,7 @@
#include <map>
#include <vector>
+#include "base/iteration_range.h"
#include "base/leb128.h"
#include "base/stl_util.h"
#include "dex/dex_file-inl.h"
@@ -107,377 +108,195 @@ class AbstractDispatcher {
DISALLOW_COPY_AND_ASSIGN(AbstractDispatcher);
};
-// Collections become owners of the objects added by moving them into unique pointers.
-template<class T> class CollectionBase {
+template<class T> class Iterator : public std::iterator<std::random_access_iterator_tag, T> {
public:
- CollectionBase() = default;
+ using value_type = typename std::iterator<std::random_access_iterator_tag, T>::value_type;
+ using difference_type =
+ typename std::iterator<std::random_access_iterator_tag, value_type>::difference_type;
+ using pointer = typename std::iterator<std::random_access_iterator_tag, value_type>::pointer;
+ using reference = typename std::iterator<std::random_access_iterator_tag, value_type>::reference;
+
+ Iterator(const Iterator&) = default;
+ Iterator(Iterator&&) = default;
+ Iterator& operator=(const Iterator&) = default;
+ Iterator& operator=(Iterator&&) = default;
+
+ Iterator(const std::vector<T>& vector,
+ uint32_t position,
+ uint32_t iterator_end)
+ : vector_(&vector),
+ position_(position),
+ iterator_end_(iterator_end) { }
+ Iterator() : vector_(nullptr), position_(0U), iterator_end_(0U) { }
+
+ bool IsValid() const { return position_ < iterator_end_; }
+
+ bool operator==(const Iterator& rhs) const { return position_ == rhs.position_; }
+ bool operator!=(const Iterator& rhs) const { return !(*this == rhs); }
+ bool operator<(const Iterator& rhs) const { return position_ < rhs.position_; }
+ bool operator>(const Iterator& rhs) const { return rhs < *this; }
+ bool operator<=(const Iterator& rhs) const { return !(rhs < *this); }
+ bool operator>=(const Iterator& rhs) const { return !(*this < rhs); }
+
+ Iterator& operator++() { // Value after modification.
+ ++position_;
+ return *this;
+ }
- uint32_t GetOffset() const {
- return offset_;
+ Iterator operator++(int) {
+ Iterator temp = *this;
+ ++position_;
+ return temp;
}
- void SetOffset(uint32_t new_offset) {
- offset_ = new_offset;
+
+ Iterator& operator+=(difference_type delta) {
+ position_ += delta;
+ return *this;
}
- private:
- // Start out unassigned.
- uint32_t offset_ = 0u;
+ Iterator operator+(difference_type delta) const {
+ Iterator temp = *this;
+ temp += delta;
+ return temp;
+ }
- DISALLOW_COPY_AND_ASSIGN(CollectionBase);
-};
+ Iterator& operator--() { // Value after modification.
+ --position_;
+ return *this;
+ }
-template<class T> class CollectionVector : public CollectionBase<T> {
- public:
- using Vector = std::vector<std::unique_ptr<T>>;
- CollectionVector() = default;
+ Iterator operator--(int) {
+ Iterator temp = *this;
+ --position_;
+ return temp;
+ }
- uint32_t Size() const { return collection_.size(); }
- Vector& Collection() { return collection_; }
- const Vector& Collection() const { return collection_; }
+ Iterator& operator-=(difference_type delta) {
+ position_ -= delta;
+ return *this;
+ }
- // Sort the vector by copying pointers over.
- template <typename MapType>
- void SortByMapOrder(const MapType& map) {
- auto it = map.begin();
- CHECK_EQ(map.size(), Size());
- for (size_t i = 0; i < Size(); ++i) {
- // There are times when the array will temporarily contain the same pointer twice, doing the
- // release here sure there is no double free errors.
- Collection()[i].release();
- Collection()[i].reset(it->second);
- ++it;
- }
+ Iterator operator-(difference_type delta) const {
+ Iterator temp = *this;
+ temp -= delta;
+ return temp;
}
- protected:
- Vector collection_;
+ difference_type operator-(const Iterator& rhs) {
+ return position_ - rhs.position_;
+ }
- void AddItem(T* object) {
- collection_.push_back(std::unique_ptr<T>(object));
+ reference operator*() const {
+ return const_cast<reference>((*vector_)[position_]);
}
- private:
- friend class Collections;
- DISALLOW_COPY_AND_ASSIGN(CollectionVector);
-};
+ pointer operator->() const {
+ return const_cast<pointer>(&((*vector_)[position_]));
+ }
-template<class T> class IndexedCollectionVector : public CollectionVector<T> {
- public:
- using Vector = std::vector<std::unique_ptr<T>>;
- IndexedCollectionVector() = default;
+ reference operator[](difference_type n) const {
+ return (*vector_)[position_ + n];
+ }
private:
- void AddIndexedItem(T* object, uint32_t index) {
- object->SetIndex(index);
- CollectionVector<T>::collection_.push_back(std::unique_ptr<T>(object));
- }
+ const std::vector<T>* vector_;
+ uint32_t position_;
+ uint32_t iterator_end_;
- friend class Collections;
- DISALLOW_COPY_AND_ASSIGN(IndexedCollectionVector);
+ template <typename U>
+ friend bool operator<(const Iterator<U>& lhs, const Iterator<U>& rhs);
};
-template<class T> class CollectionMap : public CollectionBase<T> {
+// Collections become owners of the objects added by moving them into unique pointers.
+class CollectionBase {
public:
- CollectionMap() = default;
-
- // Returns the existing item if it is already inserted, null otherwise.
- T* GetExistingObject(uint32_t offset) {
- auto it = collection_.find(offset);
- return it != collection_.end() ? it->second : nullptr;
- }
+ CollectionBase() = default;
+ virtual ~CollectionBase() { }
- // Lower case for template interop with std::map.
- uint32_t size() const { return collection_.size(); }
- std::map<uint32_t, T*>& Collection() { return collection_; }
+ uint32_t GetOffset() const { return offset_; }
+ void SetOffset(uint32_t new_offset) { offset_ = new_offset; }
+ virtual uint32_t Size() const { return 0U; }
private:
- std::map<uint32_t, T*> collection_;
-
- void AddItem(T* object, uint32_t offset) {
- auto it = collection_.emplace(offset, object);
- CHECK(it.second) << "CollectionMap already has an object with offset " << offset << " "
- << " and address " << it.first->second;
- }
+ // Start out unassigned.
+ uint32_t offset_ = 0u;
- friend class Collections;
- DISALLOW_COPY_AND_ASSIGN(CollectionMap);
+ DISALLOW_COPY_AND_ASSIGN(CollectionBase);
};
-class Collections {
+template<class T> class CollectionVector : public CollectionBase {
public:
- Collections() = default;
-
- CollectionVector<StringId>::Vector& StringIds() { return string_ids_.Collection(); }
- CollectionVector<TypeId>::Vector& TypeIds() { return type_ids_.Collection(); }
- CollectionVector<ProtoId>::Vector& ProtoIds() { return proto_ids_.Collection(); }
- CollectionVector<FieldId>::Vector& FieldIds() { return field_ids_.Collection(); }
- CollectionVector<MethodId>::Vector& MethodIds() { return method_ids_.Collection(); }
- CollectionVector<ClassDef>::Vector& ClassDefs() { return class_defs_.Collection(); }
- CollectionVector<CallSiteId>::Vector& CallSiteIds() { return call_site_ids_.Collection(); }
- CollectionVector<MethodHandleItem>::Vector& MethodHandleItems()
- { return method_handle_items_.Collection(); }
- CollectionVector<StringData>::Vector& StringDatas() { return string_datas_.Collection(); }
- CollectionVector<TypeList>::Vector& TypeLists() { return type_lists_.Collection(); }
- CollectionVector<EncodedArrayItem>::Vector& EncodedArrayItems()
- { return encoded_array_items_.Collection(); }
- CollectionVector<AnnotationItem>::Vector& AnnotationItems()
- { return annotation_items_.Collection(); }
- CollectionVector<AnnotationSetItem>::Vector& AnnotationSetItems()
- { return annotation_set_items_.Collection(); }
- CollectionVector<AnnotationSetRefList>::Vector& AnnotationSetRefLists()
- { return annotation_set_ref_lists_.Collection(); }
- CollectionVector<AnnotationsDirectoryItem>::Vector& AnnotationsDirectoryItems()
- { return annotations_directory_items_.Collection(); }
- CollectionVector<DebugInfoItem>::Vector& DebugInfoItems()
- { return debug_info_items_.Collection(); }
- CollectionVector<CodeItem>::Vector& CodeItems() { return code_items_.Collection(); }
- CollectionVector<ClassData>::Vector& ClassDatas() { return class_datas_.Collection(); }
-
- const CollectionVector<ClassDef>::Vector& ClassDefs() const { return class_defs_.Collection(); }
-
- void CreateStringId(const DexFile& dex_file, uint32_t i);
- void CreateTypeId(const DexFile& dex_file, uint32_t i);
- void CreateProtoId(const DexFile& dex_file, uint32_t i);
- void CreateFieldId(const DexFile& dex_file, uint32_t i);
- void CreateMethodId(const DexFile& dex_file, uint32_t i);
- void CreateClassDef(const DexFile& dex_file, uint32_t i);
- void CreateCallSiteId(const DexFile& dex_file, uint32_t i);
- void CreateMethodHandleItem(const DexFile& dex_file, uint32_t i);
-
- void CreateCallSitesAndMethodHandles(const DexFile& dex_file);
-
- TypeList* CreateTypeList(const DexFile::TypeList* type_list, uint32_t offset);
- EncodedArrayItem* CreateEncodedArrayItem(const DexFile& dex_file,
- const uint8_t* static_data,
- uint32_t offset);
- AnnotationItem* CreateAnnotationItem(const DexFile& dex_file,
- const DexFile::AnnotationItem* annotation);
- AnnotationSetItem* CreateAnnotationSetItem(const DexFile& dex_file,
- const DexFile::AnnotationSetItem* disk_annotations_item, uint32_t offset);
- AnnotationsDirectoryItem* CreateAnnotationsDirectoryItem(const DexFile& dex_file,
- const DexFile::AnnotationsDirectoryItem* disk_annotations_item, uint32_t offset);
- CodeItem* DedupeOrCreateCodeItem(const DexFile& dex_file,
- const DexFile::CodeItem* disk_code_item,
- uint32_t offset,
- uint32_t dex_method_index);
- ClassData* CreateClassData(const DexFile& dex_file, const uint8_t* encoded_data, uint32_t offset);
- void AddAnnotationsFromMapListSection(const DexFile& dex_file,
- uint32_t start_offset,
- uint32_t count);
-
- StringId* GetStringId(uint32_t index) {
- CHECK_LT(index, StringIdsSize());
- return StringIds()[index].get();
- }
- TypeId* GetTypeId(uint32_t index) {
- CHECK_LT(index, TypeIdsSize());
- return TypeIds()[index].get();
- }
- ProtoId* GetProtoId(uint32_t index) {
- CHECK_LT(index, ProtoIdsSize());
- return ProtoIds()[index].get();
- }
- FieldId* GetFieldId(uint32_t index) {
- CHECK_LT(index, FieldIdsSize());
- return FieldIds()[index].get();
- }
- MethodId* GetMethodId(uint32_t index) {
- CHECK_LT(index, MethodIdsSize());
- return MethodIds()[index].get();
- }
- ClassDef* GetClassDef(uint32_t index) {
- CHECK_LT(index, ClassDefsSize());
- return ClassDefs()[index].get();
- }
- CallSiteId* GetCallSiteId(uint32_t index) {
- CHECK_LT(index, CallSiteIdsSize());
- return CallSiteIds()[index].get();
- }
- MethodHandleItem* GetMethodHandle(uint32_t index) {
- CHECK_LT(index, MethodHandleItemsSize());
- return MethodHandleItems()[index].get();
- }
+ using ElementType = std::unique_ptr<T>;
- StringId* GetStringIdOrNullPtr(uint32_t index) {
- return index == dex::kDexNoIndex ? nullptr : GetStringId(index);
+ CollectionVector() { }
+ explicit CollectionVector(size_t size) {
+ // Preallocate so that assignment does not invalidate pointers into the vector.
+ collection_.reserve(size);
}
- TypeId* GetTypeIdOrNullPtr(uint16_t index) {
- return index == DexFile::kDexNoIndex16 ? nullptr : GetTypeId(index);
+ virtual ~CollectionVector() OVERRIDE { }
+
+ template<class... Args>
+ T* CreateAndAddItem(Args&&... args) {
+ T* object = new T(std::forward<Args>(args)...);
+ collection_.push_back(std::unique_ptr<T>(object));
+ return object;
}
- uint32_t StringIdsOffset() const { return string_ids_.GetOffset(); }
- uint32_t TypeIdsOffset() const { return type_ids_.GetOffset(); }
- uint32_t ProtoIdsOffset() const { return proto_ids_.GetOffset(); }
- uint32_t FieldIdsOffset() const { return field_ids_.GetOffset(); }
- uint32_t MethodIdsOffset() const { return method_ids_.GetOffset(); }
- uint32_t ClassDefsOffset() const { return class_defs_.GetOffset(); }
- uint32_t CallSiteIdsOffset() const { return call_site_ids_.GetOffset(); }
- uint32_t MethodHandleItemsOffset() const { return method_handle_items_.GetOffset(); }
- uint32_t StringDatasOffset() const { return string_datas_.GetOffset(); }
- uint32_t TypeListsOffset() const { return type_lists_.GetOffset(); }
- uint32_t EncodedArrayItemsOffset() const { return encoded_array_items_.GetOffset(); }
- uint32_t AnnotationItemsOffset() const { return annotation_items_.GetOffset(); }
- uint32_t AnnotationSetItemsOffset() const { return annotation_set_items_.GetOffset(); }
- uint32_t AnnotationSetRefListsOffset() const { return annotation_set_ref_lists_.GetOffset(); }
- uint32_t AnnotationsDirectoryItemsOffset() const
- { return annotations_directory_items_.GetOffset(); }
- uint32_t DebugInfoItemsOffset() const { return debug_info_items_.GetOffset(); }
- uint32_t CodeItemsOffset() const { return code_items_.GetOffset(); }
- uint32_t ClassDatasOffset() const { return class_datas_.GetOffset(); }
- uint32_t MapListOffset() const { return map_list_offset_; }
+ virtual uint32_t Size() const OVERRIDE { return collection_.size(); }
- void SetStringIdsOffset(uint32_t new_offset) { string_ids_.SetOffset(new_offset); }
- void SetTypeIdsOffset(uint32_t new_offset) { type_ids_.SetOffset(new_offset); }
- void SetProtoIdsOffset(uint32_t new_offset) { proto_ids_.SetOffset(new_offset); }
- void SetFieldIdsOffset(uint32_t new_offset) { field_ids_.SetOffset(new_offset); }
- void SetMethodIdsOffset(uint32_t new_offset) { method_ids_.SetOffset(new_offset); }
- void SetClassDefsOffset(uint32_t new_offset) { class_defs_.SetOffset(new_offset); }
- void SetCallSiteIdsOffset(uint32_t new_offset) { call_site_ids_.SetOffset(new_offset); }
- void SetMethodHandleItemsOffset(uint32_t new_offset)
- { method_handle_items_.SetOffset(new_offset); }
- void SetStringDatasOffset(uint32_t new_offset) { string_datas_.SetOffset(new_offset); }
- void SetTypeListsOffset(uint32_t new_offset) { type_lists_.SetOffset(new_offset); }
- void SetEncodedArrayItemsOffset(uint32_t new_offset)
- { encoded_array_items_.SetOffset(new_offset); }
- void SetAnnotationItemsOffset(uint32_t new_offset) { annotation_items_.SetOffset(new_offset); }
- void SetAnnotationSetItemsOffset(uint32_t new_offset)
- { annotation_set_items_.SetOffset(new_offset); }
- void SetAnnotationSetRefListsOffset(uint32_t new_offset)
- { annotation_set_ref_lists_.SetOffset(new_offset); }
- void SetAnnotationsDirectoryItemsOffset(uint32_t new_offset)
- { annotations_directory_items_.SetOffset(new_offset); }
- void SetDebugInfoItemsOffset(uint32_t new_offset) { debug_info_items_.SetOffset(new_offset); }
- void SetCodeItemsOffset(uint32_t new_offset) { code_items_.SetOffset(new_offset); }
- void SetClassDatasOffset(uint32_t new_offset) { class_datas_.SetOffset(new_offset); }
- void SetMapListOffset(uint32_t new_offset) { map_list_offset_ = new_offset; }
+ Iterator<ElementType> begin() const { return Iterator<ElementType>(collection_, 0U, Size()); }
+ Iterator<ElementType> end() const { return Iterator<ElementType>(collection_, Size(), Size()); }
- uint32_t StringIdsSize() const { return string_ids_.Size(); }
- uint32_t TypeIdsSize() const { return type_ids_.Size(); }
- uint32_t ProtoIdsSize() const { return proto_ids_.Size(); }
- uint32_t FieldIdsSize() const { return field_ids_.Size(); }
- uint32_t MethodIdsSize() const { return method_ids_.Size(); }
- uint32_t ClassDefsSize() const { return class_defs_.Size(); }
- uint32_t CallSiteIdsSize() const { return call_site_ids_.Size(); }
- uint32_t MethodHandleItemsSize() const { return method_handle_items_.Size(); }
- uint32_t StringDatasSize() const { return string_datas_.Size(); }
- uint32_t TypeListsSize() const { return type_lists_.Size(); }
- uint32_t EncodedArrayItemsSize() const { return encoded_array_items_.Size(); }
- uint32_t AnnotationItemsSize() const { return annotation_items_.Size(); }
- uint32_t AnnotationSetItemsSize() const { return annotation_set_items_.Size(); }
- uint32_t AnnotationSetRefListsSize() const { return annotation_set_ref_lists_.Size(); }
- uint32_t AnnotationsDirectoryItemsSize() const { return annotations_directory_items_.Size(); }
- uint32_t DebugInfoItemsSize() const { return debug_info_items_.Size(); }
- uint32_t CodeItemsSize() const { return code_items_.Size(); }
- uint32_t ClassDatasSize() const { return class_datas_.Size(); }
-
- // Sort the vectors buy map order (same order that was used in the input file).
- void SortVectorsByMapOrder();
-
- template <typename Type>
- void AddItem(CollectionMap<Type>& map,
- CollectionVector<Type>& vector,
- Type* item,
- uint32_t offset) {
- DCHECK(!map.GetExistingObject(offset));
- DCHECK(!item->OffsetAssigned());
- if (eagerly_assign_offsets_) {
- item->SetOffset(offset);
- }
- map.AddItem(item, offset);
- vector.AddItem(item);
+ const ElementType& operator[](size_t index) const {
+ DCHECK_LT(index, Size());
+ return collection_[index];
}
-
- template <typename Type>
- void AddIndexedItem(IndexedCollectionVector<Type>& vector,
- Type* item,
- uint32_t offset,
- uint32_t index) {
- DCHECK(!item->OffsetAssigned());
- if (eagerly_assign_offsets_) {
- item->SetOffset(offset);
- }
- vector.AddIndexedItem(item, index);
+ ElementType& operator[](size_t index) {
+ DCHECK_LT(index, Size());
+ return collection_[index];
}
- void SetEagerlyAssignOffsets(bool eagerly_assign_offsets) {
- eagerly_assign_offsets_ = eagerly_assign_offsets;
- }
-
- void SetLinkData(std::vector<uint8_t>&& link_data) {
- link_data_ = std::move(link_data);
+ // Sort the vector by copying pointers over.
+ template <typename MapType>
+ void SortByMapOrder(const MapType& map) {
+ auto it = map.begin();
+ CHECK_EQ(map.size(), Size());
+ for (size_t i = 0; i < Size(); ++i) {
+ // There are times when the array will temporarily contain the same pointer twice, doing the
+ // release here sure there is no double free errors.
+ collection_[i].release();
+ collection_[i].reset(it->second);
+ ++it;
+ }
}
- const std::vector<uint8_t>& LinkData() const {
- return link_data_;
- }
+ protected:
+ std::vector<ElementType> collection_;
private:
- EncodedValue* ReadEncodedValue(const DexFile& dex_file, const uint8_t** data);
- EncodedValue* ReadEncodedValue(const DexFile& dex_file,
- const uint8_t** data,
- uint8_t type,
- uint8_t length);
- void ReadEncodedValue(const DexFile& dex_file,
- const uint8_t** data,
- uint8_t type,
- uint8_t length,
- EncodedValue* item);
-
- ParameterAnnotation* GenerateParameterAnnotation(const DexFile& dex_file, MethodId* method_id,
- const DexFile::AnnotationSetRefList* annotation_set_ref_list, uint32_t offset);
- MethodItem* GenerateMethodItem(const DexFile& dex_file, ClassDataItemIterator& cdii);
-
- // Collection vectors own the IR data.
- IndexedCollectionVector<StringId> string_ids_;
- IndexedCollectionVector<TypeId> type_ids_;
- IndexedCollectionVector<ProtoId> proto_ids_;
- IndexedCollectionVector<FieldId> field_ids_;
- IndexedCollectionVector<MethodId> method_ids_;
- IndexedCollectionVector<CallSiteId> call_site_ids_;
- IndexedCollectionVector<MethodHandleItem> method_handle_items_;
- IndexedCollectionVector<StringData> string_datas_;
- IndexedCollectionVector<TypeList> type_lists_;
- IndexedCollectionVector<EncodedArrayItem> encoded_array_items_;
- IndexedCollectionVector<AnnotationItem> annotation_items_;
- IndexedCollectionVector<AnnotationSetItem> annotation_set_items_;
- IndexedCollectionVector<AnnotationSetRefList> annotation_set_ref_lists_;
- IndexedCollectionVector<AnnotationsDirectoryItem> annotations_directory_items_;
- IndexedCollectionVector<ClassDef> class_defs_;
- // The order of the vectors controls the layout of the output file by index order, to change the
- // layout just sort the vector. Note that you may only change the order of the non indexed vectors
- // below. Indexed vectors are accessed by indices in other places, changing the sorting order will
- // invalidate the existing indices and is not currently supported.
- CollectionVector<DebugInfoItem> debug_info_items_;
- CollectionVector<CodeItem> code_items_;
- CollectionVector<ClassData> class_datas_;
-
- // Note that the maps do not have ownership, the vectors do.
- // TODO: These maps should only be required for building the IR and should be put in a separate
- // IR builder class.
- CollectionMap<StringData> string_datas_map_;
- CollectionMap<TypeList> type_lists_map_;
- CollectionMap<EncodedArrayItem> encoded_array_items_map_;
- CollectionMap<AnnotationItem> annotation_items_map_;
- CollectionMap<AnnotationSetItem> annotation_set_items_map_;
- CollectionMap<AnnotationSetRefList> annotation_set_ref_lists_map_;
- CollectionMap<AnnotationsDirectoryItem> annotations_directory_items_map_;
- CollectionMap<DebugInfoItem> debug_info_items_map_;
- // Code item maps need to check both the debug info offset and debug info offset, do not use
- // CollectionMap.
- // First offset is the code item offset, second is the debug info offset.
- std::map<std::pair<uint32_t, uint32_t>, CodeItem*> code_items_map_;
- CollectionMap<ClassData> class_datas_map_;
+ DISALLOW_COPY_AND_ASSIGN(CollectionVector);
+};
- uint32_t map_list_offset_ = 0;
+template<class T> class IndexedCollectionVector : public CollectionVector<T> {
+ public:
+ using Vector = std::vector<std::unique_ptr<T>>;
+ IndexedCollectionVector() = default;
+ explicit IndexedCollectionVector(size_t size) : CollectionVector<T>(size) { }
- // Link data.
- std::vector<uint8_t> link_data_;
+ template <class... Args>
+ T* CreateAndAddIndexedItem(uint32_t index, Args&&... args) {
+ T* object = CollectionVector<T>::CreateAndAddItem(std::forward<Args>(args)...);
+ object->SetIndex(index);
+ return object;
+ }
- // If we eagerly assign offsets during IR building or later after layout. Must be false if
- // changing the layout is enabled.
- bool eagerly_assign_offsets_;
+ T* operator[](size_t index) const {
+ DCHECK_NE(CollectionVector<T>::collection_[index].get(), static_cast<T*>(nullptr));
+ return CollectionVector<T>::collection_[index].get();
+ }
- DISALLOW_COPY_AND_ASSIGN(Collections);
+ private:
+ DISALLOW_COPY_AND_ASSIGN(IndexedCollectionVector);
};
class Item {
@@ -485,6 +304,8 @@ class Item {
Item() { }
virtual ~Item() { }
+ Item(Item&&) = default;
+
// Return the assigned offset.
uint32_t GetOffset() const WARN_UNUSED {
CHECK(OffsetAssigned());
@@ -536,18 +357,54 @@ class Header : public Item {
uint32_t data_size,
uint32_t data_offset,
bool support_default_methods)
+ : Item(0, kHeaderItemSize), support_default_methods_(support_default_methods) {
+ ConstructorHelper(magic,
+ checksum,
+ signature,
+ endian_tag,
+ file_size,
+ header_size,
+ link_size,
+ link_offset,
+ data_size,
+ data_offset);
+ }
+
+ Header(const uint8_t* magic,
+ uint32_t checksum,
+ const uint8_t* signature,
+ uint32_t endian_tag,
+ uint32_t file_size,
+ uint32_t header_size,
+ uint32_t link_size,
+ uint32_t link_offset,
+ uint32_t data_size,
+ uint32_t data_offset,
+ bool support_default_methods,
+ uint32_t num_string_ids,
+ uint32_t num_type_ids,
+ uint32_t num_proto_ids,
+ uint32_t num_field_ids,
+ uint32_t num_method_ids,
+ uint32_t num_class_defs)
: Item(0, kHeaderItemSize),
- checksum_(checksum),
- endian_tag_(endian_tag),
- file_size_(file_size),
- header_size_(header_size),
- link_size_(link_size),
- link_offset_(link_offset),
- data_size_(data_size),
- data_offset_(data_offset),
- support_default_methods_(support_default_methods) {
- memcpy(magic_, magic, sizeof(magic_));
- memcpy(signature_, signature, sizeof(signature_));
+ support_default_methods_(support_default_methods),
+ string_ids_(num_string_ids),
+ type_ids_(num_type_ids),
+ proto_ids_(num_proto_ids),
+ field_ids_(num_field_ids),
+ method_ids_(num_method_ids),
+ class_defs_(num_class_defs) {
+ ConstructorHelper(magic,
+ checksum,
+ signature,
+ endian_tag,
+ file_size,
+ header_size,
+ link_size,
+ link_offset,
+ data_size,
+ data_offset);
}
~Header() OVERRIDE { }
@@ -575,7 +432,69 @@ class Header : public Item {
void SetDataSize(uint32_t new_data_size) { data_size_ = new_data_size; }
void SetDataOffset(uint32_t new_data_offset) { data_offset_ = new_data_offset; }
- Collections& GetCollections() { return collections_; }
+ IndexedCollectionVector<StringId>& StringIds() { return string_ids_; }
+ const IndexedCollectionVector<StringId>& StringIds() const { return string_ids_; }
+ IndexedCollectionVector<TypeId>& TypeIds() { return type_ids_; }
+ const IndexedCollectionVector<TypeId>& TypeIds() const { return type_ids_; }
+ IndexedCollectionVector<ProtoId>& ProtoIds() { return proto_ids_; }
+ const IndexedCollectionVector<ProtoId>& ProtoIds() const { return proto_ids_; }
+ IndexedCollectionVector<FieldId>& FieldIds() { return field_ids_; }
+ const IndexedCollectionVector<FieldId>& FieldIds() const { return field_ids_; }
+ IndexedCollectionVector<MethodId>& MethodIds() { return method_ids_; }
+ const IndexedCollectionVector<MethodId>& MethodIds() const { return method_ids_; }
+ IndexedCollectionVector<ClassDef>& ClassDefs() { return class_defs_; }
+ const IndexedCollectionVector<ClassDef>& ClassDefs() const { return class_defs_; }
+ IndexedCollectionVector<CallSiteId>& CallSiteIds() { return call_site_ids_; }
+ const IndexedCollectionVector<CallSiteId>& CallSiteIds() const { return call_site_ids_; }
+ IndexedCollectionVector<MethodHandleItem>& MethodHandleItems() { return method_handle_items_; }
+ const IndexedCollectionVector<MethodHandleItem>& MethodHandleItems() const {
+ return method_handle_items_;
+ }
+ CollectionVector<StringData>& StringDatas() { return string_datas_; }
+ const CollectionVector<StringData>& StringDatas() const { return string_datas_; }
+ CollectionVector<TypeList>& TypeLists() { return type_lists_; }
+ const CollectionVector<TypeList>& TypeLists() const { return type_lists_; }
+ CollectionVector<EncodedArrayItem>& EncodedArrayItems() { return encoded_array_items_; }
+ const CollectionVector<EncodedArrayItem>& EncodedArrayItems() const {
+ return encoded_array_items_;
+ }
+ CollectionVector<AnnotationItem>& AnnotationItems() { return annotation_items_; }
+ const CollectionVector<AnnotationItem>& AnnotationItems() const { return annotation_items_; }
+ CollectionVector<AnnotationSetItem>& AnnotationSetItems() { return annotation_set_items_; }
+ const CollectionVector<AnnotationSetItem>& AnnotationSetItems() const {
+ return annotation_set_items_;
+ }
+ CollectionVector<AnnotationSetRefList>& AnnotationSetRefLists() {
+ return annotation_set_ref_lists_;
+ }
+ const CollectionVector<AnnotationSetRefList>& AnnotationSetRefLists() const {
+ return annotation_set_ref_lists_;
+ }
+ CollectionVector<AnnotationsDirectoryItem>& AnnotationsDirectoryItems() {
+ return annotations_directory_items_;
+ }
+ const CollectionVector<AnnotationsDirectoryItem>& AnnotationsDirectoryItems() const {
+ return annotations_directory_items_;
+ }
+ CollectionVector<DebugInfoItem>& DebugInfoItems() { return debug_info_items_; }
+ const CollectionVector<DebugInfoItem>& DebugInfoItems() const { return debug_info_items_; }
+ CollectionVector<CodeItem>& CodeItems() { return code_items_; }
+ const CollectionVector<CodeItem>& CodeItems() const { return code_items_; }
+ CollectionVector<ClassData>& ClassDatas() { return class_datas_; }
+ const CollectionVector<ClassData>& ClassDatas() const { return class_datas_; }
+
+ StringId* GetStringIdOrNullPtr(uint32_t index) {
+ return index == dex::kDexNoIndex ? nullptr : StringIds()[index];
+ }
+ TypeId* GetTypeIdOrNullPtr(uint16_t index) {
+ return index == DexFile::kDexNoIndex16 ? nullptr : TypeIds()[index];
+ }
+
+ uint32_t MapListOffset() const { return map_list_offset_; }
+ void SetMapListOffset(uint32_t new_offset) { map_list_offset_ = new_offset; }
+
+ const std::vector<uint8_t>& LinkData() const { return link_data_; }
+ void SetLinkData(std::vector<uint8_t>&& link_data) { link_data_ = std::move(link_data); }
void Accept(AbstractDispatcher* dispatch) { dispatch->Dispatch(this); }
@@ -596,7 +515,56 @@ class Header : public Item {
uint32_t data_offset_;
const bool support_default_methods_;
- Collections collections_;
+ void ConstructorHelper(const uint8_t* magic,
+ uint32_t checksum,
+ const uint8_t* signature,
+ uint32_t endian_tag,
+ uint32_t file_size,
+ uint32_t header_size,
+ uint32_t link_size,
+ uint32_t link_offset,
+ uint32_t data_size,
+ uint32_t data_offset) {
+ checksum_ = checksum;
+ endian_tag_ = endian_tag;
+ file_size_ = file_size;
+ header_size_ = header_size;
+ link_size_ = link_size;
+ link_offset_ = link_offset;
+ data_size_ = data_size;
+ data_offset_ = data_offset;
+ memcpy(magic_, magic, sizeof(magic_));
+ memcpy(signature_, signature, sizeof(signature_));
+ }
+
+ // Collection vectors own the IR data.
+ IndexedCollectionVector<StringId> string_ids_;
+ IndexedCollectionVector<TypeId> type_ids_;
+ IndexedCollectionVector<ProtoId> proto_ids_;
+ IndexedCollectionVector<FieldId> field_ids_;
+ IndexedCollectionVector<MethodId> method_ids_;
+ IndexedCollectionVector<ClassDef> class_defs_;
+ IndexedCollectionVector<CallSiteId> call_site_ids_;
+ IndexedCollectionVector<MethodHandleItem> method_handle_items_;
+ IndexedCollectionVector<StringData> string_datas_;
+ IndexedCollectionVector<TypeList> type_lists_;
+ IndexedCollectionVector<EncodedArrayItem> encoded_array_items_;
+ IndexedCollectionVector<AnnotationItem> annotation_items_;
+ IndexedCollectionVector<AnnotationSetItem> annotation_set_items_;
+ IndexedCollectionVector<AnnotationSetRefList> annotation_set_ref_lists_;
+ IndexedCollectionVector<AnnotationsDirectoryItem> annotations_directory_items_;
+ // The order of the vectors controls the layout of the output file by index order, to change the
+ // layout just sort the vector. Note that you may only change the order of the non indexed vectors
+ // below. Indexed vectors are accessed by indices in other places, changing the sorting order will
+ // invalidate the existing indices and is not currently supported.
+ CollectionVector<DebugInfoItem> debug_info_items_;
+ CollectionVector<CodeItem> code_items_;
+ CollectionVector<ClassData> class_datas_;
+
+ uint32_t map_list_offset_ = 0;
+
+ // Link data.
+ std::vector<uint8_t> link_data_;
DISALLOW_COPY_AND_ASSIGN(Header);
};
@@ -744,6 +712,8 @@ class FieldItem : public Item {
: access_flags_(access_flags), field_id_(field_id) { }
~FieldItem() OVERRIDE { }
+ FieldItem(FieldItem&&) = default;
+
uint32_t GetAccessFlags() const { return access_flags_; }
const FieldId* GetFieldId() const { return field_id_; }
@@ -756,7 +726,7 @@ class FieldItem : public Item {
DISALLOW_COPY_AND_ASSIGN(FieldItem);
};
-using FieldItemVector = std::vector<std::unique_ptr<FieldItem>>;
+using FieldItemVector = std::vector<FieldItem>;
class MethodItem : public Item {
public:
@@ -764,6 +734,8 @@ class MethodItem : public Item {
: access_flags_(access_flags), method_id_(method_id), code_(code) { }
~MethodItem() OVERRIDE { }
+ MethodItem(MethodItem&&) = default;
+
uint32_t GetAccessFlags() const { return access_flags_; }
const MethodId* GetMethodId() const { return method_id_; }
CodeItem* GetCodeItem() { return code_; }
@@ -778,7 +750,7 @@ class MethodItem : public Item {
DISALLOW_COPY_AND_ASSIGN(MethodItem);
};
-using MethodItemVector = std::vector<std::unique_ptr<MethodItem>>;
+using MethodItemVector = std::vector<MethodItem>;
class EncodedValue {
public:
diff --git a/dexlayout/dex_ir_builder.cc b/dexlayout/dex_ir_builder.cc
index 4f9bcdd742..a04a2349c4 100644
--- a/dexlayout/dex_ir_builder.cc
+++ b/dexlayout/dex_ir_builder.cc
@@ -20,14 +20,226 @@
#include <vector>
#include "dex_ir_builder.h"
+
+#include "dex/code_item_accessors-inl.h"
+#include "dex/dex_file_exception_helpers.h"
#include "dexlayout.h"
namespace art {
namespace dex_ir {
-static void CheckAndSetRemainingOffsets(const DexFile& dex_file,
- Collections* collections,
- const Options& options);
+static uint64_t ReadVarWidth(const uint8_t** data, uint8_t length, bool sign_extend) {
+ uint64_t value = 0;
+ for (uint32_t i = 0; i <= length; i++) {
+ value |= static_cast<uint64_t>(*(*data)++) << (i * 8);
+ }
+ if (sign_extend) {
+ int shift = (7 - length) * 8;
+ return (static_cast<int64_t>(value) << shift) >> shift;
+ }
+ return value;
+}
+
+static uint32_t GetDebugInfoStreamSize(const uint8_t* debug_info_stream) {
+ const uint8_t* stream = debug_info_stream;
+ DecodeUnsignedLeb128(&stream); // line_start
+ uint32_t parameters_size = DecodeUnsignedLeb128(&stream);
+ for (uint32_t i = 0; i < parameters_size; ++i) {
+ DecodeUnsignedLeb128P1(&stream); // Parameter name.
+ }
+
+ for (;;) {
+ uint8_t opcode = *stream++;
+ switch (opcode) {
+ case DexFile::DBG_END_SEQUENCE:
+ return stream - debug_info_stream; // end of stream.
+ case DexFile::DBG_ADVANCE_PC:
+ DecodeUnsignedLeb128(&stream); // addr_diff
+ break;
+ case DexFile::DBG_ADVANCE_LINE:
+ DecodeSignedLeb128(&stream); // line_diff
+ break;
+ case DexFile::DBG_START_LOCAL:
+ DecodeUnsignedLeb128(&stream); // register_num
+ DecodeUnsignedLeb128P1(&stream); // name_idx
+ DecodeUnsignedLeb128P1(&stream); // type_idx
+ break;
+ case DexFile::DBG_START_LOCAL_EXTENDED:
+ DecodeUnsignedLeb128(&stream); // register_num
+ DecodeUnsignedLeb128P1(&stream); // name_idx
+ DecodeUnsignedLeb128P1(&stream); // type_idx
+ DecodeUnsignedLeb128P1(&stream); // sig_idx
+ break;
+ case DexFile::DBG_END_LOCAL:
+ case DexFile::DBG_RESTART_LOCAL:
+ DecodeUnsignedLeb128(&stream); // register_num
+ break;
+ case DexFile::DBG_SET_PROLOGUE_END:
+ case DexFile::DBG_SET_EPILOGUE_BEGIN:
+ break;
+ case DexFile::DBG_SET_FILE: {
+ DecodeUnsignedLeb128P1(&stream); // name_idx
+ break;
+ }
+ default: {
+ break;
+ }
+ }
+ }
+}
+
+template<class T> class CollectionMap : public CollectionBase {
+ public:
+ CollectionMap() = default;
+ virtual ~CollectionMap() OVERRIDE { }
+
+ template <class... Args>
+ T* CreateAndAddItem(CollectionVector<T>& vector,
+ bool eagerly_assign_offsets,
+ uint32_t offset,
+ Args&&... args) {
+ T* item = vector.CreateAndAddItem(std::forward<Args>(args)...);
+ DCHECK(!GetExistingObject(offset));
+ DCHECK(!item->OffsetAssigned());
+ if (eagerly_assign_offsets) {
+ item->SetOffset(offset);
+ }
+ AddItem(item, offset);
+ return item;
+ }
+
+ // Returns the existing item if it is already inserted, null otherwise.
+ T* GetExistingObject(uint32_t offset) {
+ auto it = collection_.find(offset);
+ return it != collection_.end() ? it->second : nullptr;
+ }
+
+ // Lower case for template interop with std::map.
+ uint32_t size() const { return collection_.size(); }
+ std::map<uint32_t, T*>& Collection() { return collection_; }
+
+ private:
+ std::map<uint32_t, T*> collection_;
+
+ // CollectionMaps do not own the objects they contain, therefore AddItem is supported
+ // rather than CreateAndAddItem.
+ void AddItem(T* object, uint32_t offset) {
+ auto it = collection_.emplace(offset, object);
+ CHECK(it.second) << "CollectionMap already has an object with offset " << offset << " "
+ << " and address " << it.first->second;
+ }
+
+ DISALLOW_COPY_AND_ASSIGN(CollectionMap);
+};
+
+class BuilderMaps {
+ public:
+ BuilderMaps(Header* header, bool eagerly_assign_offsets)
+ : header_(header), eagerly_assign_offsets_(eagerly_assign_offsets) { }
+
+ void CreateStringId(const DexFile& dex_file, uint32_t i);
+ void CreateTypeId(const DexFile& dex_file, uint32_t i);
+ void CreateProtoId(const DexFile& dex_file, uint32_t i);
+ void CreateFieldId(const DexFile& dex_file, uint32_t i);
+ void CreateMethodId(const DexFile& dex_file, uint32_t i);
+ void CreateClassDef(const DexFile& dex_file, uint32_t i);
+ void CreateCallSiteId(const DexFile& dex_file, uint32_t i);
+ void CreateMethodHandleItem(const DexFile& dex_file, uint32_t i);
+
+ void CreateCallSitesAndMethodHandles(const DexFile& dex_file);
+
+ TypeList* CreateTypeList(const DexFile::TypeList* type_list, uint32_t offset);
+ EncodedArrayItem* CreateEncodedArrayItem(const DexFile& dex_file,
+ const uint8_t* static_data,
+ uint32_t offset);
+ AnnotationItem* CreateAnnotationItem(const DexFile& dex_file,
+ const DexFile::AnnotationItem* annotation);
+ AnnotationSetItem* CreateAnnotationSetItem(const DexFile& dex_file,
+ const DexFile::AnnotationSetItem* disk_annotations_item, uint32_t offset);
+ AnnotationsDirectoryItem* CreateAnnotationsDirectoryItem(const DexFile& dex_file,
+ const DexFile::AnnotationsDirectoryItem* disk_annotations_item, uint32_t offset);
+ CodeItem* DedupeOrCreateCodeItem(const DexFile& dex_file,
+ const DexFile::CodeItem* disk_code_item,
+ uint32_t offset,
+ uint32_t dex_method_index);
+ ClassData* CreateClassData(const DexFile& dex_file, const uint8_t* encoded_data, uint32_t offset);
+
+ void AddAnnotationsFromMapListSection(const DexFile& dex_file,
+ uint32_t start_offset,
+ uint32_t count);
+
+ void CheckAndSetRemainingOffsets(const DexFile& dex_file, const Options& options);
+
+ // Sort the vectors buy map order (same order that was used in the input file).
+ void SortVectorsByMapOrder();
+
+ private:
+ bool GetIdsFromByteCode(const CodeItem* code,
+ std::vector<TypeId*>* type_ids,
+ std::vector<StringId*>* string_ids,
+ std::vector<MethodId*>* method_ids,
+ std::vector<FieldId*>* field_ids);
+
+ bool GetIdFromInstruction(const Instruction* dec_insn,
+ std::vector<TypeId*>* type_ids,
+ std::vector<StringId*>* string_ids,
+ std::vector<MethodId*>* method_ids,
+ std::vector<FieldId*>* field_ids);
+
+ EncodedValue* ReadEncodedValue(const DexFile& dex_file, const uint8_t** data);
+ EncodedValue* ReadEncodedValue(const DexFile& dex_file,
+ const uint8_t** data,
+ uint8_t type,
+ uint8_t length);
+ void ReadEncodedValue(const DexFile& dex_file,
+ const uint8_t** data,
+ uint8_t type,
+ uint8_t length,
+ EncodedValue* item);
+
+ MethodItem GenerateMethodItem(const DexFile& dex_file, ClassDataItemIterator& cdii);
+
+ ParameterAnnotation* GenerateParameterAnnotation(
+ const DexFile& dex_file,
+ MethodId* method_id,
+ const DexFile::AnnotationSetRefList* annotation_set_ref_list,
+ uint32_t offset);
+
+ template <typename Type, class... Args>
+ Type* CreateAndAddIndexedItem(IndexedCollectionVector<Type>& vector,
+ uint32_t offset,
+ uint32_t index,
+ Args&&... args) {
+ Type* item = vector.CreateAndAddIndexedItem(index, std::forward<Args>(args)...);
+ DCHECK(!item->OffsetAssigned());
+ if (eagerly_assign_offsets_) {
+ item->SetOffset(offset);
+ }
+ return item;
+ }
+
+ Header* header_;
+ // If we eagerly assign offsets during IR building or later after layout. Must be false if
+ // changing the layout is enabled.
+ bool eagerly_assign_offsets_;
+
+ // Note: maps do not have ownership.
+ CollectionMap<StringData> string_datas_map_;
+ CollectionMap<TypeList> type_lists_map_;
+ CollectionMap<EncodedArrayItem> encoded_array_items_map_;
+ CollectionMap<AnnotationItem> annotation_items_map_;
+ CollectionMap<AnnotationSetItem> annotation_set_items_map_;
+ CollectionMap<AnnotationSetRefList> annotation_set_ref_lists_map_;
+ CollectionMap<AnnotationsDirectoryItem> annotations_directory_items_map_;
+ CollectionMap<DebugInfoItem> debug_info_items_map_;
+ // Code item maps need to check both the debug info offset and debug info offset, do not use
+ // CollectionMap.
+ // First offset is the code item offset, second is the debug info offset.
+ std::map<std::pair<uint32_t, uint32_t>, CodeItem*> code_items_map_;
+ CollectionMap<ClassData> class_datas_map_;
+
+ DISALLOW_COPY_AND_ASSIGN(BuilderMaps);
+};
Header* DexIrBuilder(const DexFile& dex_file,
bool eagerly_assign_offsets,
@@ -43,37 +255,42 @@ Header* DexIrBuilder(const DexFile& dex_file,
disk_header.link_off_,
disk_header.data_size_,
disk_header.data_off_,
- dex_file.SupportsDefaultMethods());
- Collections& collections = header->GetCollections();
- collections.SetEagerlyAssignOffsets(eagerly_assign_offsets);
+ dex_file.SupportsDefaultMethods(),
+ dex_file.NumStringIds(),
+ dex_file.NumTypeIds(),
+ dex_file.NumProtoIds(),
+ dex_file.NumFieldIds(),
+ dex_file.NumMethodIds(),
+ dex_file.NumClassDefs());
+ BuilderMaps builder_maps(header, eagerly_assign_offsets);
// Walk the rest of the header fields.
// StringId table.
- collections.SetStringIdsOffset(disk_header.string_ids_off_);
+ header->StringIds().SetOffset(disk_header.string_ids_off_);
for (uint32_t i = 0; i < dex_file.NumStringIds(); ++i) {
- collections.CreateStringId(dex_file, i);
+ builder_maps.CreateStringId(dex_file, i);
}
// TypeId table.
- collections.SetTypeIdsOffset(disk_header.type_ids_off_);
+ header->TypeIds().SetOffset(disk_header.type_ids_off_);
for (uint32_t i = 0; i < dex_file.NumTypeIds(); ++i) {
- collections.CreateTypeId(dex_file, i);
+ builder_maps.CreateTypeId(dex_file, i);
}
// ProtoId table.
- collections.SetProtoIdsOffset(disk_header.proto_ids_off_);
+ header->ProtoIds().SetOffset(disk_header.proto_ids_off_);
for (uint32_t i = 0; i < dex_file.NumProtoIds(); ++i) {
- collections.CreateProtoId(dex_file, i);
+ builder_maps.CreateProtoId(dex_file, i);
}
// FieldId table.
- collections.SetFieldIdsOffset(disk_header.field_ids_off_);
+ header->FieldIds().SetOffset(disk_header.field_ids_off_);
for (uint32_t i = 0; i < dex_file.NumFieldIds(); ++i) {
- collections.CreateFieldId(dex_file, i);
+ builder_maps.CreateFieldId(dex_file, i);
}
// MethodId table.
- collections.SetMethodIdsOffset(disk_header.method_ids_off_);
+ header->MethodIds().SetOffset(disk_header.method_ids_off_);
for (uint32_t i = 0; i < dex_file.NumMethodIds(); ++i) {
- collections.CreateMethodId(dex_file, i);
+ builder_maps.CreateMethodId(dex_file, i);
}
// ClassDef table.
- collections.SetClassDefsOffset(disk_header.class_defs_off_);
+ header->ClassDefs().SetOffset(disk_header.class_defs_off_);
for (uint32_t i = 0; i < dex_file.NumClassDefs(); ++i) {
if (!options.class_filter_.empty()) {
// If the filter is enabled (not empty), filter out classes that don't have a matching
@@ -84,28 +301,29 @@ Header* DexIrBuilder(const DexFile& dex_file,
continue;
}
}
- collections.CreateClassDef(dex_file, i);
+ builder_maps.CreateClassDef(dex_file, i);
}
// MapItem.
- collections.SetMapListOffset(disk_header.map_off_);
+ header->SetMapListOffset(disk_header.map_off_);
// CallSiteIds and MethodHandleItems.
- collections.CreateCallSitesAndMethodHandles(dex_file);
- CheckAndSetRemainingOffsets(dex_file, &collections, options);
+ builder_maps.CreateCallSitesAndMethodHandles(dex_file);
+ builder_maps.CheckAndSetRemainingOffsets(dex_file, options);
// Sort the vectors by the map order (same order as the file).
- collections.SortVectorsByMapOrder();
+ builder_maps.SortVectorsByMapOrder();
// Load the link data if it exists.
- collections.SetLinkData(std::vector<uint8_t>(
+ header->SetLinkData(std::vector<uint8_t>(
dex_file.DataBegin() + dex_file.GetHeader().link_off_,
dex_file.DataBegin() + dex_file.GetHeader().link_off_ + dex_file.GetHeader().link_size_));
return header;
}
-static void CheckAndSetRemainingOffsets(const DexFile& dex_file,
- Collections* collections,
- const Options& options) {
+/*
+ * Get all the types, strings, methods, and fields referred to from bytecode.
+ */
+void BuilderMaps::CheckAndSetRemainingOffsets(const DexFile& dex_file, const Options& options) {
const DexFile::Header& disk_header = dex_file.GetHeader();
// Read MapItems and validate/set remaining offsets.
const DexFile::MapList* map = dex_file.GetMapList();
@@ -118,74 +336,74 @@ static void CheckAndSetRemainingOffsets(const DexFile& dex_file,
CHECK_EQ(item->offset_, 0u);
break;
case DexFile::kDexTypeStringIdItem:
- CHECK_EQ(item->size_, collections->StringIdsSize());
- CHECK_EQ(item->offset_, collections->StringIdsOffset());
+ CHECK_EQ(item->size_, header_->StringIds().Size());
+ CHECK_EQ(item->offset_, header_->StringIds().GetOffset());
break;
case DexFile::kDexTypeTypeIdItem:
- CHECK_EQ(item->size_, collections->TypeIdsSize());
- CHECK_EQ(item->offset_, collections->TypeIdsOffset());
+ CHECK_EQ(item->size_, header_->TypeIds().Size());
+ CHECK_EQ(item->offset_, header_->TypeIds().GetOffset());
break;
case DexFile::kDexTypeProtoIdItem:
- CHECK_EQ(item->size_, collections->ProtoIdsSize());
- CHECK_EQ(item->offset_, collections->ProtoIdsOffset());
+ CHECK_EQ(item->size_, header_->ProtoIds().Size());
+ CHECK_EQ(item->offset_, header_->ProtoIds().GetOffset());
break;
case DexFile::kDexTypeFieldIdItem:
- CHECK_EQ(item->size_, collections->FieldIdsSize());
- CHECK_EQ(item->offset_, collections->FieldIdsOffset());
+ CHECK_EQ(item->size_, header_->FieldIds().Size());
+ CHECK_EQ(item->offset_, header_->FieldIds().GetOffset());
break;
case DexFile::kDexTypeMethodIdItem:
- CHECK_EQ(item->size_, collections->MethodIdsSize());
- CHECK_EQ(item->offset_, collections->MethodIdsOffset());
+ CHECK_EQ(item->size_, header_->MethodIds().Size());
+ CHECK_EQ(item->offset_, header_->MethodIds().GetOffset());
break;
case DexFile::kDexTypeClassDefItem:
if (options.class_filter_.empty()) {
// The filter may have removed some classes, this will get fixed up during writing.
- CHECK_EQ(item->size_, collections->ClassDefsSize());
+ CHECK_EQ(item->size_, header_->ClassDefs().Size());
}
- CHECK_EQ(item->offset_, collections->ClassDefsOffset());
+ CHECK_EQ(item->offset_, header_->ClassDefs().GetOffset());
break;
case DexFile::kDexTypeCallSiteIdItem:
- CHECK_EQ(item->size_, collections->CallSiteIdsSize());
- CHECK_EQ(item->offset_, collections->CallSiteIdsOffset());
+ CHECK_EQ(item->size_, header_->CallSiteIds().Size());
+ CHECK_EQ(item->offset_, header_->CallSiteIds().GetOffset());
break;
case DexFile::kDexTypeMethodHandleItem:
- CHECK_EQ(item->size_, collections->MethodHandleItemsSize());
- CHECK_EQ(item->offset_, collections->MethodHandleItemsOffset());
+ CHECK_EQ(item->size_, header_->MethodHandleItems().Size());
+ CHECK_EQ(item->offset_, header_->MethodHandleItems().GetOffset());
break;
case DexFile::kDexTypeMapList:
CHECK_EQ(item->size_, 1u);
CHECK_EQ(item->offset_, disk_header.map_off_);
break;
case DexFile::kDexTypeTypeList:
- collections->SetTypeListsOffset(item->offset_);
+ header_->TypeLists().SetOffset(item->offset_);
break;
case DexFile::kDexTypeAnnotationSetRefList:
- collections->SetAnnotationSetRefListsOffset(item->offset_);
+ header_->AnnotationSetRefLists().SetOffset(item->offset_);
break;
case DexFile::kDexTypeAnnotationSetItem:
- collections->SetAnnotationSetItemsOffset(item->offset_);
+ header_->AnnotationSetItems().SetOffset(item->offset_);
break;
case DexFile::kDexTypeClassDataItem:
- collections->SetClassDatasOffset(item->offset_);
+ header_->ClassDatas().SetOffset(item->offset_);
break;
case DexFile::kDexTypeCodeItem:
- collections->SetCodeItemsOffset(item->offset_);
+ header_->CodeItems().SetOffset(item->offset_);
break;
case DexFile::kDexTypeStringDataItem:
- collections->SetStringDatasOffset(item->offset_);
+ header_->StringDatas().SetOffset(item->offset_);
break;
case DexFile::kDexTypeDebugInfoItem:
- collections->SetDebugInfoItemsOffset(item->offset_);
+ header_->DebugInfoItems().SetOffset(item->offset_);
break;
case DexFile::kDexTypeAnnotationItem:
- collections->SetAnnotationItemsOffset(item->offset_);
- collections->AddAnnotationsFromMapListSection(dex_file, item->offset_, item->size_);
+ header_->AnnotationItems().SetOffset(item->offset_);
+ AddAnnotationsFromMapListSection(dex_file, item->offset_, item->size_);
break;
case DexFile::kDexTypeEncodedArrayItem:
- collections->SetEncodedArrayItemsOffset(item->offset_);
+ header_->EncodedArrayItems().SetOffset(item->offset_);
break;
case DexFile::kDexTypeAnnotationsDirectoryItem:
- collections->SetAnnotationsDirectoryItemsOffset(item->offset_);
+ header_->AnnotationsDirectoryItems().SetOffset(item->offset_);
break;
default:
LOG(ERROR) << "Unknown map list item type.";
@@ -193,5 +411,798 @@ static void CheckAndSetRemainingOffsets(const DexFile& dex_file,
}
}
+void BuilderMaps::CreateStringId(const DexFile& dex_file, uint32_t i) {
+ const DexFile::StringId& disk_string_id = dex_file.GetStringId(dex::StringIndex(i));
+ StringData* string_data =
+ string_datas_map_.CreateAndAddItem(header_->StringDatas(),
+ eagerly_assign_offsets_,
+ disk_string_id.string_data_off_,
+ dex_file.GetStringData(disk_string_id));
+ CreateAndAddIndexedItem(header_->StringIds(),
+ header_->StringIds().GetOffset() + i * StringId::ItemSize(),
+ i,
+ string_data);
+}
+
+void BuilderMaps::CreateTypeId(const DexFile& dex_file, uint32_t i) {
+ const DexFile::TypeId& disk_type_id = dex_file.GetTypeId(dex::TypeIndex(i));
+ CreateAndAddIndexedItem(header_->TypeIds(),
+ header_->TypeIds().GetOffset() + i * TypeId::ItemSize(),
+ i,
+ header_->StringIds()[disk_type_id.descriptor_idx_.index_]);
+}
+
+void BuilderMaps::CreateProtoId(const DexFile& dex_file, uint32_t i) {
+ const DexFile::ProtoId& disk_proto_id = dex_file.GetProtoId(dex::ProtoIndex(i));
+ const DexFile::TypeList* type_list = dex_file.GetProtoParameters(disk_proto_id);
+ TypeList* parameter_type_list = CreateTypeList(type_list, disk_proto_id.parameters_off_);
+
+ CreateAndAddIndexedItem(header_->ProtoIds(),
+ header_->ProtoIds().GetOffset() + i * ProtoId::ItemSize(),
+ i,
+ header_->StringIds()[disk_proto_id.shorty_idx_.index_],
+ header_->TypeIds()[disk_proto_id.return_type_idx_.index_],
+ parameter_type_list);
+}
+
+void BuilderMaps::CreateFieldId(const DexFile& dex_file, uint32_t i) {
+ const DexFile::FieldId& disk_field_id = dex_file.GetFieldId(i);
+ CreateAndAddIndexedItem(header_->FieldIds(),
+ header_->FieldIds().GetOffset() + i * FieldId::ItemSize(),
+ i,
+ header_->TypeIds()[disk_field_id.class_idx_.index_],
+ header_->TypeIds()[disk_field_id.type_idx_.index_],
+ header_->StringIds()[disk_field_id.name_idx_.index_]);
+}
+
+void BuilderMaps::CreateMethodId(const DexFile& dex_file, uint32_t i) {
+ const DexFile::MethodId& disk_method_id = dex_file.GetMethodId(i);
+ CreateAndAddIndexedItem(header_->MethodIds(),
+ header_->MethodIds().GetOffset() + i * MethodId::ItemSize(),
+ i,
+ header_->TypeIds()[disk_method_id.class_idx_.index_],
+ header_->ProtoIds()[disk_method_id.proto_idx_.index_],
+ header_->StringIds()[disk_method_id.name_idx_.index_]);
+}
+
+void BuilderMaps::CreateClassDef(const DexFile& dex_file, uint32_t i) {
+ const DexFile::ClassDef& disk_class_def = dex_file.GetClassDef(i);
+ const TypeId* class_type = header_->TypeIds()[disk_class_def.class_idx_.index_];
+ uint32_t access_flags = disk_class_def.access_flags_;
+ const TypeId* superclass = header_->GetTypeIdOrNullPtr(disk_class_def.superclass_idx_.index_);
+
+ const DexFile::TypeList* type_list = dex_file.GetInterfacesList(disk_class_def);
+ TypeList* interfaces_type_list = CreateTypeList(type_list, disk_class_def.interfaces_off_);
+
+ const StringId* source_file =
+ header_->GetStringIdOrNullPtr(disk_class_def.source_file_idx_.index_);
+ // Annotations.
+ AnnotationsDirectoryItem* annotations = nullptr;
+ const DexFile::AnnotationsDirectoryItem* disk_annotations_directory_item =
+ dex_file.GetAnnotationsDirectory(disk_class_def);
+ if (disk_annotations_directory_item != nullptr) {
+ annotations = CreateAnnotationsDirectoryItem(
+ dex_file, disk_annotations_directory_item, disk_class_def.annotations_off_);
+ }
+ // Static field initializers.
+ const uint8_t* static_data = dex_file.GetEncodedStaticFieldValuesArray(disk_class_def);
+ EncodedArrayItem* static_values =
+ CreateEncodedArrayItem(dex_file, static_data, disk_class_def.static_values_off_);
+ ClassData* class_data = CreateClassData(
+ dex_file, dex_file.GetClassData(disk_class_def), disk_class_def.class_data_off_);
+ CreateAndAddIndexedItem(header_->ClassDefs(),
+ header_->ClassDefs().GetOffset() + i * ClassDef::ItemSize(),
+ i,
+ class_type,
+ access_flags,
+ superclass,
+ interfaces_type_list,
+ source_file,
+ annotations,
+ static_values,
+ class_data);
+}
+
+void BuilderMaps::CreateCallSiteId(const DexFile& dex_file, uint32_t i) {
+ const DexFile::CallSiteIdItem& disk_call_site_id = dex_file.GetCallSiteId(i);
+ const uint8_t* disk_call_item_ptr = dex_file.DataBegin() + disk_call_site_id.data_off_;
+ EncodedArrayItem* call_site_item =
+ CreateEncodedArrayItem(dex_file, disk_call_item_ptr, disk_call_site_id.data_off_);
+
+ CreateAndAddIndexedItem(header_->CallSiteIds(),
+ header_->CallSiteIds().GetOffset() + i * CallSiteId::ItemSize(),
+ i,
+ call_site_item);
+}
+
+void BuilderMaps::CreateMethodHandleItem(const DexFile& dex_file, uint32_t i) {
+ const DexFile::MethodHandleItem& disk_method_handle = dex_file.GetMethodHandle(i);
+ uint16_t index = disk_method_handle.field_or_method_idx_;
+ DexFile::MethodHandleType type =
+ static_cast<DexFile::MethodHandleType>(disk_method_handle.method_handle_type_);
+ bool is_invoke = type == DexFile::MethodHandleType::kInvokeStatic ||
+ type == DexFile::MethodHandleType::kInvokeInstance ||
+ type == DexFile::MethodHandleType::kInvokeConstructor ||
+ type == DexFile::MethodHandleType::kInvokeDirect ||
+ type == DexFile::MethodHandleType::kInvokeInterface;
+ static_assert(DexFile::MethodHandleType::kLast == DexFile::MethodHandleType::kInvokeInterface,
+ "Unexpected method handle types.");
+ IndexedItem* field_or_method_id;
+ if (is_invoke) {
+ field_or_method_id = header_->MethodIds()[index];
+ } else {
+ field_or_method_id = header_->FieldIds()[index];
+ }
+ CreateAndAddIndexedItem(header_->MethodHandleItems(),
+ header_->MethodHandleItems().GetOffset() +
+ i * MethodHandleItem::ItemSize(),
+ i,
+ type,
+ field_or_method_id);
+}
+
+void BuilderMaps::CreateCallSitesAndMethodHandles(const DexFile& dex_file) {
+ // Iterate through the map list and set the offset of the CallSiteIds and MethodHandleItems.
+ const DexFile::MapList* map = dex_file.GetMapList();
+ for (uint32_t i = 0; i < map->size_; ++i) {
+ const DexFile::MapItem* item = map->list_ + i;
+ switch (item->type_) {
+ case DexFile::kDexTypeCallSiteIdItem:
+ header_->CallSiteIds().SetOffset(item->offset_);
+ break;
+ case DexFile::kDexTypeMethodHandleItem:
+ header_->MethodHandleItems().SetOffset(item->offset_);
+ break;
+ default:
+ break;
+ }
+ }
+ // Populate MethodHandleItems first (CallSiteIds may depend on them).
+ for (uint32_t i = 0; i < dex_file.NumMethodHandles(); i++) {
+ CreateMethodHandleItem(dex_file, i);
+ }
+ // Populate CallSiteIds.
+ for (uint32_t i = 0; i < dex_file.NumCallSiteIds(); i++) {
+ CreateCallSiteId(dex_file, i);
+ }
+}
+
+TypeList* BuilderMaps::CreateTypeList(const DexFile::TypeList* dex_type_list, uint32_t offset) {
+ if (dex_type_list == nullptr) {
+ return nullptr;
+ }
+ TypeList* type_list = type_lists_map_.GetExistingObject(offset);
+ if (type_list == nullptr) {
+ TypeIdVector* type_vector = new TypeIdVector();
+ uint32_t size = dex_type_list->Size();
+ for (uint32_t index = 0; index < size; ++index) {
+ type_vector->push_back(header_->TypeIds()[
+ dex_type_list->GetTypeItem(index).type_idx_.index_]);
+ }
+ type_list = type_lists_map_.CreateAndAddItem(header_->TypeLists(),
+ eagerly_assign_offsets_,
+ offset,
+ type_vector);
+ }
+ return type_list;
+}
+
+EncodedArrayItem* BuilderMaps::CreateEncodedArrayItem(const DexFile& dex_file,
+ const uint8_t* static_data,
+ uint32_t offset) {
+ if (static_data == nullptr) {
+ return nullptr;
+ }
+ EncodedArrayItem* encoded_array_item = encoded_array_items_map_.GetExistingObject(offset);
+ if (encoded_array_item == nullptr) {
+ uint32_t size = DecodeUnsignedLeb128(&static_data);
+ EncodedValueVector* values = new EncodedValueVector();
+ for (uint32_t i = 0; i < size; ++i) {
+ values->push_back(std::unique_ptr<EncodedValue>(ReadEncodedValue(dex_file, &static_data)));
+ }
+ // TODO: Calculate the size of the encoded array.
+ encoded_array_item = encoded_array_items_map_.CreateAndAddItem(header_->EncodedArrayItems(),
+ eagerly_assign_offsets_,
+ offset,
+ values);
+ }
+ return encoded_array_item;
+}
+
+void BuilderMaps::AddAnnotationsFromMapListSection(const DexFile& dex_file,
+ uint32_t start_offset,
+ uint32_t count) {
+ uint32_t current_offset = start_offset;
+ for (size_t i = 0; i < count; ++i) {
+ // Annotation that we didn't process already, add it to the set.
+ const DexFile::AnnotationItem* annotation = dex_file.GetAnnotationItemAtOffset(current_offset);
+ AnnotationItem* annotation_item = CreateAnnotationItem(dex_file, annotation);
+ DCHECK(annotation_item != nullptr);
+ current_offset += annotation_item->GetSize();
+ }
+}
+
+AnnotationItem* BuilderMaps::CreateAnnotationItem(const DexFile& dex_file,
+ const DexFile::AnnotationItem* annotation) {
+ const uint8_t* const start_data = reinterpret_cast<const uint8_t*>(annotation);
+ const uint32_t offset = start_data - dex_file.DataBegin();
+ AnnotationItem* annotation_item = annotation_items_map_.GetExistingObject(offset);
+ if (annotation_item == nullptr) {
+ uint8_t visibility = annotation->visibility_;
+ const uint8_t* annotation_data = annotation->annotation_;
+ std::unique_ptr<EncodedValue> encoded_value(
+ ReadEncodedValue(dex_file, &annotation_data, DexFile::kDexAnnotationAnnotation, 0));
+ annotation_item =
+ annotation_items_map_.CreateAndAddItem(header_->AnnotationItems(),
+ eagerly_assign_offsets_,
+ offset,
+ visibility,
+ encoded_value->ReleaseEncodedAnnotation());
+ annotation_item->SetSize(annotation_data - start_data);
+ }
+ return annotation_item;
+}
+
+
+AnnotationSetItem* BuilderMaps::CreateAnnotationSetItem(const DexFile& dex_file,
+ const DexFile::AnnotationSetItem* disk_annotations_item, uint32_t offset) {
+ if (disk_annotations_item == nullptr || (disk_annotations_item->size_ == 0 && offset == 0)) {
+ return nullptr;
+ }
+ AnnotationSetItem* annotation_set_item = annotation_set_items_map_.GetExistingObject(offset);
+ if (annotation_set_item == nullptr) {
+ std::vector<AnnotationItem*>* items = new std::vector<AnnotationItem*>();
+ for (uint32_t i = 0; i < disk_annotations_item->size_; ++i) {
+ const DexFile::AnnotationItem* annotation =
+ dex_file.GetAnnotationItem(disk_annotations_item, i);
+ if (annotation == nullptr) {
+ continue;
+ }
+ AnnotationItem* annotation_item = CreateAnnotationItem(dex_file, annotation);
+ items->push_back(annotation_item);
+ }
+ annotation_set_item =
+ annotation_set_items_map_.CreateAndAddItem(header_->AnnotationSetItems(),
+ eagerly_assign_offsets_,
+ offset,
+ items);
+ }
+ return annotation_set_item;
+}
+
+AnnotationsDirectoryItem* BuilderMaps::CreateAnnotationsDirectoryItem(const DexFile& dex_file,
+ const DexFile::AnnotationsDirectoryItem* disk_annotations_item, uint32_t offset) {
+ AnnotationsDirectoryItem* annotations_directory_item =
+ annotations_directory_items_map_.GetExistingObject(offset);
+ if (annotations_directory_item != nullptr) {
+ return annotations_directory_item;
+ }
+ const DexFile::AnnotationSetItem* class_set_item =
+ dex_file.GetClassAnnotationSet(disk_annotations_item);
+ AnnotationSetItem* class_annotation = nullptr;
+ if (class_set_item != nullptr) {
+ uint32_t item_offset = disk_annotations_item->class_annotations_off_;
+ class_annotation = CreateAnnotationSetItem(dex_file, class_set_item, item_offset);
+ }
+ const DexFile::FieldAnnotationsItem* fields =
+ dex_file.GetFieldAnnotations(disk_annotations_item);
+ FieldAnnotationVector* field_annotations = nullptr;
+ if (fields != nullptr) {
+ field_annotations = new FieldAnnotationVector();
+ for (uint32_t i = 0; i < disk_annotations_item->fields_size_; ++i) {
+ FieldId* field_id = header_->FieldIds()[fields[i].field_idx_];
+ const DexFile::AnnotationSetItem* field_set_item =
+ dex_file.GetFieldAnnotationSetItem(fields[i]);
+ uint32_t annotation_set_offset = fields[i].annotations_off_;
+ AnnotationSetItem* annotation_set_item =
+ CreateAnnotationSetItem(dex_file, field_set_item, annotation_set_offset);
+ field_annotations->push_back(std::unique_ptr<FieldAnnotation>(
+ new FieldAnnotation(field_id, annotation_set_item)));
+ }
+ }
+ const DexFile::MethodAnnotationsItem* methods =
+ dex_file.GetMethodAnnotations(disk_annotations_item);
+ MethodAnnotationVector* method_annotations = nullptr;
+ if (methods != nullptr) {
+ method_annotations = new MethodAnnotationVector();
+ for (uint32_t i = 0; i < disk_annotations_item->methods_size_; ++i) {
+ MethodId* method_id = header_->MethodIds()[methods[i].method_idx_];
+ const DexFile::AnnotationSetItem* method_set_item =
+ dex_file.GetMethodAnnotationSetItem(methods[i]);
+ uint32_t annotation_set_offset = methods[i].annotations_off_;
+ AnnotationSetItem* annotation_set_item =
+ CreateAnnotationSetItem(dex_file, method_set_item, annotation_set_offset);
+ method_annotations->push_back(std::unique_ptr<MethodAnnotation>(
+ new MethodAnnotation(method_id, annotation_set_item)));
+ }
+ }
+ const DexFile::ParameterAnnotationsItem* parameters =
+ dex_file.GetParameterAnnotations(disk_annotations_item);
+ ParameterAnnotationVector* parameter_annotations = nullptr;
+ if (parameters != nullptr) {
+ parameter_annotations = new ParameterAnnotationVector();
+ for (uint32_t i = 0; i < disk_annotations_item->parameters_size_; ++i) {
+ MethodId* method_id = header_->MethodIds()[parameters[i].method_idx_];
+ const DexFile::AnnotationSetRefList* list =
+ dex_file.GetParameterAnnotationSetRefList(&parameters[i]);
+ parameter_annotations->push_back(std::unique_ptr<ParameterAnnotation>(
+ GenerateParameterAnnotation(dex_file, method_id, list, parameters[i].annotations_off_)));
+ }
+ }
+ // TODO: Calculate the size of the annotations directory.
+ return annotations_directory_items_map_.CreateAndAddItem(header_->AnnotationsDirectoryItems(),
+ eagerly_assign_offsets_,
+ offset,
+ class_annotation,
+ field_annotations,
+ method_annotations,
+ parameter_annotations);
+}
+
+CodeItem* BuilderMaps::DedupeOrCreateCodeItem(const DexFile& dex_file,
+ const DexFile::CodeItem* disk_code_item,
+ uint32_t offset,
+ uint32_t dex_method_index) {
+ if (disk_code_item == nullptr) {
+ return nullptr;
+ }
+ CodeItemDebugInfoAccessor accessor(dex_file, disk_code_item, dex_method_index);
+ const uint32_t debug_info_offset = accessor.DebugInfoOffset();
+
+ // Create the offsets pair and dedupe based on it.
+ std::pair<uint32_t, uint32_t> offsets_pair(offset, debug_info_offset);
+ auto existing = code_items_map_.find(offsets_pair);
+ if (existing != code_items_map_.end()) {
+ return existing->second;
+ }
+
+ const uint8_t* debug_info_stream = dex_file.GetDebugInfoStream(debug_info_offset);
+ DebugInfoItem* debug_info = nullptr;
+ if (debug_info_stream != nullptr) {
+ debug_info = debug_info_items_map_.GetExistingObject(debug_info_offset);
+ if (debug_info == nullptr) {
+ uint32_t debug_info_size = GetDebugInfoStreamSize(debug_info_stream);
+ uint8_t* debug_info_buffer = new uint8_t[debug_info_size];
+ memcpy(debug_info_buffer, debug_info_stream, debug_info_size);
+ debug_info = debug_info_items_map_.CreateAndAddItem(header_->DebugInfoItems(),
+ eagerly_assign_offsets_,
+ debug_info_offset,
+ debug_info_size,
+ debug_info_buffer);
+ }
+ }
+
+ uint32_t insns_size = accessor.InsnsSizeInCodeUnits();
+ uint16_t* insns = new uint16_t[insns_size];
+ memcpy(insns, accessor.Insns(), insns_size * sizeof(uint16_t));
+
+ TryItemVector* tries = nullptr;
+ CatchHandlerVector* handler_list = nullptr;
+ if (accessor.TriesSize() > 0) {
+ tries = new TryItemVector();
+ handler_list = new CatchHandlerVector();
+ for (const DexFile::TryItem& disk_try_item : accessor.TryItems()) {
+ uint32_t start_addr = disk_try_item.start_addr_;
+ uint16_t insn_count = disk_try_item.insn_count_;
+ uint16_t handler_off = disk_try_item.handler_off_;
+ const CatchHandler* handlers = nullptr;
+ for (std::unique_ptr<const CatchHandler>& existing_handlers : *handler_list) {
+ if (handler_off == existing_handlers->GetListOffset()) {
+ handlers = existing_handlers.get();
+ break;
+ }
+ }
+ if (handlers == nullptr) {
+ bool catch_all = false;
+ TypeAddrPairVector* addr_pairs = new TypeAddrPairVector();
+ for (CatchHandlerIterator it(accessor, disk_try_item); it.HasNext(); it.Next()) {
+ const dex::TypeIndex type_index = it.GetHandlerTypeIndex();
+ const TypeId* type_id = header_->GetTypeIdOrNullPtr(type_index.index_);
+ catch_all |= type_id == nullptr;
+ addr_pairs->push_back(std::unique_ptr<const TypeAddrPair>(
+ new TypeAddrPair(type_id, it.GetHandlerAddress())));
+ }
+ handlers = new CatchHandler(catch_all, handler_off, addr_pairs);
+ handler_list->push_back(std::unique_ptr<const CatchHandler>(handlers));
+ }
+ TryItem* try_item = new TryItem(start_addr, insn_count, handlers);
+ tries->push_back(std::unique_ptr<const TryItem>(try_item));
+ }
+ // Manually walk catch handlers list and add any missing handlers unreferenced by try items.
+ const uint8_t* handlers_base = accessor.GetCatchHandlerData();
+ const uint8_t* handlers_data = handlers_base;
+ uint32_t handlers_size = DecodeUnsignedLeb128(&handlers_data);
+ while (handlers_size > handler_list->size()) {
+ bool already_added = false;
+ uint16_t handler_off = handlers_data - handlers_base;
+ for (std::unique_ptr<const CatchHandler>& existing_handlers : *handler_list) {
+ if (handler_off == existing_handlers->GetListOffset()) {
+ already_added = true;
+ break;
+ }
+ }
+ int32_t size = DecodeSignedLeb128(&handlers_data);
+ bool has_catch_all = size <= 0;
+ if (has_catch_all) {
+ size = -size;
+ }
+ if (already_added) {
+ for (int32_t i = 0; i < size; i++) {
+ DecodeUnsignedLeb128(&handlers_data);
+ DecodeUnsignedLeb128(&handlers_data);
+ }
+ if (has_catch_all) {
+ DecodeUnsignedLeb128(&handlers_data);
+ }
+ continue;
+ }
+ TypeAddrPairVector* addr_pairs = new TypeAddrPairVector();
+ for (int32_t i = 0; i < size; i++) {
+ const TypeId* type_id =
+ header_->GetTypeIdOrNullPtr(DecodeUnsignedLeb128(&handlers_data));
+ uint32_t addr = DecodeUnsignedLeb128(&handlers_data);
+ addr_pairs->push_back(
+ std::unique_ptr<const TypeAddrPair>(new TypeAddrPair(type_id, addr)));
+ }
+ if (has_catch_all) {
+ uint32_t addr = DecodeUnsignedLeb128(&handlers_data);
+ addr_pairs->push_back(
+ std::unique_ptr<const TypeAddrPair>(new TypeAddrPair(nullptr, addr)));
+ }
+ const CatchHandler* handler = new CatchHandler(has_catch_all, handler_off, addr_pairs);
+ handler_list->push_back(std::unique_ptr<const CatchHandler>(handler));
+ }
+ }
+
+ uint32_t size = dex_file.GetCodeItemSize(*disk_code_item);
+ CodeItem* code_item = header_->CodeItems().CreateAndAddItem(accessor.RegistersSize(),
+ accessor.InsSize(),
+ accessor.OutsSize(),
+ debug_info,
+ insns_size,
+ insns,
+ tries,
+ handler_list);
+ code_item->SetSize(size);
+
+ // Add the code item to the map.
+ DCHECK(!code_item->OffsetAssigned());
+ if (eagerly_assign_offsets_) {
+ code_item->SetOffset(offset);
+ }
+ code_items_map_.emplace(offsets_pair, code_item);
+
+ // Add "fixup" references to types, strings, methods, and fields.
+ // This is temporary, as we will probably want more detailed parsing of the
+ // instructions here.
+ std::vector<TypeId*> type_ids;
+ std::vector<StringId*> string_ids;
+ std::vector<MethodId*> method_ids;
+ std::vector<FieldId*> field_ids;
+ if (GetIdsFromByteCode(code_item,
+ /*out*/ &type_ids,
+ /*out*/ &string_ids,
+ /*out*/ &method_ids,
+ /*out*/ &field_ids)) {
+ CodeFixups* fixups = new CodeFixups(std::move(type_ids),
+ std::move(string_ids),
+ std::move(method_ids),
+ std::move(field_ids));
+ code_item->SetCodeFixups(fixups);
+ }
+
+ return code_item;
+}
+
+ClassData* BuilderMaps::CreateClassData(
+ const DexFile& dex_file, const uint8_t* encoded_data, uint32_t offset) {
+ // Read the fields and methods defined by the class, resolving the circular reference from those
+ // to classes by setting class at the same time.
+ ClassData* class_data = class_datas_map_.GetExistingObject(offset);
+ if (class_data == nullptr && encoded_data != nullptr) {
+ ClassDataItemIterator cdii(dex_file, encoded_data);
+ // Static fields.
+ FieldItemVector* static_fields = new FieldItemVector();
+ for (; cdii.HasNextStaticField(); cdii.Next()) {
+ FieldId* field_item = header_->FieldIds()[cdii.GetMemberIndex()];
+ uint32_t access_flags = cdii.GetRawMemberAccessFlags();
+ static_fields->emplace_back(access_flags, field_item);
+ }
+ // Instance fields.
+ FieldItemVector* instance_fields = new FieldItemVector();
+ for (; cdii.HasNextInstanceField(); cdii.Next()) {
+ FieldId* field_item = header_->FieldIds()[cdii.GetMemberIndex()];
+ uint32_t access_flags = cdii.GetRawMemberAccessFlags();
+ instance_fields->emplace_back(access_flags, field_item);
+ }
+ // Direct methods.
+ MethodItemVector* direct_methods = new MethodItemVector();
+ for (; cdii.HasNextDirectMethod(); cdii.Next()) {
+ direct_methods->push_back(GenerateMethodItem(dex_file, cdii));
+ }
+ // Virtual methods.
+ MethodItemVector* virtual_methods = new MethodItemVector();
+ for (; cdii.HasNextVirtualMethod(); cdii.Next()) {
+ virtual_methods->push_back(GenerateMethodItem(dex_file, cdii));
+ }
+ class_data = class_datas_map_.CreateAndAddItem(header_->ClassDatas(),
+ eagerly_assign_offsets_,
+ offset,
+ static_fields,
+ instance_fields,
+ direct_methods,
+ virtual_methods);
+ class_data->SetSize(cdii.EndDataPointer() - encoded_data);
+ }
+ return class_data;
+}
+
+void BuilderMaps::SortVectorsByMapOrder() {
+ header_->StringDatas().SortByMapOrder(string_datas_map_.Collection());
+ header_->TypeLists().SortByMapOrder(type_lists_map_.Collection());
+ header_->EncodedArrayItems().SortByMapOrder(encoded_array_items_map_.Collection());
+ header_->AnnotationItems().SortByMapOrder(annotation_items_map_.Collection());
+ header_->AnnotationSetItems().SortByMapOrder(annotation_set_items_map_.Collection());
+ header_->AnnotationSetRefLists().SortByMapOrder(annotation_set_ref_lists_map_.Collection());
+ header_->AnnotationsDirectoryItems().SortByMapOrder(
+ annotations_directory_items_map_.Collection());
+ header_->DebugInfoItems().SortByMapOrder(debug_info_items_map_.Collection());
+ header_->CodeItems().SortByMapOrder(code_items_map_);
+ header_->ClassDatas().SortByMapOrder(class_datas_map_.Collection());
+}
+
+bool BuilderMaps::GetIdsFromByteCode(const CodeItem* code,
+ std::vector<TypeId*>* type_ids,
+ std::vector<StringId*>* string_ids,
+ std::vector<MethodId*>* method_ids,
+ std::vector<FieldId*>* field_ids) {
+ bool has_id = false;
+ IterationRange<DexInstructionIterator> instructions = code->Instructions();
+ SafeDexInstructionIterator it(instructions.begin(), instructions.end());
+ for (; !it.IsErrorState() && it < instructions.end(); ++it) {
+ // In case the instruction goes past the end of the code item, make sure to not process it.
+ SafeDexInstructionIterator next = it;
+ ++next;
+ if (next.IsErrorState()) {
+ break;
+ }
+ has_id |= GetIdFromInstruction(&it.Inst(), type_ids, string_ids, method_ids, field_ids);
+ } // for
+ return has_id;
+}
+
+bool BuilderMaps::GetIdFromInstruction(const Instruction* dec_insn,
+ std::vector<TypeId*>* type_ids,
+ std::vector<StringId*>* string_ids,
+ std::vector<MethodId*>* method_ids,
+ std::vector<FieldId*>* field_ids) {
+ // Determine index and width of the string.
+ uint32_t index = 0;
+ switch (Instruction::FormatOf(dec_insn->Opcode())) {
+ // SOME NOT SUPPORTED:
+ // case Instruction::k20bc:
+ case Instruction::k21c:
+ case Instruction::k35c:
+ // case Instruction::k35ms:
+ case Instruction::k3rc:
+ // case Instruction::k3rms:
+ // case Instruction::k35mi:
+ // case Instruction::k3rmi:
+ case Instruction::k45cc:
+ case Instruction::k4rcc:
+ index = dec_insn->VRegB();
+ break;
+ case Instruction::k31c:
+ index = dec_insn->VRegB();
+ break;
+ case Instruction::k22c:
+ // case Instruction::k22cs:
+ index = dec_insn->VRegC();
+ break;
+ default:
+ break;
+ } // switch
+
+ // Determine index type, and add reference to the appropriate collection.
+ switch (Instruction::IndexTypeOf(dec_insn->Opcode())) {
+ case Instruction::kIndexTypeRef:
+ if (index < header_->TypeIds().Size()) {
+ type_ids->push_back(header_->TypeIds()[index]);
+ return true;
+ }
+ break;
+ case Instruction::kIndexStringRef:
+ if (index < header_->StringIds().Size()) {
+ string_ids->push_back(header_->StringIds()[index]);
+ return true;
+ }
+ break;
+ case Instruction::kIndexMethodRef:
+ case Instruction::kIndexMethodAndProtoRef:
+ if (index < header_->MethodIds().Size()) {
+ method_ids->push_back(header_->MethodIds()[index]);
+ return true;
+ }
+ break;
+ case Instruction::kIndexFieldRef:
+ if (index < header_->FieldIds().Size()) {
+ field_ids->push_back(header_->FieldIds()[index]);
+ return true;
+ }
+ break;
+ case Instruction::kIndexUnknown:
+ case Instruction::kIndexNone:
+ case Instruction::kIndexVtableOffset:
+ case Instruction::kIndexFieldOffset:
+ default:
+ break;
+ } // switch
+ return false;
+}
+
+EncodedValue* BuilderMaps::ReadEncodedValue(const DexFile& dex_file, const uint8_t** data) {
+ const uint8_t encoded_value = *(*data)++;
+ const uint8_t type = encoded_value & 0x1f;
+ EncodedValue* item = new EncodedValue(type);
+ ReadEncodedValue(dex_file, data, type, encoded_value >> 5, item);
+ return item;
+}
+
+EncodedValue* BuilderMaps::ReadEncodedValue(const DexFile& dex_file,
+ const uint8_t** data,
+ uint8_t type,
+ uint8_t length) {
+ EncodedValue* item = new EncodedValue(type);
+ ReadEncodedValue(dex_file, data, type, length, item);
+ return item;
+}
+
+void BuilderMaps::ReadEncodedValue(const DexFile& dex_file,
+ const uint8_t** data,
+ uint8_t type,
+ uint8_t length,
+ EncodedValue* item) {
+ switch (type) {
+ case DexFile::kDexAnnotationByte:
+ item->SetByte(static_cast<int8_t>(ReadVarWidth(data, length, false)));
+ break;
+ case DexFile::kDexAnnotationShort:
+ item->SetShort(static_cast<int16_t>(ReadVarWidth(data, length, true)));
+ break;
+ case DexFile::kDexAnnotationChar:
+ item->SetChar(static_cast<uint16_t>(ReadVarWidth(data, length, false)));
+ break;
+ case DexFile::kDexAnnotationInt:
+ item->SetInt(static_cast<int32_t>(ReadVarWidth(data, length, true)));
+ break;
+ case DexFile::kDexAnnotationLong:
+ item->SetLong(static_cast<int64_t>(ReadVarWidth(data, length, true)));
+ break;
+ case DexFile::kDexAnnotationFloat: {
+ // Fill on right.
+ union {
+ float f;
+ uint32_t data;
+ } conv;
+ conv.data = static_cast<uint32_t>(ReadVarWidth(data, length, false)) << (3 - length) * 8;
+ item->SetFloat(conv.f);
+ break;
+ }
+ case DexFile::kDexAnnotationDouble: {
+ // Fill on right.
+ union {
+ double d;
+ uint64_t data;
+ } conv;
+ conv.data = ReadVarWidth(data, length, false) << (7 - length) * 8;
+ item->SetDouble(conv.d);
+ break;
+ }
+ case DexFile::kDexAnnotationMethodType: {
+ const uint32_t proto_index = static_cast<uint32_t>(ReadVarWidth(data, length, false));
+ item->SetProtoId(header_->ProtoIds()[proto_index]);
+ break;
+ }
+ case DexFile::kDexAnnotationMethodHandle: {
+ const uint32_t method_handle_index = static_cast<uint32_t>(ReadVarWidth(data, length, false));
+ item->SetMethodHandle(header_->MethodHandleItems()[method_handle_index]);
+ break;
+ }
+ case DexFile::kDexAnnotationString: {
+ const uint32_t string_index = static_cast<uint32_t>(ReadVarWidth(data, length, false));
+ item->SetStringId(header_->StringIds()[string_index]);
+ break;
+ }
+ case DexFile::kDexAnnotationType: {
+ const uint32_t string_index = static_cast<uint32_t>(ReadVarWidth(data, length, false));
+ item->SetTypeId(header_->TypeIds()[string_index]);
+ break;
+ }
+ case DexFile::kDexAnnotationField:
+ case DexFile::kDexAnnotationEnum: {
+ const uint32_t field_index = static_cast<uint32_t>(ReadVarWidth(data, length, false));
+ item->SetFieldId(header_->FieldIds()[field_index]);
+ break;
+ }
+ case DexFile::kDexAnnotationMethod: {
+ const uint32_t method_index = static_cast<uint32_t>(ReadVarWidth(data, length, false));
+ item->SetMethodId(header_->MethodIds()[method_index]);
+ break;
+ }
+ case DexFile::kDexAnnotationArray: {
+ EncodedValueVector* values = new EncodedValueVector();
+ const uint32_t offset = *data - dex_file.DataBegin();
+ const uint32_t size = DecodeUnsignedLeb128(data);
+ // Decode all elements.
+ for (uint32_t i = 0; i < size; i++) {
+ values->push_back(std::unique_ptr<EncodedValue>(ReadEncodedValue(dex_file, data)));
+ }
+ EncodedArrayItem* array_item = new EncodedArrayItem(values);
+ if (eagerly_assign_offsets_) {
+ array_item->SetOffset(offset);
+ }
+ item->SetEncodedArray(array_item);
+ break;
+ }
+ case DexFile::kDexAnnotationAnnotation: {
+ AnnotationElementVector* elements = new AnnotationElementVector();
+ const uint32_t type_idx = DecodeUnsignedLeb128(data);
+ const uint32_t size = DecodeUnsignedLeb128(data);
+ // Decode all name=value pairs.
+ for (uint32_t i = 0; i < size; i++) {
+ const uint32_t name_index = DecodeUnsignedLeb128(data);
+ elements->push_back(std::unique_ptr<AnnotationElement>(
+ new AnnotationElement(header_->StringIds()[name_index],
+ ReadEncodedValue(dex_file, data))));
+ }
+ item->SetEncodedAnnotation(new EncodedAnnotation(header_->TypeIds()[type_idx], elements));
+ break;
+ }
+ case DexFile::kDexAnnotationNull:
+ break;
+ case DexFile::kDexAnnotationBoolean:
+ item->SetBoolean(length != 0);
+ break;
+ default:
+ break;
+ }
+}
+
+MethodItem BuilderMaps::GenerateMethodItem(const DexFile& dex_file, ClassDataItemIterator& cdii) {
+ MethodId* method_id = header_->MethodIds()[cdii.GetMemberIndex()];
+ uint32_t access_flags = cdii.GetRawMemberAccessFlags();
+ const DexFile::CodeItem* disk_code_item = cdii.GetMethodCodeItem();
+ // Temporary hack to prevent incorrectly deduping code items if they have the same offset since
+ // they may have different debug info streams.
+ CodeItem* code_item = DedupeOrCreateCodeItem(dex_file,
+ disk_code_item,
+ cdii.GetMethodCodeItemOffset(),
+ cdii.GetMemberIndex());
+ return MethodItem(access_flags, method_id, code_item);
+}
+
+ParameterAnnotation* BuilderMaps::GenerateParameterAnnotation(
+ const DexFile& dex_file,
+ MethodId* method_id,
+ const DexFile::AnnotationSetRefList* annotation_set_ref_list,
+ uint32_t offset) {
+ AnnotationSetRefList* set_ref_list = annotation_set_ref_lists_map_.GetExistingObject(offset);
+ if (set_ref_list == nullptr) {
+ std::vector<AnnotationSetItem*>* annotations = new std::vector<AnnotationSetItem*>();
+ for (uint32_t i = 0; i < annotation_set_ref_list->size_; ++i) {
+ const DexFile::AnnotationSetItem* annotation_set_item =
+ dex_file.GetSetRefItemItem(&annotation_set_ref_list->list_[i]);
+ uint32_t set_offset = annotation_set_ref_list->list_[i].annotations_off_;
+ annotations->push_back(CreateAnnotationSetItem(dex_file, annotation_set_item, set_offset));
+ }
+ set_ref_list =
+ annotation_set_ref_lists_map_.CreateAndAddItem(header_->AnnotationSetRefLists(),
+ eagerly_assign_offsets_,
+ offset,
+ annotations);
+ }
+ return new ParameterAnnotation(method_id, set_ref_list);
+}
+
} // namespace dex_ir
} // namespace art
diff --git a/dexlayout/dex_verify.cc b/dexlayout/dex_verify.cc
index 18ddc86e0c..718d66feaa 100644
--- a/dexlayout/dex_verify.cc
+++ b/dexlayout/dex_verify.cc
@@ -31,38 +31,42 @@ using android::base::StringPrintf;
bool VerifyOutputDexFile(dex_ir::Header* orig_header,
dex_ir::Header* output_header,
std::string* error_msg) {
- dex_ir::Collections& orig = orig_header->GetCollections();
- dex_ir::Collections& output = output_header->GetCollections();
-
// Compare all id sections. They have a defined order that can't be changed by dexlayout.
- if (!VerifyIds(orig.StringIds(), output.StringIds(), "string ids", error_msg) ||
- !VerifyIds(orig.TypeIds(), output.TypeIds(), "type ids", error_msg) ||
- !VerifyIds(orig.ProtoIds(), output.ProtoIds(), "proto ids", error_msg) ||
- !VerifyIds(orig.FieldIds(), output.FieldIds(), "field ids", error_msg) ||
- !VerifyIds(orig.MethodIds(), output.MethodIds(), "method ids", error_msg)) {
+ if (!VerifyIds(orig_header->StringIds(), output_header->StringIds(), "string ids", error_msg) ||
+ !VerifyIds(orig_header->TypeIds(), output_header->TypeIds(), "type ids", error_msg) ||
+ !VerifyIds(orig_header->ProtoIds(), output_header->ProtoIds(), "proto ids", error_msg) ||
+ !VerifyIds(orig_header->FieldIds(), output_header->FieldIds(), "field ids", error_msg) ||
+ !VerifyIds(orig_header->MethodIds(), output_header->MethodIds(), "method ids", error_msg)) {
return false;
}
// Compare class defs. The order may have been changed by dexlayout.
- if (!VerifyClassDefs(orig.ClassDefs(), output.ClassDefs(), error_msg)) {
+ if (!VerifyClassDefs(orig_header->ClassDefs(), output_header->ClassDefs(), error_msg)) {
return false;
}
return true;
}
-template<class T> bool VerifyIds(std::vector<std::unique_ptr<T>>& orig,
- std::vector<std::unique_ptr<T>>& output,
+template<class T> bool VerifyIds(dex_ir::CollectionVector<T>& orig,
+ dex_ir::CollectionVector<T>& output,
const char* section_name,
std::string* error_msg) {
- if (orig.size() != output.size()) {
- *error_msg = StringPrintf(
- "Mismatched size for %s section: %zu vs %zu.", section_name, orig.size(), output.size());
- return false;
- }
- for (size_t i = 0; i < orig.size(); ++i) {
- if (!VerifyId(orig[i].get(), output[i].get(), error_msg)) {
+ auto orig_iter = orig.begin();
+ auto output_iter = output.begin();
+ for (; orig_iter != orig.end() && output_iter != output.end(); ++orig_iter, ++output_iter) {
+ if (!VerifyId(orig_iter->get(), output_iter->get(), error_msg)) {
return false;
}
}
+ if (orig_iter != orig.end() || output_iter != output.end()) {
+ const char* longer;
+ if (orig_iter == orig.end()) {
+ longer = "output";
+ } else {
+ longer = "original";
+ }
+ *error_msg = StringPrintf("Mismatch for %s section: %s is longer.", section_name, longer);
+ return false;
+ }
return true;
}
@@ -181,29 +185,36 @@ struct ClassDefCompare {
// The class defs may have a new order due to dexlayout. Use the class's class_idx to uniquely
// identify them and sort them for comparison.
-bool VerifyClassDefs(std::vector<std::unique_ptr<dex_ir::ClassDef>>& orig,
- std::vector<std::unique_ptr<dex_ir::ClassDef>>& output,
+bool VerifyClassDefs(dex_ir::CollectionVector<dex_ir::ClassDef>& orig,
+ dex_ir::CollectionVector<dex_ir::ClassDef>& output,
std::string* error_msg) {
- if (orig.size() != output.size()) {
- *error_msg = StringPrintf(
- "Mismatched size for class defs section: %zu vs %zu.", orig.size(), output.size());
- return false;
- }
// Store the class defs into sets sorted by the class's type index.
std::set<dex_ir::ClassDef*, ClassDefCompare> orig_set;
std::set<dex_ir::ClassDef*, ClassDefCompare> output_set;
- for (size_t i = 0; i < orig.size(); ++i) {
- orig_set.insert(orig[i].get());
- output_set.insert(output[i].get());
- }
- auto orig_iter = orig_set.begin();
- auto output_iter = output_set.begin();
- while (orig_iter != orig_set.end() && output_iter != output_set.end()) {
- if (!VerifyClassDef(*orig_iter, *output_iter, error_msg)) {
+ auto orig_iter = orig.begin();
+ auto output_iter = output.begin();
+ for (; orig_iter != orig.end() && output_iter != output.end(); ++orig_iter, ++output_iter) {
+ orig_set.insert(orig_iter->get());
+ output_set.insert(output_iter->get());
+ }
+ if (orig_iter != orig.end() || output_iter != output.end()) {
+ const char* longer;
+ if (orig_iter == orig.end()) {
+ longer = "output";
+ } else {
+ longer = "original";
+ }
+ *error_msg = StringPrintf("Mismatch for class defs section: %s is longer.", longer);
+ return false;
+ }
+ auto orig_set_iter = orig_set.begin();
+ auto output_set_iter = output_set.begin();
+ while (orig_set_iter != orig_set.end() && output_set_iter != output_set.end()) {
+ if (!VerifyClassDef(*orig_set_iter, *output_set_iter, error_msg)) {
return false;
}
- orig_iter++;
- output_iter++;
+ orig_set_iter++;
+ output_set_iter++;
}
return true;
}
@@ -769,8 +780,8 @@ bool VerifyFields(dex_ir::FieldItemVector* orig,
return false;
}
for (size_t i = 0; i < orig->size(); ++i) {
- dex_ir::FieldItem* orig_field = (*orig)[i].get();
- dex_ir::FieldItem* output_field = (*output)[i].get();
+ dex_ir::FieldItem* orig_field = &(*orig)[i];
+ dex_ir::FieldItem* output_field = &(*output)[i];
if (orig_field->GetFieldId()->GetIndex() != output_field->GetFieldId()->GetIndex()) {
*error_msg = StringPrintf("Mismatched field index for class data at offset %x: %u vs %u.",
orig_offset,
@@ -802,8 +813,8 @@ bool VerifyMethods(dex_ir::MethodItemVector* orig,
return false;
}
for (size_t i = 0; i < orig->size(); ++i) {
- dex_ir::MethodItem* orig_method = (*orig)[i].get();
- dex_ir::MethodItem* output_method = (*output)[i].get();
+ dex_ir::MethodItem* orig_method = &(*orig)[i];
+ dex_ir::MethodItem* output_method = &(*output)[i];
if (orig_method->GetMethodId()->GetIndex() != output_method->GetMethodId()->GetIndex()) {
*error_msg = StringPrintf("Mismatched method index for class data at offset %x: %u vs %u.",
orig_offset,
@@ -907,7 +918,7 @@ bool VerifyDebugInfo(dex_ir::DebugInfoItem* orig,
*error_msg = "DebugInfo null/non-null mismatch.";
return false;
}
- if (memcmp(orig_data, output_data, orig_size) != 0) {
+ if (orig_data != nullptr && memcmp(orig_data, output_data, orig_size) != 0) {
*error_msg = "DebugInfo bytes mismatch.";
return false;
}
diff --git a/dexlayout/dex_verify.h b/dexlayout/dex_verify.h
index 998939bbce..4943defe16 100644
--- a/dexlayout/dex_verify.h
+++ b/dexlayout/dex_verify.h
@@ -30,8 +30,8 @@ bool VerifyOutputDexFile(dex_ir::Header* orig_header,
dex_ir::Header* output_header,
std::string* error_msg);
-template<class T> bool VerifyIds(std::vector<std::unique_ptr<T>>& orig,
- std::vector<std::unique_ptr<T>>& output,
+template<class T> bool VerifyIds(dex_ir::CollectionVector<T>& orig,
+ dex_ir::CollectionVector<T>& output,
const char* section_name,
std::string* error_msg);
bool VerifyId(dex_ir::StringId* orig, dex_ir::StringId* output, std::string* error_msg);
@@ -40,8 +40,8 @@ bool VerifyId(dex_ir::ProtoId* orig, dex_ir::ProtoId* output, std::string* error
bool VerifyId(dex_ir::FieldId* orig, dex_ir::FieldId* output, std::string* error_msg);
bool VerifyId(dex_ir::MethodId* orig, dex_ir::MethodId* output, std::string* error_msg);
-bool VerifyClassDefs(std::vector<std::unique_ptr<dex_ir::ClassDef>>& orig,
- std::vector<std::unique_ptr<dex_ir::ClassDef>>& output,
+bool VerifyClassDefs(dex_ir::CollectionVector<dex_ir::ClassDef>& orig,
+ dex_ir::CollectionVector<dex_ir::ClassDef>& output,
std::string* error_msg);
bool VerifyClassDef(dex_ir::ClassDef* orig, dex_ir::ClassDef* output, std::string* error_msg);
diff --git a/dexlayout/dex_visualize.cc b/dexlayout/dex_visualize.cc
index c8aac941ff..4a36744e97 100644
--- a/dexlayout/dex_visualize.cc
+++ b/dexlayout/dex_visualize.cc
@@ -252,9 +252,9 @@ void VisualizeDexLayout(dex_ir::Header* header,
return;
}
- const uint32_t class_defs_size = header->GetCollections().ClassDefsSize();
+ const uint32_t class_defs_size = header->ClassDefs().Size();
for (uint32_t class_index = 0; class_index < class_defs_size; class_index++) {
- dex_ir::ClassDef* class_def = header->GetCollections().GetClassDef(class_index);
+ dex_ir::ClassDef* class_def = header->ClassDefs()[class_index];
dex::TypeIndex type_idx(class_def->ClassType()->GetIndex());
if (profile_info != nullptr && !profile_info->ContainsClass(*dex_file, type_idx)) {
continue;
@@ -279,22 +279,22 @@ void VisualizeDexLayout(dex_ir::Header* header,
dumper->DumpAddressRange(class_data, class_index);
if (class_data->StaticFields()) {
for (auto& field_item : *class_data->StaticFields()) {
- dumper->DumpFieldItem(field_item.get(), class_index);
+ dumper->DumpFieldItem(&field_item, class_index);
}
}
if (class_data->InstanceFields()) {
for (auto& field_item : *class_data->InstanceFields()) {
- dumper->DumpFieldItem(field_item.get(), class_index);
+ dumper->DumpFieldItem(&field_item, class_index);
}
}
if (class_data->DirectMethods()) {
for (auto& method_item : *class_data->DirectMethods()) {
- dumper->DumpMethodItem(method_item.get(), dex_file, class_index, profile_info);
+ dumper->DumpMethodItem(&method_item, dex_file, class_index, profile_info);
}
}
if (class_data->VirtualMethods()) {
for (auto& method_item : *class_data->VirtualMethods()) {
- dumper->DumpMethodItem(method_item.get(), dex_file, class_index, profile_info);
+ dumper->DumpMethodItem(&method_item, dex_file, class_index, profile_info);
}
}
}
@@ -305,7 +305,7 @@ static uint32_t FindNextByteAfterSection(dex_ir::Header* header,
const std::vector<dex_ir::DexFileSection>& sorted_sections,
size_t section_index) {
for (size_t i = section_index + 1; i < sorted_sections.size(); ++i) {
- const dex_ir::DexFileSection& section = sorted_sections.at(i);
+ const dex_ir::DexFileSection& section = sorted_sections[i];
if (section.size != 0) {
return section.offset;
}
diff --git a/dexlayout/dex_writer.cc b/dexlayout/dex_writer.cc
index eead13f69a..a4c5cda4ba 100644
--- a/dexlayout/dex_writer.cc
+++ b/dexlayout/dex_writer.cc
@@ -207,21 +207,21 @@ void DexWriter::WriteEncodedAnnotation(Stream* stream, dex_ir::EncodedAnnotation
void DexWriter::WriteEncodedFields(Stream* stream, dex_ir::FieldItemVector* fields) {
uint32_t prev_index = 0;
- for (std::unique_ptr<dex_ir::FieldItem>& field : *fields) {
- uint32_t index = field->GetFieldId()->GetIndex();
+ for (auto& field : *fields) {
+ uint32_t index = field.GetFieldId()->GetIndex();
stream->WriteUleb128(index - prev_index);
- stream->WriteUleb128(field->GetAccessFlags());
+ stream->WriteUleb128(field.GetAccessFlags());
prev_index = index;
}
}
void DexWriter::WriteEncodedMethods(Stream* stream, dex_ir::MethodItemVector* methods) {
uint32_t prev_index = 0;
- for (std::unique_ptr<dex_ir::MethodItem>& method : *methods) {
- uint32_t index = method->GetMethodId()->GetIndex();
- uint32_t code_off = method->GetCodeItem() == nullptr ? 0 : method->GetCodeItem()->GetOffset();
+ for (auto& method : *methods) {
+ uint32_t index = method.GetMethodId()->GetIndex();
+ uint32_t code_off = method.GetCodeItem() == nullptr ? 0 : method.GetCodeItem()->GetOffset();
stream->WriteUleb128(index - prev_index);
- stream->WriteUleb128(method->GetAccessFlags());
+ stream->WriteUleb128(method.GetAccessFlags());
stream->WriteUleb128(code_off);
prev_index = index;
}
@@ -231,7 +231,7 @@ void DexWriter::WriteEncodedMethods(Stream* stream, dex_ir::MethodItemVector* me
// function that takes a CollectionVector<T> and uses overloading.
void DexWriter::WriteStringIds(Stream* stream, bool reserve_only) {
const uint32_t start = stream->Tell();
- for (std::unique_ptr<dex_ir::StringId>& string_id : header_->GetCollections().StringIds()) {
+ for (auto& string_id : header_->StringIds()) {
stream->AlignTo(SectionAlignment(DexFile::kDexTypeStringIdItem));
if (reserve_only) {
stream->Skip(string_id->GetSize());
@@ -241,7 +241,7 @@ void DexWriter::WriteStringIds(Stream* stream, bool reserve_only) {
}
}
if (compute_offsets_ && start != stream->Tell()) {
- header_->GetCollections().SetStringIdsOffset(start);
+ header_->StringIds().SetOffset(start);
}
}
@@ -256,25 +256,25 @@ void DexWriter::WriteStringData(Stream* stream, dex_ir::StringData* string_data)
void DexWriter::WriteStringDatas(Stream* stream) {
const uint32_t start = stream->Tell();
- for (std::unique_ptr<dex_ir::StringData>& string_data : header_->GetCollections().StringDatas()) {
+ for (auto& string_data : header_->StringDatas()) {
WriteStringData(stream, string_data.get());
}
if (compute_offsets_ && start != stream->Tell()) {
- header_->GetCollections().SetStringDatasOffset(start);
+ header_->StringDatas().SetOffset(start);
}
}
void DexWriter::WriteTypeIds(Stream* stream) {
uint32_t descriptor_idx[1];
const uint32_t start = stream->Tell();
- for (std::unique_ptr<dex_ir::TypeId>& type_id : header_->GetCollections().TypeIds()) {
+ for (auto& type_id : header_->TypeIds()) {
stream->AlignTo(SectionAlignment(DexFile::kDexTypeTypeIdItem));
ProcessOffset(stream, type_id.get());
descriptor_idx[0] = type_id->GetStringId()->GetIndex();
stream->Write(descriptor_idx, type_id->GetSize());
}
if (compute_offsets_ && start != stream->Tell()) {
- header_->GetCollections().SetTypeIdsOffset(start);
+ header_->TypeIds().SetOffset(start);
}
}
@@ -282,7 +282,7 @@ void DexWriter::WriteTypeLists(Stream* stream) {
uint32_t size[1];
uint16_t list[1];
const uint32_t start = stream->Tell();
- for (std::unique_ptr<dex_ir::TypeList>& type_list : header_->GetCollections().TypeLists()) {
+ for (auto& type_list : header_->TypeLists()) {
stream->AlignTo(SectionAlignment(DexFile::kDexTypeTypeList));
size[0] = type_list->GetTypeList()->size();
ProcessOffset(stream, type_list.get());
@@ -293,14 +293,14 @@ void DexWriter::WriteTypeLists(Stream* stream) {
}
}
if (compute_offsets_ && start != stream->Tell()) {
- header_->GetCollections().SetTypeListsOffset(start);
+ header_->TypeLists().SetOffset(start);
}
}
void DexWriter::WriteProtoIds(Stream* stream, bool reserve_only) {
uint32_t buffer[3];
const uint32_t start = stream->Tell();
- for (std::unique_ptr<dex_ir::ProtoId>& proto_id : header_->GetCollections().ProtoIds()) {
+ for (auto& proto_id : header_->ProtoIds()) {
stream->AlignTo(SectionAlignment(DexFile::kDexTypeProtoIdItem));
ProcessOffset(stream, proto_id.get());
if (reserve_only) {
@@ -313,14 +313,14 @@ void DexWriter::WriteProtoIds(Stream* stream, bool reserve_only) {
}
}
if (compute_offsets_ && start != stream->Tell()) {
- header_->GetCollections().SetProtoIdsOffset(start);
+ header_->ProtoIds().SetOffset(start);
}
}
void DexWriter::WriteFieldIds(Stream* stream) {
uint16_t buffer[4];
const uint32_t start = stream->Tell();
- for (std::unique_ptr<dex_ir::FieldId>& field_id : header_->GetCollections().FieldIds()) {
+ for (auto& field_id : header_->FieldIds()) {
stream->AlignTo(SectionAlignment(DexFile::kDexTypeFieldIdItem));
ProcessOffset(stream, field_id.get());
buffer[0] = field_id->Class()->GetIndex();
@@ -330,14 +330,14 @@ void DexWriter::WriteFieldIds(Stream* stream) {
stream->Write(buffer, field_id->GetSize());
}
if (compute_offsets_ && start != stream->Tell()) {
- header_->GetCollections().SetFieldIdsOffset(start);
+ header_->FieldIds().SetOffset(start);
}
}
void DexWriter::WriteMethodIds(Stream* stream) {
uint16_t buffer[4];
const uint32_t start = stream->Tell();
- for (std::unique_ptr<dex_ir::MethodId>& method_id : header_->GetCollections().MethodIds()) {
+ for (auto& method_id : header_->MethodIds()) {
stream->AlignTo(SectionAlignment(DexFile::kDexTypeMethodIdItem));
ProcessOffset(stream, method_id.get());
buffer[0] = method_id->Class()->GetIndex();
@@ -347,28 +347,26 @@ void DexWriter::WriteMethodIds(Stream* stream) {
stream->Write(buffer, method_id->GetSize());
}
if (compute_offsets_ && start != stream->Tell()) {
- header_->GetCollections().SetMethodIdsOffset(start);
+ header_->MethodIds().SetOffset(start);
}
}
void DexWriter::WriteEncodedArrays(Stream* stream) {
const uint32_t start = stream->Tell();
- for (std::unique_ptr<dex_ir::EncodedArrayItem>& encoded_array :
- header_->GetCollections().EncodedArrayItems()) {
+ for (auto& encoded_array : header_->EncodedArrayItems()) {
stream->AlignTo(SectionAlignment(DexFile::kDexTypeEncodedArrayItem));
ProcessOffset(stream, encoded_array.get());
WriteEncodedArray(stream, encoded_array->GetEncodedValues());
}
if (compute_offsets_ && start != stream->Tell()) {
- header_->GetCollections().SetEncodedArrayItemsOffset(start);
+ header_->EncodedArrayItems().SetOffset(start);
}
}
void DexWriter::WriteAnnotations(Stream* stream) {
uint8_t visibility[1];
const uint32_t start = stream->Tell();
- for (std::unique_ptr<dex_ir::AnnotationItem>& annotation :
- header_->GetCollections().AnnotationItems()) {
+ for (auto& annotation : header_->AnnotationItems()) {
stream->AlignTo(SectionAlignment(DexFile::kDexTypeAnnotationItem));
visibility[0] = annotation->GetVisibility();
ProcessOffset(stream, annotation.get());
@@ -376,7 +374,7 @@ void DexWriter::WriteAnnotations(Stream* stream) {
WriteEncodedAnnotation(stream, annotation->GetAnnotation());
}
if (compute_offsets_ && start != stream->Tell()) {
- header_->GetCollections().SetAnnotationItemsOffset(start);
+ header_->AnnotationItems().SetOffset(start);
}
}
@@ -384,8 +382,7 @@ void DexWriter::WriteAnnotationSets(Stream* stream) {
uint32_t size[1];
uint32_t annotation_off[1];
const uint32_t start = stream->Tell();
- for (std::unique_ptr<dex_ir::AnnotationSetItem>& annotation_set :
- header_->GetCollections().AnnotationSetItems()) {
+ for (auto& annotation_set : header_->AnnotationSetItems()) {
stream->AlignTo(SectionAlignment(DexFile::kDexTypeAnnotationSetItem));
size[0] = annotation_set->GetItems()->size();
ProcessOffset(stream, annotation_set.get());
@@ -396,7 +393,7 @@ void DexWriter::WriteAnnotationSets(Stream* stream) {
}
}
if (compute_offsets_ && start != stream->Tell()) {
- header_->GetCollections().SetAnnotationSetItemsOffset(start);
+ header_->AnnotationSetItems().SetOffset(start);
}
}
@@ -404,8 +401,7 @@ void DexWriter::WriteAnnotationSetRefs(Stream* stream) {
uint32_t size[1];
uint32_t annotations_off[1];
const uint32_t start = stream->Tell();
- for (std::unique_ptr<dex_ir::AnnotationSetRefList>& annotation_set_ref :
- header_->GetCollections().AnnotationSetRefLists()) {
+ for (auto& annotation_set_ref : header_->AnnotationSetRefLists()) {
stream->AlignTo(SectionAlignment(DexFile::kDexTypeAnnotationSetRefList));
size[0] = annotation_set_ref->GetItems()->size();
ProcessOffset(stream, annotation_set_ref.get());
@@ -416,7 +412,7 @@ void DexWriter::WriteAnnotationSetRefs(Stream* stream) {
}
}
if (compute_offsets_ && start != stream->Tell()) {
- header_->GetCollections().SetAnnotationSetRefListsOffset(start);
+ header_->AnnotationSetRefLists().SetOffset(start);
}
}
@@ -424,8 +420,7 @@ void DexWriter::WriteAnnotationsDirectories(Stream* stream) {
uint32_t directory_buffer[4];
uint32_t annotation_buffer[2];
const uint32_t start = stream->Tell();
- for (std::unique_ptr<dex_ir::AnnotationsDirectoryItem>& annotations_directory :
- header_->GetCollections().AnnotationsDirectoryItems()) {
+ for (auto& annotations_directory : header_->AnnotationsDirectoryItems()) {
stream->AlignTo(SectionAlignment(DexFile::kDexTypeAnnotationsDirectoryItem));
ProcessOffset(stream, annotations_directory.get());
directory_buffer[0] = annotations_directory->GetClassAnnotation() == nullptr ? 0 :
@@ -463,7 +458,7 @@ void DexWriter::WriteAnnotationsDirectories(Stream* stream) {
}
}
if (compute_offsets_ && start != stream->Tell()) {
- header_->GetCollections().SetAnnotationsDirectoryItemsOffset(start);
+ header_->AnnotationsDirectoryItems().SetOffset(start);
}
}
@@ -475,12 +470,11 @@ void DexWriter::WriteDebugInfoItem(Stream* stream, dex_ir::DebugInfoItem* debug_
void DexWriter::WriteDebugInfoItems(Stream* stream) {
const uint32_t start = stream->Tell();
- for (std::unique_ptr<dex_ir::DebugInfoItem>& debug_info :
- header_->GetCollections().DebugInfoItems()) {
+ for (auto& debug_info : header_->DebugInfoItems()) {
WriteDebugInfoItem(stream, debug_info.get());
}
if (compute_offsets_ && start != stream->Tell()) {
- header_->GetCollections().SetDebugInfoItemsOffset(start);
+ header_->DebugInfoItems().SetOffset(start);
}
}
@@ -558,7 +552,7 @@ void DexWriter::WriteCodeItems(Stream* stream, bool reserve_only) {
DexLayoutSections::SectionType::kSectionTypeCode)];
}
const uint32_t start = stream->Tell();
- for (auto& code_item : header_->GetCollections().CodeItems()) {
+ for (auto& code_item : header_->CodeItems()) {
uint32_t start_offset = stream->Tell();
WriteCodeItem(stream, code_item.get(), reserve_only);
// Only add the section hotness info once.
@@ -573,14 +567,14 @@ void DexWriter::WriteCodeItems(Stream* stream, bool reserve_only) {
}
if (compute_offsets_ && start != stream->Tell()) {
- header_->GetCollections().SetCodeItemsOffset(start);
+ header_->CodeItems().SetOffset(start);
}
}
void DexWriter::WriteClassDefs(Stream* stream, bool reserve_only) {
const uint32_t start = stream->Tell();
uint32_t class_def_buffer[8];
- for (std::unique_ptr<dex_ir::ClassDef>& class_def : header_->GetCollections().ClassDefs()) {
+ for (auto& class_def : header_->ClassDefs()) {
stream->AlignTo(SectionAlignment(DexFile::kDexTypeClassDefItem));
if (reserve_only) {
stream->Skip(class_def->GetSize());
@@ -602,14 +596,14 @@ void DexWriter::WriteClassDefs(Stream* stream, bool reserve_only) {
}
}
if (compute_offsets_ && start != stream->Tell()) {
- header_->GetCollections().SetClassDefsOffset(start);
+ header_->ClassDefs().SetOffset(start);
}
}
void DexWriter::WriteClassDatas(Stream* stream) {
const uint32_t start = stream->Tell();
for (const std::unique_ptr<dex_ir::ClassData>& class_data :
- header_->GetCollections().ClassDatas()) {
+ header_->ClassDatas()) {
stream->AlignTo(SectionAlignment(DexFile::kDexTypeClassDataItem));
ProcessOffset(stream, class_data.get());
stream->WriteUleb128(class_data->StaticFields()->size());
@@ -622,15 +616,14 @@ void DexWriter::WriteClassDatas(Stream* stream) {
WriteEncodedMethods(stream, class_data->VirtualMethods());
}
if (compute_offsets_ && start != stream->Tell()) {
- header_->GetCollections().SetClassDatasOffset(start);
+ header_->ClassDatas().SetOffset(start);
}
}
void DexWriter::WriteCallSiteIds(Stream* stream, bool reserve_only) {
const uint32_t start = stream->Tell();
uint32_t call_site_off[1];
- for (std::unique_ptr<dex_ir::CallSiteId>& call_site_id :
- header_->GetCollections().CallSiteIds()) {
+ for (auto& call_site_id : header_->CallSiteIds()) {
stream->AlignTo(SectionAlignment(DexFile::kDexTypeCallSiteIdItem));
if (reserve_only) {
stream->Skip(call_site_id->GetSize());
@@ -640,15 +633,14 @@ void DexWriter::WriteCallSiteIds(Stream* stream, bool reserve_only) {
}
}
if (compute_offsets_ && start != stream->Tell()) {
- header_->GetCollections().SetCallSiteIdsOffset(start);
+ header_->CallSiteIds().SetOffset(start);
}
}
void DexWriter::WriteMethodHandles(Stream* stream) {
const uint32_t start = stream->Tell();
uint16_t method_handle_buff[4];
- for (std::unique_ptr<dex_ir::MethodHandleItem>& method_handle :
- header_->GetCollections().MethodHandleItems()) {
+ for (auto& method_handle : header_->MethodHandleItems()) {
stream->AlignTo(SectionAlignment(DexFile::kDexTypeMethodHandleItem));
method_handle_buff[0] = static_cast<uint16_t>(method_handle->GetMethodHandleType());
method_handle_buff[1] = 0; // unused.
@@ -657,7 +649,7 @@ void DexWriter::WriteMethodHandles(Stream* stream) {
stream->Write(method_handle_buff, method_handle->GetSize());
}
if (compute_offsets_ && start != stream->Tell()) {
- header_->GetCollections().SetMethodHandleItemsOffset(start);
+ header_->MethodHandleItems().SetOffset(start);
}
}
@@ -678,67 +670,66 @@ void DexWriter::WriteMapItems(Stream* stream, MapItemQueue* queue) {
}
void DexWriter::GenerateAndWriteMapItems(Stream* stream) {
- dex_ir::Collections& collection = header_->GetCollections();
MapItemQueue queue;
// Header and index section.
queue.AddIfNotEmpty(MapItem(DexFile::kDexTypeHeaderItem, 1, 0));
queue.AddIfNotEmpty(MapItem(DexFile::kDexTypeStringIdItem,
- collection.StringIdsSize(),
- collection.StringIdsOffset()));
+ header_->StringIds().Size(),
+ header_->StringIds().GetOffset()));
queue.AddIfNotEmpty(MapItem(DexFile::kDexTypeTypeIdItem,
- collection.TypeIdsSize(),
- collection.TypeIdsOffset()));
+ header_->TypeIds().Size(),
+ header_->TypeIds().GetOffset()));
queue.AddIfNotEmpty(MapItem(DexFile::kDexTypeProtoIdItem,
- collection.ProtoIdsSize(),
- collection.ProtoIdsOffset()));
+ header_->ProtoIds().Size(),
+ header_->ProtoIds().GetOffset()));
queue.AddIfNotEmpty(MapItem(DexFile::kDexTypeFieldIdItem,
- collection.FieldIdsSize(),
- collection.FieldIdsOffset()));
+ header_->FieldIds().Size(),
+ header_->FieldIds().GetOffset()));
queue.AddIfNotEmpty(MapItem(DexFile::kDexTypeMethodIdItem,
- collection.MethodIdsSize(),
- collection.MethodIdsOffset()));
+ header_->MethodIds().Size(),
+ header_->MethodIds().GetOffset()));
queue.AddIfNotEmpty(MapItem(DexFile::kDexTypeClassDefItem,
- collection.ClassDefsSize(),
- collection.ClassDefsOffset()));
+ header_->ClassDefs().Size(),
+ header_->ClassDefs().GetOffset()));
queue.AddIfNotEmpty(MapItem(DexFile::kDexTypeCallSiteIdItem,
- collection.CallSiteIdsSize(),
- collection.CallSiteIdsOffset()));
+ header_->CallSiteIds().Size(),
+ header_->CallSiteIds().GetOffset()));
queue.AddIfNotEmpty(MapItem(DexFile::kDexTypeMethodHandleItem,
- collection.MethodHandleItemsSize(),
- collection.MethodHandleItemsOffset()));
+ header_->MethodHandleItems().Size(),
+ header_->MethodHandleItems().GetOffset()));
// Data section.
- queue.AddIfNotEmpty(MapItem(DexFile::kDexTypeMapList, 1, collection.MapListOffset()));
+ queue.AddIfNotEmpty(MapItem(DexFile::kDexTypeMapList, 1, header_->MapListOffset()));
queue.AddIfNotEmpty(MapItem(DexFile::kDexTypeTypeList,
- collection.TypeListsSize(),
- collection.TypeListsOffset()));
+ header_->TypeLists().Size(),
+ header_->TypeLists().GetOffset()));
queue.AddIfNotEmpty(MapItem(DexFile::kDexTypeAnnotationSetRefList,
- collection.AnnotationSetRefListsSize(),
- collection.AnnotationSetRefListsOffset()));
+ header_->AnnotationSetRefLists().Size(),
+ header_->AnnotationSetRefLists().GetOffset()));
queue.AddIfNotEmpty(MapItem(DexFile::kDexTypeAnnotationSetItem,
- collection.AnnotationSetItemsSize(),
- collection.AnnotationSetItemsOffset()));
+ header_->AnnotationSetItems().Size(),
+ header_->AnnotationSetItems().GetOffset()));
queue.AddIfNotEmpty(MapItem(DexFile::kDexTypeClassDataItem,
- collection.ClassDatasSize(),
- collection.ClassDatasOffset()));
+ header_->ClassDatas().Size(),
+ header_->ClassDatas().GetOffset()));
queue.AddIfNotEmpty(MapItem(DexFile::kDexTypeCodeItem,
- collection.CodeItemsSize(),
- collection.CodeItemsOffset()));
+ header_->CodeItems().Size(),
+ header_->CodeItems().GetOffset()));
queue.AddIfNotEmpty(MapItem(DexFile::kDexTypeStringDataItem,
- collection.StringDatasSize(),
- collection.StringDatasOffset()));
+ header_->StringDatas().Size(),
+ header_->StringDatas().GetOffset()));
queue.AddIfNotEmpty(MapItem(DexFile::kDexTypeDebugInfoItem,
- collection.DebugInfoItemsSize(),
- collection.DebugInfoItemsOffset()));
+ header_->DebugInfoItems().Size(),
+ header_->DebugInfoItems().GetOffset()));
queue.AddIfNotEmpty(MapItem(DexFile::kDexTypeAnnotationItem,
- collection.AnnotationItemsSize(),
- collection.AnnotationItemsOffset()));
+ header_->AnnotationItems().Size(),
+ header_->AnnotationItems().GetOffset()));
queue.AddIfNotEmpty(MapItem(DexFile::kDexTypeEncodedArrayItem,
- collection.EncodedArrayItemsSize(),
- collection.EncodedArrayItemsOffset()));
+ header_->EncodedArrayItems().Size(),
+ header_->EncodedArrayItems().GetOffset()));
queue.AddIfNotEmpty(MapItem(DexFile::kDexTypeAnnotationsDirectoryItem,
- collection.AnnotationsDirectoryItemsSize(),
- collection.AnnotationsDirectoryItemsOffset()));
+ header_->AnnotationsDirectoryItems().Size(),
+ header_->AnnotationsDirectoryItems().GetOffset()));
WriteMapItems(stream, &queue);
}
@@ -761,20 +752,19 @@ void DexWriter::WriteHeader(Stream* stream) {
header.endian_tag_ = header_->EndianTag();
header.link_size_ = header_->LinkSize();
header.link_off_ = header_->LinkOffset();
- const dex_ir::Collections& collections = header_->GetCollections();
- header.map_off_ = collections.MapListOffset();
- header.string_ids_size_ = collections.StringIdsSize();
- header.string_ids_off_ = collections.StringIdsOffset();
- header.type_ids_size_ = collections.TypeIdsSize();
- header.type_ids_off_ = collections.TypeIdsOffset();
- header.proto_ids_size_ = collections.ProtoIdsSize();
- header.proto_ids_off_ = collections.ProtoIdsOffset();
- header.field_ids_size_ = collections.FieldIdsSize();
- header.field_ids_off_ = collections.FieldIdsOffset();
- header.method_ids_size_ = collections.MethodIdsSize();
- header.method_ids_off_ = collections.MethodIdsOffset();
- header.class_defs_size_ = collections.ClassDefsSize();
- header.class_defs_off_ = collections.ClassDefsOffset();
+ header.map_off_ = header_->MapListOffset();
+ header.string_ids_size_ = header_->StringIds().Size();
+ header.string_ids_off_ = header_->StringIds().GetOffset();
+ header.type_ids_size_ = header_->TypeIds().Size();
+ header.type_ids_off_ = header_->TypeIds().GetOffset();
+ header.proto_ids_size_ = header_->ProtoIds().Size();
+ header.proto_ids_off_ = header_->ProtoIds().GetOffset();
+ header.field_ids_size_ = header_->FieldIds().Size();
+ header.field_ids_off_ = header_->FieldIds().GetOffset();
+ header.method_ids_size_ = header_->MethodIds().Size();
+ header.method_ids_off_ = header_->MethodIds().GetOffset();
+ header.class_defs_size_ = header_->ClassDefs().Size();
+ header.class_defs_off_ = header_->ClassDefs().GetOffset();
header.data_size_ = header_->DataSize();
header.data_off_ = header_->DataOffset();
@@ -797,8 +787,6 @@ bool DexWriter::Write(DexContainer* output, std::string* error_msg) {
// Starting offset is right after the header.
stream->Seek(GetHeaderSize());
- dex_ir::Collections& collection = header_->GetCollections();
-
// Based on: https://source.android.com/devices/tech/dalvik/dex-format
// Since the offsets may not be calculated already, the writing must be done in the correct order.
const uint32_t string_ids_offset = stream->Tell();
@@ -863,9 +851,9 @@ bool DexWriter::Write(DexContainer* output, std::string* error_msg) {
// Write the map list.
if (compute_offsets_) {
stream->AlignTo(SectionAlignment(DexFile::kDexTypeMapList));
- collection.SetMapListOffset(stream->Tell());
+ header_->SetMapListOffset(stream->Tell());
} else {
- stream->Seek(collection.MapListOffset());
+ stream->Seek(header_->MapListOffset());
}
GenerateAndWriteMapItems(stream);
stream->AlignTo(kDataSectionAlignment);
@@ -882,7 +870,7 @@ bool DexWriter::Write(DexContainer* output, std::string* error_msg) {
}
// Write link data if it exists.
- const std::vector<uint8_t>& link_data = collection.LinkData();
+ const std::vector<uint8_t>& link_data = header_->LinkData();
if (link_data.size() > 0) {
CHECK_EQ(header_->LinkSize(), static_cast<uint32_t>(link_data.size()));
if (compute_offsets_) {
diff --git a/dexlayout/dexdiag.cc b/dexlayout/dexdiag.cc
index aa4e6d031e..493a8a2793 100644
--- a/dexlayout/dexdiag.cc
+++ b/dexlayout/dexdiag.cc
@@ -90,7 +90,9 @@ class PageCount {
map_[type]++;
}
size_t Get(uint16_t type) const {
- return map_.at(type);
+ auto it = map_.find(type);
+ DCHECK(it != map_.end());
+ return it->second;
}
private:
std::map<uint16_t, size_t> map_;
diff --git a/dexlayout/dexlayout.cc b/dexlayout/dexlayout.cc
index 62dd1a9554..d6dd9d1829 100644
--- a/dexlayout/dexlayout.cc
+++ b/dexlayout/dexlayout.cc
@@ -417,24 +417,24 @@ static std::unique_ptr<char[]> IndexString(dex_ir::Header* header,
outSize = snprintf(buf.get(), buf_size, "<no-index>");
break;
case Instruction::kIndexTypeRef:
- if (index < header->GetCollections().TypeIdsSize()) {
- const char* tp = header->GetCollections().GetTypeId(index)->GetStringId()->Data();
+ if (index < header->TypeIds().Size()) {
+ const char* tp = header->TypeIds()[index]->GetStringId()->Data();
outSize = snprintf(buf.get(), buf_size, "%s // type@%0*x", tp, width, index);
} else {
outSize = snprintf(buf.get(), buf_size, "<type?> // type@%0*x", width, index);
}
break;
case Instruction::kIndexStringRef:
- if (index < header->GetCollections().StringIdsSize()) {
- const char* st = header->GetCollections().GetStringId(index)->Data();
+ if (index < header->StringIds().Size()) {
+ const char* st = header->StringIds()[index]->Data();
outSize = snprintf(buf.get(), buf_size, "\"%s\" // string@%0*x", st, width, index);
} else {
outSize = snprintf(buf.get(), buf_size, "<string?> // string@%0*x", width, index);
}
break;
case Instruction::kIndexMethodRef:
- if (index < header->GetCollections().MethodIdsSize()) {
- dex_ir::MethodId* method_id = header->GetCollections().GetMethodId(index);
+ if (index < header->MethodIds().Size()) {
+ dex_ir::MethodId* method_id = header->MethodIds()[index];
const char* name = method_id->Name()->Data();
std::string type_descriptor = GetSignatureForProtoId(method_id->Proto());
const char* back_descriptor = method_id->Class()->GetStringId()->Data();
@@ -445,8 +445,8 @@ static std::unique_ptr<char[]> IndexString(dex_ir::Header* header,
}
break;
case Instruction::kIndexFieldRef:
- if (index < header->GetCollections().FieldIdsSize()) {
- dex_ir::FieldId* field_id = header->GetCollections().GetFieldId(index);
+ if (index < header->FieldIds().Size()) {
+ dex_ir::FieldId* field_id = header->FieldIds()[index];
const char* name = field_id->Name()->Data();
const char* type_descriptor = field_id->Type()->GetStringId()->Data();
const char* back_descriptor = field_id->Class()->GetStringId()->Data();
@@ -466,15 +466,15 @@ static std::unique_ptr<char[]> IndexString(dex_ir::Header* header,
case Instruction::kIndexMethodAndProtoRef: {
std::string method("<method?>");
std::string proto("<proto?>");
- if (index < header->GetCollections().MethodIdsSize()) {
- dex_ir::MethodId* method_id = header->GetCollections().GetMethodId(index);
+ if (index < header->MethodIds().Size()) {
+ dex_ir::MethodId* method_id = header->MethodIds()[index];
const char* name = method_id->Name()->Data();
std::string type_descriptor = GetSignatureForProtoId(method_id->Proto());
const char* back_descriptor = method_id->Class()->GetStringId()->Data();
method = StringPrintf("%s.%s:%s", back_descriptor, name, type_descriptor.c_str());
}
- if (secondary_index < header->GetCollections().ProtoIdsSize()) {
- dex_ir::ProtoId* proto_id = header->GetCollections().GetProtoId(secondary_index);
+ if (secondary_index < header->ProtoIds().Size()) {
+ dex_ir::ProtoId* proto_id = header->ProtoIds()[secondary_index];
proto = GetSignatureForProtoId(proto_id);
}
outSize = snprintf(buf.get(), buf_size, "%s, %s // method@%0*x, proto@%0*x",
@@ -596,7 +596,6 @@ void DexLayout::DumpEncodedValue(const dex_ir::EncodedValue* data) {
*/
void DexLayout::DumpFileHeader() {
char sanitized[8 * 2 + 1];
- dex_ir::Collections& collections = header_->GetCollections();
fprintf(out_file_, "DEX file header:\n");
Asciify(sanitized, header_->Magic(), 8);
fprintf(out_file_, "magic : '%s'\n", sanitized);
@@ -610,24 +609,24 @@ void DexLayout::DumpFileHeader() {
fprintf(out_file_, "link_size : %d\n", header_->LinkSize());
fprintf(out_file_, "link_off : %d (0x%06x)\n",
header_->LinkOffset(), header_->LinkOffset());
- fprintf(out_file_, "string_ids_size : %d\n", collections.StringIdsSize());
+ fprintf(out_file_, "string_ids_size : %d\n", header_->StringIds().Size());
fprintf(out_file_, "string_ids_off : %d (0x%06x)\n",
- collections.StringIdsOffset(), collections.StringIdsOffset());
- fprintf(out_file_, "type_ids_size : %d\n", collections.TypeIdsSize());
+ header_->StringIds().GetOffset(), header_->StringIds().GetOffset());
+ fprintf(out_file_, "type_ids_size : %d\n", header_->TypeIds().Size());
fprintf(out_file_, "type_ids_off : %d (0x%06x)\n",
- collections.TypeIdsOffset(), collections.TypeIdsOffset());
- fprintf(out_file_, "proto_ids_size : %d\n", collections.ProtoIdsSize());
+ header_->TypeIds().GetOffset(), header_->TypeIds().GetOffset());
+ fprintf(out_file_, "proto_ids_size : %d\n", header_->ProtoIds().Size());
fprintf(out_file_, "proto_ids_off : %d (0x%06x)\n",
- collections.ProtoIdsOffset(), collections.ProtoIdsOffset());
- fprintf(out_file_, "field_ids_size : %d\n", collections.FieldIdsSize());
+ header_->ProtoIds().GetOffset(), header_->ProtoIds().GetOffset());
+ fprintf(out_file_, "field_ids_size : %d\n", header_->FieldIds().Size());
fprintf(out_file_, "field_ids_off : %d (0x%06x)\n",
- collections.FieldIdsOffset(), collections.FieldIdsOffset());
- fprintf(out_file_, "method_ids_size : %d\n", collections.MethodIdsSize());
+ header_->FieldIds().GetOffset(), header_->FieldIds().GetOffset());
+ fprintf(out_file_, "method_ids_size : %d\n", header_->MethodIds().Size());
fprintf(out_file_, "method_ids_off : %d (0x%06x)\n",
- collections.MethodIdsOffset(), collections.MethodIdsOffset());
- fprintf(out_file_, "class_defs_size : %d\n", collections.ClassDefsSize());
+ header_->MethodIds().GetOffset(), header_->MethodIds().GetOffset());
+ fprintf(out_file_, "class_defs_size : %d\n", header_->ClassDefs().Size());
fprintf(out_file_, "class_defs_off : %d (0x%06x)\n",
- collections.ClassDefsOffset(), collections.ClassDefsOffset());
+ header_->ClassDefs().GetOffset(), header_->ClassDefs().GetOffset());
fprintf(out_file_, "data_size : %d\n", header_->DataSize());
fprintf(out_file_, "data_off : %d (0x%06x)\n\n",
header_->DataOffset(), header_->DataOffset());
@@ -638,7 +637,7 @@ void DexLayout::DumpFileHeader() {
*/
void DexLayout::DumpClassDef(int idx) {
// General class information.
- dex_ir::ClassDef* class_def = header_->GetCollections().GetClassDef(idx);
+ dex_ir::ClassDef* class_def = header_->ClassDefs()[idx];
fprintf(out_file_, "Class #%d header:\n", idx);
fprintf(out_file_, "class_idx : %d\n", class_def->ClassType()->GetIndex());
fprintf(out_file_, "access_flags : %d (0x%04x)\n",
@@ -719,7 +718,7 @@ void DexLayout::DumpAnnotationSetItem(dex_ir::AnnotationSetItem* set_item) {
* Dumps class annotations.
*/
void DexLayout::DumpClassAnnotations(int idx) {
- dex_ir::ClassDef* class_def = header_->GetCollections().GetClassDef(idx);
+ dex_ir::ClassDef* class_def = header_->ClassDefs()[idx];
dex_ir::AnnotationsDirectoryItem* annotations_directory = class_def->Annotations();
if (annotations_directory == nullptr) {
return; // none
@@ -1039,7 +1038,7 @@ void DexLayout::DumpInstruction(const dex_ir::CodeItem* code,
* Dumps a bytecode disassembly.
*/
void DexLayout::DumpBytecodes(uint32_t idx, const dex_ir::CodeItem* code, uint32_t code_offset) {
- dex_ir::MethodId* method_id = header_->GetCollections().GetMethodId(idx);
+ dex_ir::MethodId* method_id = header_->MethodIds()[idx];
const char* name = method_id->Name()->Data();
std::string type_descriptor = GetSignatureForProtoId(method_id->Proto());
const char* back_descriptor = method_id->Class()->GetStringId()->Data();
@@ -1083,16 +1082,16 @@ static void DumpLocalsCb(void* context, const DexFile::LocalInfo& entry) {
/*
* Lookup functions.
*/
-static const char* StringDataByIdx(uint32_t idx, dex_ir::Collections& collections) {
- dex_ir::StringId* string_id = collections.GetStringIdOrNullPtr(idx);
+static const char* StringDataByIdx(uint32_t idx, dex_ir::Header* header) {
+ dex_ir::StringId* string_id = header->GetStringIdOrNullPtr(idx);
if (string_id == nullptr) {
return nullptr;
}
return string_id->Data();
}
-static const char* StringDataByTypeIdx(uint16_t idx, dex_ir::Collections& collections) {
- dex_ir::TypeId* type_id = collections.GetTypeIdOrNullPtr(idx);
+static const char* StringDataByTypeIdx(uint16_t idx, dex_ir::Header* header) {
+ dex_ir::TypeId* type_id = header->GetTypeIdOrNullPtr(idx);
if (type_id == nullptr) {
return nullptr;
}
@@ -1134,7 +1133,7 @@ void DexLayout::DumpCode(uint32_t idx,
if (debug_info != nullptr) {
DexFile::DecodeDebugPositionInfo(debug_info->GetDebugInfo(),
[this](uint32_t idx) {
- return StringDataByIdx(idx, this->header_->GetCollections());
+ return StringDataByIdx(idx, this->header_);
},
DumpPositionsCb,
out_file_);
@@ -1161,12 +1160,12 @@ void DexLayout::DumpCode(uint32_t idx,
code->InsSize(),
code->InsnsSize(),
[this](uint32_t idx) {
- return StringDataByIdx(idx, this->header_->GetCollections());
+ return StringDataByIdx(idx, this->header_);
},
[this](uint32_t idx) {
return
StringDataByTypeIdx(dchecked_integral_cast<uint16_t>(idx),
- this->header_->GetCollections());
+ this->header_);
},
DumpLocalsCb,
out_file_);
@@ -1182,7 +1181,7 @@ void DexLayout::DumpMethod(uint32_t idx, uint32_t flags, const dex_ir::CodeItem*
return;
}
- dex_ir::MethodId* method_id = header_->GetCollections().GetMethodId(idx);
+ dex_ir::MethodId* method_id = header_->MethodIds()[idx];
const char* name = method_id->Name()->Data();
char* type_descriptor = strdup(GetSignatureForProtoId(method_id->Proto()).c_str());
const char* back_descriptor = method_id->Class()->GetStringId()->Data();
@@ -1292,7 +1291,7 @@ void DexLayout::DumpSField(uint32_t idx, uint32_t flags, int i, dex_ir::EncodedV
return;
}
- dex_ir::FieldId* field_id = header_->GetCollections().GetFieldId(idx);
+ dex_ir::FieldId* field_id = header_->FieldIds()[idx];
const char* name = field_id->Name()->Data();
const char* type_descriptor = field_id->Type()->GetStringId()->Data();
const char* back_descriptor = field_id->Class()->GetStringId()->Data();
@@ -1346,7 +1345,7 @@ void DexLayout::DumpIField(uint32_t idx, uint32_t flags, int i) {
* the value will be replaced with a newly-allocated string.
*/
void DexLayout::DumpClass(int idx, char** last_package) {
- dex_ir::ClassDef* class_def = header_->GetCollections().GetClassDef(idx);
+ dex_ir::ClassDef* class_def = header_->ClassDefs()[idx];
// Omitting non-public class.
if (options_.exports_only_ && (class_def->GetAccessFlags() & kAccPublic) == 0) {
return;
@@ -1364,8 +1363,7 @@ void DexLayout::DumpClass(int idx, char** last_package) {
// up the classes, sort them, and dump them alphabetically so the
// package name wouldn't jump around, but that's not a great plan
// for something that needs to run on the device.
- const char* class_descriptor =
- header_->GetCollections().GetClassDef(idx)->ClassType()->GetStringId()->Data();
+ const char* class_descriptor = header_->ClassDefs()[idx]->ClassType()->GetStringId()->Data();
if (!(class_descriptor[0] == 'L' &&
class_descriptor[strlen(class_descriptor)-1] == ';')) {
// Arrays and primitives should not be defined explicitly. Keep going?
@@ -1459,8 +1457,8 @@ void DexLayout::DumpClass(int idx, char** last_package) {
dex_ir::FieldItemVector* static_fields = class_data->StaticFields();
if (static_fields != nullptr) {
for (uint32_t i = 0; i < static_fields->size(); i++) {
- DumpSField((*static_fields)[i]->GetFieldId()->GetIndex(),
- (*static_fields)[i]->GetAccessFlags(),
+ DumpSField((*static_fields)[i].GetFieldId()->GetIndex(),
+ (*static_fields)[i].GetAccessFlags(),
i,
i < encoded_values_size ? (*encoded_values)[i].get() : nullptr);
} // for
@@ -1475,8 +1473,8 @@ void DexLayout::DumpClass(int idx, char** last_package) {
dex_ir::FieldItemVector* instance_fields = class_data->InstanceFields();
if (instance_fields != nullptr) {
for (uint32_t i = 0; i < instance_fields->size(); i++) {
- DumpIField((*instance_fields)[i]->GetFieldId()->GetIndex(),
- (*instance_fields)[i]->GetAccessFlags(),
+ DumpIField((*instance_fields)[i].GetFieldId()->GetIndex(),
+ (*instance_fields)[i].GetAccessFlags(),
i);
} // for
}
@@ -1490,9 +1488,9 @@ void DexLayout::DumpClass(int idx, char** last_package) {
dex_ir::MethodItemVector* direct_methods = class_data->DirectMethods();
if (direct_methods != nullptr) {
for (uint32_t i = 0; i < direct_methods->size(); i++) {
- DumpMethod((*direct_methods)[i]->GetMethodId()->GetIndex(),
- (*direct_methods)[i]->GetAccessFlags(),
- (*direct_methods)[i]->GetCodeItem(),
+ DumpMethod((*direct_methods)[i].GetMethodId()->GetIndex(),
+ (*direct_methods)[i].GetAccessFlags(),
+ (*direct_methods)[i].GetCodeItem(),
i);
} // for
}
@@ -1506,9 +1504,9 @@ void DexLayout::DumpClass(int idx, char** last_package) {
dex_ir::MethodItemVector* virtual_methods = class_data->VirtualMethods();
if (virtual_methods != nullptr) {
for (uint32_t i = 0; i < virtual_methods->size(); i++) {
- DumpMethod((*virtual_methods)[i]->GetMethodId()->GetIndex(),
- (*virtual_methods)[i]->GetAccessFlags(),
- (*virtual_methods)[i]->GetCodeItem(),
+ DumpMethod((*virtual_methods)[i].GetMethodId()->GetIndex(),
+ (*virtual_methods)[i].GetAccessFlags(),
+ (*virtual_methods)[i].GetCodeItem(),
i);
} // for
}
@@ -1543,7 +1541,7 @@ void DexLayout::DumpDexFile() {
// Iterate over all classes.
char* package = nullptr;
- const uint32_t class_defs_size = header_->GetCollections().ClassDefsSize();
+ const uint32_t class_defs_size = header_->ClassDefs().Size();
for (uint32_t i = 0; i < class_defs_size; i++) {
DumpClass(i, &package);
} // for
@@ -1562,13 +1560,13 @@ void DexLayout::DumpDexFile() {
void DexLayout::LayoutClassDefsAndClassData(const DexFile* dex_file) {
std::vector<dex_ir::ClassDef*> new_class_def_order;
- for (std::unique_ptr<dex_ir::ClassDef>& class_def : header_->GetCollections().ClassDefs()) {
+ for (auto& class_def : header_->ClassDefs()) {
dex::TypeIndex type_idx(class_def->ClassType()->GetIndex());
if (info_->ContainsClass(*dex_file, type_idx)) {
new_class_def_order.push_back(class_def.get());
}
}
- for (std::unique_ptr<dex_ir::ClassDef>& class_def : header_->GetCollections().ClassDefs()) {
+ for (auto& class_def : header_->ClassDefs()) {
dex::TypeIndex type_idx(class_def->ClassType()->GetIndex());
if (!info_->ContainsClass(*dex_file, type_idx)) {
new_class_def_order.push_back(class_def.get());
@@ -1576,8 +1574,7 @@ void DexLayout::LayoutClassDefsAndClassData(const DexFile* dex_file) {
}
std::unordered_set<dex_ir::ClassData*> visited_class_data;
size_t class_data_index = 0;
- dex_ir::CollectionVector<dex_ir::ClassData>::Vector& class_datas =
- header_->GetCollections().ClassDatas();
+ auto& class_datas = header_->ClassDatas();
for (dex_ir::ClassDef* class_def : new_class_def_order) {
dex_ir::ClassData* class_data = class_def->GetClassData();
if (class_data != nullptr && visited_class_data.find(class_data) == visited_class_data.end()) {
@@ -1590,15 +1587,14 @@ void DexLayout::LayoutClassDefsAndClassData(const DexFile* dex_file) {
++class_data_index;
}
}
- CHECK_EQ(class_data_index, class_datas.size());
+ CHECK_EQ(class_data_index, class_datas.Size());
if (DexLayout::kChangeClassDefOrder) {
// This currently produces dex files that violate the spec since the super class class_def is
// supposed to occur before any subclasses.
- dex_ir::CollectionVector<dex_ir::ClassDef>::Vector& class_defs =
- header_->GetCollections().ClassDefs();
- CHECK_EQ(new_class_def_order.size(), class_defs.size());
- for (size_t i = 0; i < class_defs.size(); ++i) {
+ dex_ir::CollectionVector<dex_ir::ClassDef>& class_defs = header_->ClassDefs();
+ CHECK_EQ(new_class_def_order.size(), class_defs.Size());
+ for (size_t i = 0; i < class_defs.Size(); ++i) {
// Overwrite the existing vector with the new ordering, note that the sets of objects are
// equivalent, but the order changes. This is why this is not a memory leak.
// TODO: Consider cleaning this up with a shared_ptr.
@@ -1609,10 +1605,10 @@ void DexLayout::LayoutClassDefsAndClassData(const DexFile* dex_file) {
}
void DexLayout::LayoutStringData(const DexFile* dex_file) {
- const size_t num_strings = header_->GetCollections().StringIds().size();
+ const size_t num_strings = header_->StringIds().Size();
std::vector<bool> is_shorty(num_strings, false);
std::vector<bool> from_hot_method(num_strings, false);
- for (std::unique_ptr<dex_ir::ClassDef>& class_def : header_->GetCollections().ClassDefs()) {
+ for (auto& class_def : header_->ClassDefs()) {
// A name of a profile class is probably going to get looked up by ClassTable::Lookup, mark it
// as hot. Add its super class and interfaces as well, which can be used during initialization.
const bool is_profile_class =
@@ -1636,14 +1632,14 @@ void DexLayout::LayoutStringData(const DexFile* dex_file) {
}
for (size_t i = 0; i < 2; ++i) {
for (auto& method : *(i == 0 ? data->DirectMethods() : data->VirtualMethods())) {
- const dex_ir::MethodId* method_id = method->GetMethodId();
- dex_ir::CodeItem* code_item = method->GetCodeItem();
+ const dex_ir::MethodId* method_id = method.GetMethodId();
+ dex_ir::CodeItem* code_item = method.GetCodeItem();
if (code_item == nullptr) {
continue;
}
const bool is_clinit = is_profile_class &&
- (method->GetAccessFlags() & kAccConstructor) != 0 &&
- (method->GetAccessFlags() & kAccStatic) != 0;
+ (method.GetAccessFlags() & kAccConstructor) != 0 &&
+ (method.GetAccessFlags() & kAccStatic) != 0;
const bool method_executed = is_clinit ||
info_->GetMethodHotness(MethodReference(dex_file, method_id->GetIndex())).IsInProfile();
if (!method_executed) {
@@ -1678,7 +1674,7 @@ void DexLayout::LayoutStringData(const DexFile* dex_file) {
}
// Sort string data by specified order.
std::vector<dex_ir::StringId*> string_ids;
- for (auto& string_id : header_->GetCollections().StringIds()) {
+ for (auto& string_id : header_->StringIds()) {
string_ids.push_back(string_id.get());
}
std::sort(string_ids.begin(),
@@ -1699,8 +1695,7 @@ void DexLayout::LayoutStringData(const DexFile* dex_file) {
// Order by index by default.
return a->GetIndex() < b->GetIndex();
});
- dex_ir::CollectionVector<dex_ir::StringData>::Vector& string_datas =
- header_->GetCollections().StringDatas();
+ auto& string_datas = header_->StringDatas();
// Now we know what order we want the string data, reorder them.
size_t data_index = 0;
for (dex_ir::StringId* string_id : string_ids) {
@@ -1713,11 +1708,11 @@ void DexLayout::LayoutStringData(const DexFile* dex_file) {
for (const std::unique_ptr<dex_ir::StringData>& data : string_datas) {
visited.insert(data.get());
}
- for (auto& string_id : header_->GetCollections().StringIds()) {
+ for (auto& string_id : header_->StringIds()) {
CHECK(visited.find(string_id->DataItem()) != visited.end());
}
}
- CHECK_EQ(data_index, string_datas.size());
+ CHECK_EQ(data_index, string_datas.Size());
}
// Orders code items according to specified class data ordering.
@@ -1732,7 +1727,7 @@ void DexLayout::LayoutCodeItems(const DexFile* dex_file) {
// Assign hotness flags to all code items.
for (InvokeType invoke_type : invoke_types) {
- for (std::unique_ptr<dex_ir::ClassDef>& class_def : header_->GetCollections().ClassDefs()) {
+ for (auto& class_def : header_->ClassDefs()) {
const bool is_profile_class =
info_->ContainsClass(*dex_file, dex::TypeIndex(class_def->ClassType()->GetIndex()));
@@ -1744,14 +1739,14 @@ void DexLayout::LayoutCodeItems(const DexFile* dex_file) {
for (auto& method : *(invoke_type == InvokeType::kDirect
? class_data->DirectMethods()
: class_data->VirtualMethods())) {
- const dex_ir::MethodId *method_id = method->GetMethodId();
- dex_ir::CodeItem *code_item = method->GetCodeItem();
+ const dex_ir::MethodId *method_id = method.GetMethodId();
+ dex_ir::CodeItem *code_item = method.GetCodeItem();
if (code_item == nullptr) {
continue;
}
// Separate executed methods (clinits and profiled methods) from unexecuted methods.
- const bool is_clinit = (method->GetAccessFlags() & kAccConstructor) != 0 &&
- (method->GetAccessFlags() & kAccStatic) != 0;
+ const bool is_clinit = (method.GetAccessFlags() & kAccConstructor) != 0 &&
+ (method.GetAccessFlags() & kAccStatic) != 0;
const bool is_startup_clinit = is_profile_class && is_clinit;
using Hotness = ProfileCompilationInfo::MethodHotness;
Hotness hotness = info_->GetMethodHotness(MethodReference(dex_file, method_id->GetIndex()));
@@ -1778,8 +1773,7 @@ void DexLayout::LayoutCodeItems(const DexFile* dex_file) {
}
}
- dex_ir::CollectionVector<dex_ir::CodeItem>::Vector& code_items =
- header_->GetCollections().CodeItems();
+ const auto& code_items = header_->CodeItems();
if (VLOG_IS_ON(dex)) {
size_t layout_count[static_cast<size_t>(LayoutType::kLayoutTypeCount)] = {};
for (const std::unique_ptr<dex_ir::CodeItem>& code_item : code_items) {
@@ -1871,7 +1865,7 @@ bool DexLayout::ProcessDexFile(const char* file_name,
const bool has_output_container = dex_container != nullptr;
const bool output = options_.output_dex_directory_ != nullptr || has_output_container;
- // Try to avoid eagerly assigning offsets to find bugs since GetOffset will abort if the offset
+ // Try to avoid eagerly assigning offsets to find bugs since Offset will abort if the offset
// is unassigned.
bool eagerly_assign_offsets = false;
if (options_.visualize_pattern_ || options_.show_section_statistics_ || options_.dump_) {
diff --git a/dexlayout/dexlayout_test.cc b/dexlayout/dexlayout_test.cc
index f148b94f3d..2b1352db16 100644
--- a/dexlayout/dexlayout_test.cc
+++ b/dexlayout/dexlayout_test.cc
@@ -468,7 +468,7 @@ class DexLayoutTest : public CommonRuntimeTest {
}
std::vector<std::string> test_files = { dex_file, profile_file, output_dex, second_output_dex };
- for (auto test_file : test_files) {
+ for (const std::string& test_file : test_files) {
if (!UnlinkFile(test_file)) {
return false;
}
@@ -501,7 +501,7 @@ class DexLayoutTest : public CommonRuntimeTest {
}
std::vector<std::string> dex_files = { input_dex, output_dex };
- for (auto dex_file : dex_files) {
+ for (const std::string& dex_file : dex_files) {
if (!UnlinkFile(dex_file)) {
return false;
}
diff --git a/imgdiag/imgdiag.cc b/imgdiag/imgdiag.cc
index ddb8fe1302..dea92e0cce 100644
--- a/imgdiag/imgdiag.cc
+++ b/imgdiag/imgdiag.cc
@@ -338,7 +338,7 @@ class ImgObjectVisitor : public ObjectVisitor {
ImgObjectVisitor(ComputeDirtyFunc dirty_func,
const uint8_t* begin_image_ptr,
const std::set<size_t>& dirty_pages) :
- dirty_func_(dirty_func),
+ dirty_func_(std::move(dirty_func)),
begin_image_ptr_(begin_image_ptr),
dirty_pages_(dirty_pages) { }
@@ -356,7 +356,7 @@ class ImgObjectVisitor : public ObjectVisitor {
}
private:
- ComputeDirtyFunc dirty_func_;
+ const ComputeDirtyFunc dirty_func_;
const uint8_t* begin_image_ptr_;
const std::set<size_t>& dirty_pages_;
};
@@ -649,7 +649,7 @@ class ImgArtMethodVisitor : public ArtMethodVisitor {
ImgArtMethodVisitor(ComputeDirtyFunc dirty_func,
const uint8_t* begin_image_ptr,
const std::set<size_t>& dirty_pages) :
- dirty_func_(dirty_func),
+ dirty_func_(std::move(dirty_func)),
begin_image_ptr_(begin_image_ptr),
dirty_pages_(dirty_pages) { }
virtual ~ImgArtMethodVisitor() OVERRIDE { }
@@ -658,7 +658,7 @@ class ImgArtMethodVisitor : public ArtMethodVisitor {
}
private:
- ComputeDirtyFunc dirty_func_;
+ const ComputeDirtyFunc dirty_func_;
const uint8_t* begin_image_ptr_;
const std::set<size_t>& dirty_pages_;
};
diff --git a/libartbase/Android.bp b/libartbase/Android.bp
index adf0ad6376..4ee48da5e8 100644
--- a/libartbase/Android.bp
+++ b/libartbase/Android.bp
@@ -31,6 +31,8 @@ cc_defaults {
"base/malloc_arena_pool.cc",
"base/memory_region.cc",
"base/mem_map.cc",
+ // "base/mem_map_fuchsia.cc", put in target when fuchsia supported by soong
+ "base/mem_map_unix.cc",
"base/os_linux.cc",
"base/runtime_debug.cc",
"base/safe_copy.cc",
diff --git a/libartbase/base/arena_allocator.h b/libartbase/base/arena_allocator.h
index 4dccd033d6..a9ccae1b07 100644
--- a/libartbase/base/arena_allocator.h
+++ b/libartbase/base/arena_allocator.h
@@ -148,34 +148,9 @@ class ArenaAllocatorStatsImpl {
typedef ArenaAllocatorStatsImpl<kArenaAllocatorCountAllocations> ArenaAllocatorStats;
-template <bool kAvailable, bool kValgrind>
-class ArenaAllocatorMemoryToolCheckImpl {
- // This is the generic template but since there is a partial specialization
- // for kValgrind == false, this can be instantiated only for kValgrind == true.
- static_assert(kValgrind, "This template can be instantiated only for Valgrind.");
- static_assert(kAvailable, "Valgrind implies memory tool availability.");
-
- public:
- ArenaAllocatorMemoryToolCheckImpl() : is_running_on_valgrind_(RUNNING_ON_MEMORY_TOOL) { }
- bool IsRunningOnMemoryTool() { return is_running_on_valgrind_; }
-
- private:
- const bool is_running_on_valgrind_;
-};
-
-template <bool kAvailable>
-class ArenaAllocatorMemoryToolCheckImpl<kAvailable, false> {
- public:
- ArenaAllocatorMemoryToolCheckImpl() { }
- bool IsRunningOnMemoryTool() { return kAvailable; }
-};
-
-typedef ArenaAllocatorMemoryToolCheckImpl<kMemoryToolIsAvailable, kMemoryToolIsValgrind>
- ArenaAllocatorMemoryToolCheck;
-
-class ArenaAllocatorMemoryTool : private ArenaAllocatorMemoryToolCheck {
+class ArenaAllocatorMemoryTool {
public:
- using ArenaAllocatorMemoryToolCheck::IsRunningOnMemoryTool;
+ bool IsRunningOnMemoryTool() { return kMemoryToolIsAvailable; }
void MakeDefined(void* ptr, size_t size) {
if (UNLIKELY(IsRunningOnMemoryTool())) {
diff --git a/libartbase/base/arena_allocator_test.cc b/libartbase/base/arena_allocator_test.cc
index e358710ca6..6323a2b97c 100644
--- a/libartbase/base/arena_allocator_test.cc
+++ b/libartbase/base/arena_allocator_test.cc
@@ -16,6 +16,7 @@
#include "arena_allocator-inl.h"
#include "arena_bit_vector.h"
+#include "base/common_art_test.h"
#include "gtest/gtest.h"
#include "malloc_arena_pool.h"
#include "memory_tool.h"
@@ -146,11 +147,8 @@ TEST_F(ArenaAllocatorTest, AllocAlignment) {
}
TEST_F(ArenaAllocatorTest, ReallocReuse) {
- // Realloc does not reuse arenas when running under sanitization. So we cannot do those
- if (RUNNING_ON_MEMORY_TOOL != 0) {
- printf("WARNING: TEST DISABLED FOR MEMORY_TOOL\n");
- return;
- }
+ // Realloc does not reuse arenas when running under sanitization.
+ TEST_DISABLED_FOR_MEMORY_TOOL();
{
// Case 1: small aligned allocation, aligned extend inside arena.
diff --git a/libartbase/base/arena_containers.h b/libartbase/base/arena_containers.h
index bd57fb1cfc..41b3bb9f5d 100644
--- a/libartbase/base/arena_containers.h
+++ b/libartbase/base/arena_containers.h
@@ -70,15 +70,15 @@ using ArenaSafeMap =
template <typename T,
typename EmptyFn = DefaultEmptyFn<T>,
- typename HashFn = std::hash<T>,
- typename Pred = std::equal_to<T>>
+ typename HashFn = DefaultHashFn<T>,
+ typename Pred = DefaultPred<T>>
using ArenaHashSet = HashSet<T, EmptyFn, HashFn, Pred, ArenaAllocatorAdapter<T>>;
template <typename Key,
typename Value,
typename EmptyFn = DefaultEmptyFn<std::pair<Key, Value>>,
- typename HashFn = std::hash<Key>,
- typename Pred = std::equal_to<Key>>
+ typename HashFn = DefaultHashFn<Key>,
+ typename Pred = DefaultPred<Key>>
using ArenaHashMap = HashMap<Key,
Value,
EmptyFn,
diff --git a/libartbase/base/atomic.h b/libartbase/base/atomic.h
index b68f867bfa..9de84cdd20 100644
--- a/libartbase/base/atomic.h
+++ b/libartbase/base/atomic.h
@@ -28,6 +28,11 @@
namespace art {
+enum class CASMode {
+ kStrong,
+ kWeak,
+};
+
template<typename T>
class PACKED(sizeof(T)) Atomic : public std::atomic<T> {
public:
@@ -100,6 +105,15 @@ class PACKED(sizeof(T)) Atomic : public std::atomic<T> {
return this->compare_exchange_weak(expected_value, desired_value, std::memory_order_release);
}
+ bool CompareAndSet(T expected_value,
+ T desired_value,
+ CASMode mode,
+ std::memory_order memory_order) {
+ return mode == CASMode::kStrong
+ ? this->compare_exchange_strong(expected_value, desired_value, memory_order)
+ : this->compare_exchange_weak(expected_value, desired_value, memory_order);
+ }
+
// Returns the address of the current atomic variable. This is only used by futex() which is
// declared to take a volatile address (see base/mutex-inl.h).
volatile T* Address() {
diff --git a/libartbase/base/bit_memory_region.h b/libartbase/base/bit_memory_region.h
index a3d3ee41d6..07c1611c60 100644
--- a/libartbase/base/bit_memory_region.h
+++ b/libartbase/base/bit_memory_region.h
@@ -57,6 +57,15 @@ class BitMemoryRegion FINAL : public ValueObject {
return result;
}
+ // Increase the size of the region and return the newly added range (starting at the old end).
+ ALWAYS_INLINE BitMemoryRegion Extend(size_t bit_length) {
+ BitMemoryRegion result = *this;
+ result.bit_start_ += result.bit_size_;
+ result.bit_size_ = bit_length;
+ bit_size_ += bit_length;
+ return result;
+ }
+
// Load a single bit in the region. The bit at offset 0 is the least
// significant bit in the first byte.
ATTRIBUTE_NO_SANITIZE_ADDRESS // We might touch extra bytes due to the alignment.
@@ -99,13 +108,6 @@ class BitMemoryRegion FINAL : public ValueObject {
return value & mask;
}
- // Load bits starting at given `bit_offset`, and advance the `bit_offset`.
- ALWAYS_INLINE uint32_t LoadBitsAndAdvance(size_t* bit_offset, size_t bit_length) const {
- uint32_t result = LoadBits(*bit_offset, bit_length);
- *bit_offset += bit_length;
- return result;
- }
-
// Store `bit_length` bits in `data` starting at given `bit_offset`.
// The least significant bit is stored in the smallest memory offset.
ALWAYS_INLINE void StoreBits(size_t bit_offset, uint32_t value, size_t bit_length) {
@@ -132,12 +134,6 @@ class BitMemoryRegion FINAL : public ValueObject {
DCHECK_EQ(value, LoadBits(bit_offset, bit_length));
}
- // Store bits starting at given `bit_offset`, and advance the `bit_offset`.
- ALWAYS_INLINE void StoreBitsAndAdvance(size_t* bit_offset, uint32_t value, size_t bit_length) {
- StoreBits(*bit_offset, value, bit_length);
- *bit_offset += bit_length;
- }
-
// Store bits from other bit region.
ALWAYS_INLINE void StoreBits(size_t bit_offset, const BitMemoryRegion& src, size_t bit_length) {
DCHECK_LE(bit_offset, bit_size_);
@@ -151,6 +147,20 @@ class BitMemoryRegion FINAL : public ValueObject {
StoreBits(bit_offset + bit, src.LoadBits(bit, num_bits), num_bits);
}
+ // Count the number of set bits within the given bit range.
+ ALWAYS_INLINE size_t PopCount(size_t bit_offset, size_t bit_length) const {
+ DCHECK_LE(bit_offset, bit_size_);
+ DCHECK_LE(bit_length, bit_size_ - bit_offset);
+ size_t count = 0;
+ size_t bit = 0;
+ constexpr size_t kNumBits = BitSizeOf<uint32_t>();
+ for (; bit + kNumBits <= bit_length; bit += kNumBits) {
+ count += POPCOUNT(LoadBits(bit_offset + bit, kNumBits));
+ }
+ count += POPCOUNT(LoadBits(bit_offset + bit, bit_length - bit));
+ return count;
+ }
+
ALWAYS_INLINE bool Equals(const BitMemoryRegion& other) const {
return data_ == other.data_ &&
bit_start_ == other.bit_start_ &&
@@ -164,6 +174,62 @@ class BitMemoryRegion FINAL : public ValueObject {
size_t bit_size_ = 0;
};
+class BitMemoryReader {
+ public:
+ explicit BitMemoryReader(const uint8_t* data, size_t bit_offset = 0) {
+ MemoryRegion region(const_cast<uint8_t*>(data), BitsToBytesRoundUp(bit_offset));
+ finished_region_ = BitMemoryRegion(region, 0, bit_offset);
+ DCHECK_EQ(GetBitOffset(), bit_offset);
+ }
+
+ size_t GetBitOffset() const { return finished_region_.size_in_bits(); }
+
+ ALWAYS_INLINE BitMemoryRegion Skip(size_t bit_length) {
+ return finished_region_.Extend(bit_length);
+ }
+
+ ALWAYS_INLINE uint32_t ReadBits(size_t bit_length) {
+ return finished_region_.Extend(bit_length).LoadBits(0, bit_length);
+ }
+
+ private:
+ // Represents all of the bits which were read so far. There is no upper bound.
+ // Therefore, by definition, the "cursor" is always at the end of the region.
+ BitMemoryRegion finished_region_;
+
+ DISALLOW_COPY_AND_ASSIGN(BitMemoryReader);
+};
+
+template<typename Vector>
+class BitMemoryWriter {
+ public:
+ explicit BitMemoryWriter(Vector* out, size_t bit_offset = 0)
+ : out_(out), bit_offset_(bit_offset) {
+ DCHECK_EQ(GetBitOffset(), bit_offset);
+ }
+
+ const uint8_t* data() const { return out_->data(); }
+
+ size_t GetBitOffset() const { return bit_offset_; }
+
+ ALWAYS_INLINE BitMemoryRegion Allocate(size_t bit_length) {
+ out_->resize(BitsToBytesRoundUp(bit_offset_ + bit_length));
+ BitMemoryRegion region(MemoryRegion(out_->data(), out_->size()), bit_offset_, bit_length);
+ bit_offset_ += bit_length;
+ return region;
+ }
+
+ ALWAYS_INLINE void WriteBits(uint32_t value, size_t bit_length) {
+ Allocate(bit_length).StoreBits(0, value, bit_length);
+ }
+
+ private:
+ Vector* out_;
+ size_t bit_offset_;
+
+ DISALLOW_COPY_AND_ASSIGN(BitMemoryWriter);
+};
+
} // namespace art
#endif // ART_LIBARTBASE_BASE_BIT_MEMORY_REGION_H_
diff --git a/libartbase/base/bit_table.h b/libartbase/base/bit_table.h
index 0ae60b9070..ee477215e7 100644
--- a/libartbase/base/bit_table.h
+++ b/libartbase/base/bit_table.h
@@ -18,6 +18,7 @@
#define ART_LIBARTBASE_BASE_BIT_TABLE_H_
#include <array>
+#include <initializer_list>
#include <numeric>
#include <string.h>
#include <type_traits>
@@ -25,6 +26,7 @@
#include "base/bit_memory_region.h"
#include "base/casts.h"
+#include "base/iteration_range.h"
#include "base/memory_region.h"
#include "base/scoped_arena_containers.h"
#include "base/stl_util.h"
@@ -38,100 +40,56 @@ constexpr uint32_t kVarintSmallValue = 11; // Maximum value which is stored as-
// The first four bits determine the variable length of the encoded integer:
// Values 0..11 represent the result as-is, with no further following bits.
// Values 12..15 mean the result is in the next 8/16/24/32-bits respectively.
-ALWAYS_INLINE static inline uint32_t DecodeVarintBits(BitMemoryRegion region, size_t* bit_offset) {
- uint32_t x = region.LoadBitsAndAdvance(bit_offset, kVarintHeaderBits);
+ALWAYS_INLINE static inline uint32_t DecodeVarintBits(BitMemoryReader& reader) {
+ uint32_t x = reader.ReadBits(kVarintHeaderBits);
if (x > kVarintSmallValue) {
- x = region.LoadBitsAndAdvance(bit_offset, (x - kVarintSmallValue) * kBitsPerByte);
+ x = reader.ReadBits((x - kVarintSmallValue) * kBitsPerByte);
}
return x;
}
// Store variable-length bit-packed integer from `data` starting at `bit_offset`.
template<typename Vector>
-ALWAYS_INLINE static inline void EncodeVarintBits(Vector* out, size_t* bit_offset, uint32_t value) {
+ALWAYS_INLINE static inline void EncodeVarintBits(BitMemoryWriter<Vector>& out, uint32_t value) {
if (value <= kVarintSmallValue) {
- out->resize(BitsToBytesRoundUp(*bit_offset + kVarintHeaderBits));
- BitMemoryRegion region(MemoryRegion(out->data(), out->size()));
- region.StoreBitsAndAdvance(bit_offset, value, kVarintHeaderBits);
+ out.WriteBits(value, kVarintHeaderBits);
} else {
uint32_t num_bits = RoundUp(MinimumBitsToStore(value), kBitsPerByte);
- out->resize(BitsToBytesRoundUp(*bit_offset + kVarintHeaderBits + num_bits));
- BitMemoryRegion region(MemoryRegion(out->data(), out->size()));
uint32_t header = kVarintSmallValue + num_bits / kBitsPerByte;
- region.StoreBitsAndAdvance(bit_offset, header, kVarintHeaderBits);
- region.StoreBitsAndAdvance(bit_offset, value, num_bits);
+ out.WriteBits(header, kVarintHeaderBits);
+ out.WriteBits(value, num_bits);
}
}
+// Generic purpose table of uint32_t values, which are tightly packed at bit level.
+// It has its own header with the number of rows and the bit-widths of all columns.
+// The values are accessible by (row, column). The value -1 is stored efficiently.
template<uint32_t kNumColumns>
-class BitTable {
+class BitTableBase {
public:
- class Accessor {
- public:
- static constexpr uint32_t kCount = kNumColumns;
- static constexpr uint32_t kNoValue = std::numeric_limits<uint32_t>::max();
-
- Accessor() {}
- Accessor(const BitTable* table, uint32_t row) : table_(table), row_(row) {}
-
- ALWAYS_INLINE uint32_t Row() const { return row_; }
-
- ALWAYS_INLINE bool IsValid() const { return table_ != nullptr && row_ < table_->NumRows(); }
-
- template<uint32_t Column>
- ALWAYS_INLINE uint32_t Get() const {
- static_assert(Column < kNumColumns, "Column out of bounds");
- return table_->Get(row_, Column);
- }
-
- ALWAYS_INLINE bool Equals(const Accessor& other) {
- return this->table_ == other.table_ && this->row_ == other.row_;
- }
-
-// Helper macro to create constructors and per-table utilities in derived class.
-#define BIT_TABLE_HEADER() \
- using BitTable<kCount>::Accessor::Accessor; /* inherit the constructors */ \
- template<int COLUMN, int UNUSED /*needed to compile*/> struct ColumnName; \
+ static constexpr uint32_t kNoValue = std::numeric_limits<uint32_t>::max(); // == -1.
+ static constexpr uint32_t kValueBias = kNoValue; // Bias so that -1 is encoded as 0.
-// Helper macro to create named column accessors in derived class.
-#define BIT_TABLE_COLUMN(COLUMN, NAME) \
- static constexpr uint32_t k##NAME = COLUMN; \
- ALWAYS_INLINE uint32_t Get##NAME() const { \
- return table_->Get(row_, COLUMN); \
- } \
- ALWAYS_INLINE bool Has##NAME() const { \
- return table_->Get(row_, COLUMN) != kNoValue; \
- } \
- template<int UNUSED> struct ColumnName<COLUMN, UNUSED> { \
- static constexpr const char* Value = #NAME; \
- }; \
-
- protected:
- const BitTable* table_ = nullptr;
- uint32_t row_ = -1;
- };
-
- static constexpr uint32_t kValueBias = -1;
-
- BitTable() {}
- BitTable(void* data, size_t size, size_t* bit_offset = 0) {
- Decode(BitMemoryRegion(MemoryRegion(data, size)), bit_offset);
+ BitTableBase() {}
+ explicit BitTableBase(BitMemoryReader& reader) {
+ Decode(reader);
}
- ALWAYS_INLINE void Decode(BitMemoryRegion region, size_t* bit_offset) {
+ ALWAYS_INLINE void Decode(BitMemoryReader& reader) {
// Decode row count and column sizes from the table header.
- num_rows_ = DecodeVarintBits(region, bit_offset);
+ size_t initial_bit_offset = reader.GetBitOffset();
+ num_rows_ = DecodeVarintBits(reader);
if (num_rows_ != 0) {
column_offset_[0] = 0;
for (uint32_t i = 0; i < kNumColumns; i++) {
- size_t column_end = column_offset_[i] + DecodeVarintBits(region, bit_offset);
+ size_t column_end = column_offset_[i] + DecodeVarintBits(reader);
column_offset_[i + 1] = dchecked_integral_cast<uint16_t>(column_end);
}
}
+ header_bit_size_ = reader.GetBitOffset() - initial_bit_offset;
// Record the region which contains the table data and skip past it.
- table_data_ = region.Subregion(*bit_offset, num_rows_ * NumRowBits());
- *bit_offset += table_data_.size_in_bits();
+ table_data_ = reader.Skip(num_rows_ * NumRowBits());
}
ALWAYS_INLINE uint32_t Get(uint32_t row, uint32_t column = 0) const {
@@ -158,55 +116,220 @@ class BitTable {
return column_offset_[column + 1] - column_offset_[column];
}
- size_t DataBitSize() const { return num_rows_ * column_offset_[kNumColumns]; }
+ size_t HeaderBitSize() const { return header_bit_size_; }
+
+ size_t BitSize() const { return header_bit_size_ + table_data_.size_in_bits(); }
protected:
BitMemoryRegion table_data_;
size_t num_rows_ = 0;
uint16_t column_offset_[kNumColumns + 1] = {};
+ uint16_t header_bit_size_ = 0;
+};
+
+// Helper class which can be used to create BitTable accessors with named getters.
+template<uint32_t NumColumns>
+class BitTableAccessor {
+ public:
+ static constexpr uint32_t kNumColumns = NumColumns;
+ static constexpr uint32_t kNoValue = BitTableBase<kNumColumns>::kNoValue;
+
+ BitTableAccessor(const BitTableBase<kNumColumns>* table, uint32_t row)
+ : table_(table), row_(row) {
+ DCHECK(table_ != nullptr);
+ }
+
+ ALWAYS_INLINE uint32_t Row() const { return row_; }
+
+ ALWAYS_INLINE bool IsValid() const { return row_ < table_->NumRows(); }
+
+ ALWAYS_INLINE bool Equals(const BitTableAccessor& other) {
+ return this->table_ == other.table_ && this->row_ == other.row_;
+ }
+
+// Helper macro to create constructors and per-table utilities in derived class.
+#define BIT_TABLE_HEADER() \
+ using BitTableAccessor<kNumColumns>::BitTableAccessor; /* inherit constructors */ \
+ template<int COLUMN, int UNUSED /*needed to compile*/> struct ColumnName; \
+
+// Helper macro to create named column accessors in derived class.
+#define BIT_TABLE_COLUMN(COLUMN, NAME) \
+ static constexpr uint32_t k##NAME = COLUMN; \
+ ALWAYS_INLINE uint32_t Get##NAME() const { return table_->Get(row_, COLUMN); } \
+ ALWAYS_INLINE bool Has##NAME() const { return Get##NAME() != kNoValue; } \
+ template<int UNUSED> struct ColumnName<COLUMN, UNUSED> { \
+ static constexpr const char* Value = #NAME; \
+ }; \
+
+ protected:
+ const BitTableBase<kNumColumns>* table_ = nullptr;
+ uint32_t row_ = -1;
};
// Template meta-programming helper.
template<typename Accessor, size_t... Columns>
-static const char** GetBitTableColumnNamesImpl(std::index_sequence<Columns...>) {
+static const char* const* GetBitTableColumnNamesImpl(std::index_sequence<Columns...>) {
static const char* names[] = { Accessor::template ColumnName<Columns, 0>::Value... };
return names;
}
+// Returns the names of all columns in the given accessor.
template<typename Accessor>
-static const char** GetBitTableColumnNames() {
- return GetBitTableColumnNamesImpl<Accessor>(std::make_index_sequence<Accessor::kCount>());
+static const char* const* GetBitTableColumnNames() {
+ return GetBitTableColumnNamesImpl<Accessor>(std::make_index_sequence<Accessor::kNumColumns>());
}
+// Wrapper which makes it easier to use named accessors for the individual rows.
+template<typename Accessor>
+class BitTable : public BitTableBase<Accessor::kNumColumns> {
+ public:
+ class const_iterator : public std::iterator<std::random_access_iterator_tag,
+ /* value_type */ Accessor,
+ /* difference_type */ int32_t,
+ /* pointer */ void,
+ /* reference */ void> {
+ public:
+ using difference_type = int32_t;
+ const_iterator() {}
+ const_iterator(const BitTable* table, uint32_t row) : table_(table), row_(row) {}
+ const_iterator operator+(difference_type n) { return const_iterator(table_, row_ + n); }
+ const_iterator operator-(difference_type n) { return const_iterator(table_, row_ - n); }
+ difference_type operator-(const const_iterator& other) { return row_ - other.row_; }
+ void operator+=(difference_type rows) { row_ += rows; }
+ void operator-=(difference_type rows) { row_ -= rows; }
+ const_iterator operator++() { return const_iterator(table_, ++row_); }
+ const_iterator operator--() { return const_iterator(table_, --row_); }
+ const_iterator operator++(int) { return const_iterator(table_, row_++); }
+ const_iterator operator--(int) { return const_iterator(table_, row_--); }
+ bool operator==(const_iterator i) const { DCHECK(table_ == i.table_); return row_ == i.row_; }
+ bool operator!=(const_iterator i) const { DCHECK(table_ == i.table_); return row_ != i.row_; }
+ bool operator<=(const_iterator i) const { DCHECK(table_ == i.table_); return row_ <= i.row_; }
+ bool operator>=(const_iterator i) const { DCHECK(table_ == i.table_); return row_ >= i.row_; }
+ bool operator<(const_iterator i) const { DCHECK(table_ == i.table_); return row_ < i.row_; }
+ bool operator>(const_iterator i) const { DCHECK(table_ == i.table_); return row_ > i.row_; }
+ Accessor operator*() {
+ DCHECK_LT(row_, table_->NumRows());
+ return Accessor(table_, row_);
+ }
+ Accessor operator->() {
+ DCHECK_LT(row_, table_->NumRows());
+ return Accessor(table_, row_);
+ }
+ Accessor operator[](size_t index) {
+ DCHECK_LT(row_ + index, table_->NumRows());
+ return Accessor(table_, row_ + index);
+ }
+ private:
+ const BitTable* table_ = nullptr;
+ uint32_t row_ = 0;
+ };
+
+ using BitTableBase<Accessor::kNumColumns>::BitTableBase; // Constructors.
+
+ ALWAYS_INLINE const_iterator begin() const { return const_iterator(this, 0); }
+ ALWAYS_INLINE const_iterator end() const { return const_iterator(this, this->NumRows()); }
+
+ ALWAYS_INLINE Accessor GetRow(uint32_t row) const {
+ return Accessor(this, row);
+ }
+
+ ALWAYS_INLINE Accessor GetInvalidRow() const {
+ return Accessor(this, static_cast<uint32_t>(-1));
+ }
+};
+
+template<typename Accessor>
+typename BitTable<Accessor>::const_iterator operator+(
+ typename BitTable<Accessor>::const_iterator::difference_type n,
+ typename BitTable<Accessor>::const_iterator a) {
+ return a + n;
+}
+
+template<typename Accessor>
+class BitTableRange : public IterationRange<typename BitTable<Accessor>::const_iterator> {
+ public:
+ typedef typename BitTable<Accessor>::const_iterator const_iterator;
+
+ using IterationRange<const_iterator>::IterationRange;
+ BitTableRange() : IterationRange<const_iterator>(const_iterator(), const_iterator()) { }
+
+ bool empty() const { return this->begin() == this->end(); }
+ size_t size() const { return this->end() - this->begin(); }
+
+ Accessor operator[](size_t index) const {
+ const_iterator it = this->begin() + index;
+ DCHECK(it < this->end());
+ return *it;
+ }
+
+ Accessor back() const {
+ DCHECK(!empty());
+ return *(this->end() - 1);
+ }
+
+ void pop_back() {
+ DCHECK(!empty());
+ --this->last_;
+ }
+};
+
// Helper class for encoding BitTable. It can optionally de-duplicate the inputs.
-// Type 'T' must be POD type consisting of uint32_t fields (one for each column).
-template<typename T>
-class BitTableBuilder {
+template<uint32_t kNumColumns>
+class BitTableBuilderBase {
public:
- static_assert(std::is_pod<T>::value, "Type 'T' must be POD");
- static constexpr size_t kNumColumns = sizeof(T) / sizeof(uint32_t);
+ static constexpr uint32_t kNoValue = BitTableBase<kNumColumns>::kNoValue;
+ static constexpr uint32_t kValueBias = BitTableBase<kNumColumns>::kValueBias;
+
+ class Entry {
+ public:
+ Entry() {
+ // The definition of kNoValue here is for host and target debug builds which complain about
+ // missing a symbol definition for BitTableBase<N>::kNovValue when optimization is off.
+ static constexpr uint32_t kNoValue = BitTableBase<kNumColumns>::kNoValue;
+ std::fill_n(data_, kNumColumns, kNoValue);
+ }
+
+ Entry(std::initializer_list<uint32_t> values) {
+ DCHECK_EQ(values.size(), kNumColumns);
+ std::copy(values.begin(), values.end(), data_);
+ }
+
+ uint32_t& operator[](size_t column) {
+ DCHECK_LT(column, kNumColumns);
+ return data_[column];
+ }
- explicit BitTableBuilder(ScopedArenaAllocator* allocator)
+ uint32_t operator[](size_t column) const {
+ DCHECK_LT(column, kNumColumns);
+ return data_[column];
+ }
+
+ private:
+ uint32_t data_[kNumColumns];
+ };
+
+ explicit BitTableBuilderBase(ScopedArenaAllocator* allocator)
: rows_(allocator->Adapter(kArenaAllocBitTableBuilder)),
dedup_(8, allocator->Adapter(kArenaAllocBitTableBuilder)) {
}
- T& operator[](size_t row) { return rows_[row]; }
- const T& operator[](size_t row) const { return rows_[row]; }
+ Entry& operator[](size_t row) { return rows_[row]; }
+ const Entry& operator[](size_t row) const { return rows_[row]; }
+ const Entry& back() const { return rows_.back(); }
size_t size() const { return rows_.size(); }
// Append given value to the vector without de-duplication.
// This will not add the element to the dedup map to avoid its associated costs.
- void Add(T value) {
+ void Add(Entry value) {
rows_.push_back(value);
}
// Append given list of values and return the index of the first value.
// If the exact same set of values was already added, return the old index.
- uint32_t Dedup(T* values, size_t count = 1) {
+ uint32_t Dedup(Entry* values, size_t count = 1) {
FNVHash<MemoryRegion> hasher;
- uint32_t hash = hasher(MemoryRegion(values, sizeof(T) * count));
+ uint32_t hash = hasher(MemoryRegion(values, sizeof(Entry) * count));
// Check if we have already added identical set of values.
auto range = dedup_.equal_range(hash);
@@ -216,8 +339,8 @@ class BitTableBuilder {
std::equal(values,
values + count,
rows_.begin() + index,
- [](const T& lhs, const T& rhs) {
- return memcmp(&lhs, &rhs, sizeof(T)) == 0;
+ [](const Entry& lhs, const Entry& rhs) {
+ return memcmp(&lhs, &rhs, sizeof(Entry)) == 0;
})) {
return index;
}
@@ -230,11 +353,8 @@ class BitTableBuilder {
return index;
}
- ALWAYS_INLINE uint32_t Get(uint32_t row, uint32_t column) const {
- DCHECK_LT(row, size());
- DCHECK_LT(column, kNumColumns);
- const uint32_t* data = reinterpret_cast<const uint32_t*>(&rows_[row]);
- return data[column];
+ uint32_t Dedup(Entry value) {
+ return Dedup(&value, /* count */ 1);
}
// Calculate the column bit widths based on the current data.
@@ -243,7 +363,7 @@ class BitTableBuilder {
std::fill_n(max_column_value, kNumColumns, 0);
for (uint32_t r = 0; r < size(); r++) {
for (uint32_t c = 0; c < kNumColumns; c++) {
- max_column_value[c] |= Get(r, c) - BitTable<kNumColumns>::kValueBias;
+ max_column_value[c] |= rows_[r][c] - kValueBias;
}
}
for (uint32_t c = 0; c < kNumColumns; c++) {
@@ -253,52 +373,54 @@ class BitTableBuilder {
// Encode the stored data into a BitTable.
template<typename Vector>
- void Encode(Vector* out, size_t* bit_offset) const {
- constexpr uint32_t bias = BitTable<kNumColumns>::kValueBias;
- size_t initial_bit_offset = *bit_offset;
+ void Encode(BitMemoryWriter<Vector>& out) const {
+ size_t initial_bit_offset = out.GetBitOffset();
std::array<uint32_t, kNumColumns> column_bits;
Measure(&column_bits);
- EncodeVarintBits(out, bit_offset, size());
+ EncodeVarintBits(out, size());
if (size() != 0) {
// Write table header.
for (uint32_t c = 0; c < kNumColumns; c++) {
- EncodeVarintBits(out, bit_offset, column_bits[c]);
+ EncodeVarintBits(out, column_bits[c]);
}
// Write table data.
- uint32_t row_bits = std::accumulate(column_bits.begin(), column_bits.end(), 0u);
- out->resize(BitsToBytesRoundUp(*bit_offset + row_bits * size()));
- BitMemoryRegion region(MemoryRegion(out->data(), out->size()));
for (uint32_t r = 0; r < size(); r++) {
for (uint32_t c = 0; c < kNumColumns; c++) {
- region.StoreBitsAndAdvance(bit_offset, Get(r, c) - bias, column_bits[c]);
+ out.WriteBits(rows_[r][c] - kValueBias, column_bits[c]);
}
}
}
// Verify the written data.
if (kIsDebugBuild) {
- BitTable<kNumColumns> table;
- BitMemoryRegion region(MemoryRegion(out->data(), out->size()));
- table.Decode(region, &initial_bit_offset);
+ BitTableBase<kNumColumns> table;
+ BitMemoryReader reader(out.data(), initial_bit_offset);
+ table.Decode(reader);
DCHECK_EQ(size(), table.NumRows());
for (uint32_t c = 0; c < kNumColumns; c++) {
DCHECK_EQ(column_bits[c], table.NumColumnBits(c));
}
for (uint32_t r = 0; r < size(); r++) {
for (uint32_t c = 0; c < kNumColumns; c++) {
- DCHECK_EQ(Get(r, c), table.Get(r, c)) << " (" << r << ", " << c << ")";
+ DCHECK_EQ(rows_[r][c], table.Get(r, c)) << " (" << r << ", " << c << ")";
}
}
}
}
protected:
- ScopedArenaDeque<T> rows_;
+ ScopedArenaDeque<Entry> rows_;
ScopedArenaUnorderedMultimap<uint32_t, uint32_t> dedup_; // Hash -> row index.
};
+template<typename Accessor>
+class BitTableBuilder : public BitTableBuilderBase<Accessor::kNumColumns> {
+ public:
+ using BitTableBuilderBase<Accessor::kNumColumns>::BitTableBuilderBase; // Constructors.
+};
+
// Helper class for encoding single-column BitTable of bitmaps (allows more than 32 bits).
class BitmapTableBuilder {
public:
@@ -342,28 +464,26 @@ class BitmapTableBuilder {
// Encode the stored data into a BitTable.
template<typename Vector>
- void Encode(Vector* out, size_t* bit_offset) const {
- size_t initial_bit_offset = *bit_offset;
+ void Encode(BitMemoryWriter<Vector>& out) const {
+ size_t initial_bit_offset = out.GetBitOffset();
- EncodeVarintBits(out, bit_offset, size());
+ EncodeVarintBits(out, size());
if (size() != 0) {
- EncodeVarintBits(out, bit_offset, max_num_bits_);
+ EncodeVarintBits(out, max_num_bits_);
// Write table data.
- out->resize(BitsToBytesRoundUp(*bit_offset + max_num_bits_ * size()));
- BitMemoryRegion region(MemoryRegion(out->data(), out->size()));
for (MemoryRegion row : rows_) {
BitMemoryRegion src(row);
- region.StoreBits(*bit_offset, src, std::min(max_num_bits_, src.size_in_bits()));
- *bit_offset += max_num_bits_;
+ BitMemoryRegion dst = out.Allocate(max_num_bits_);
+ dst.StoreBits(/* bit_offset */ 0, src, std::min(max_num_bits_, src.size_in_bits()));
}
}
// Verify the written data.
if (kIsDebugBuild) {
- BitTable<1> table;
- BitMemoryRegion region(MemoryRegion(out->data(), out->size()));
- table.Decode(region, &initial_bit_offset);
+ BitTableBase<1> table;
+ BitMemoryReader reader(out.data(), initial_bit_offset);
+ table.Decode(reader);
DCHECK_EQ(size(), table.NumRows());
DCHECK_EQ(max_num_bits_, table.NumColumnBits(0));
for (uint32_t r = 0; r < size(); r++) {
diff --git a/libartbase/base/bit_table_test.cc b/libartbase/base/bit_table_test.cc
index 8abf0da9d9..2fd9052516 100644
--- a/libartbase/base/bit_table_test.cc
+++ b/libartbase/base/bit_table_test.cc
@@ -31,13 +31,12 @@ TEST(BitTableTest, TestVarint) {
uint32_t values[] = { 0, 1, 11, 12, 15, 16, 255, 256, ~1u, ~0u };
for (uint32_t value : values) {
std::vector<uint8_t> buffer;
- size_t encode_bit_offset = start_bit_offset;
- EncodeVarintBits(&buffer, &encode_bit_offset, value);
+ BitMemoryWriter<std::vector<uint8_t>> writer(&buffer, start_bit_offset);
+ EncodeVarintBits(writer, value);
- size_t decode_bit_offset = start_bit_offset;
- BitMemoryRegion region(MemoryRegion(buffer.data(), buffer.size()));
- uint32_t result = DecodeVarintBits(region, &decode_bit_offset);
- EXPECT_EQ(encode_bit_offset, decode_bit_offset);
+ BitMemoryReader reader(buffer.data(), start_bit_offset);
+ uint32_t result = DecodeVarintBits(reader);
+ EXPECT_EQ(writer.GetBitOffset(), reader.GetBitOffset());
EXPECT_EQ(value, result);
}
}
@@ -49,13 +48,13 @@ TEST(BitTableTest, TestEmptyTable) {
ScopedArenaAllocator allocator(&arena_stack);
std::vector<uint8_t> buffer;
- size_t encode_bit_offset = 0;
- BitTableBuilder<uint32_t> builder(&allocator);
- builder.Encode(&buffer, &encode_bit_offset);
+ BitMemoryWriter<std::vector<uint8_t>> writer(&buffer);
+ BitTableBuilderBase<1> builder(&allocator);
+ builder.Encode(writer);
- size_t decode_bit_offset = 0;
- BitTable<1> table(buffer.data(), buffer.size(), &decode_bit_offset);
- EXPECT_EQ(encode_bit_offset, decode_bit_offset);
+ BitMemoryReader reader(buffer.data());
+ BitTableBase<1> table(reader);
+ EXPECT_EQ(writer.GetBitOffset(), reader.GetBitOffset());
EXPECT_EQ(0u, table.NumRows());
}
@@ -66,17 +65,17 @@ TEST(BitTableTest, TestSingleColumnTable) {
constexpr uint32_t kNoValue = -1;
std::vector<uint8_t> buffer;
- size_t encode_bit_offset = 0;
- BitTableBuilder<uint32_t> builder(&allocator);
- builder.Add(42u);
- builder.Add(kNoValue);
- builder.Add(1000u);
- builder.Add(kNoValue);
- builder.Encode(&buffer, &encode_bit_offset);
-
- size_t decode_bit_offset = 0;
- BitTable<1> table(buffer.data(), buffer.size(), &decode_bit_offset);
- EXPECT_EQ(encode_bit_offset, decode_bit_offset);
+ BitMemoryWriter<std::vector<uint8_t>> writer(&buffer);
+ BitTableBuilderBase<1> builder(&allocator);
+ builder.Add({42u});
+ builder.Add({kNoValue});
+ builder.Add({1000u});
+ builder.Add({kNoValue});
+ builder.Encode(writer);
+
+ BitMemoryReader reader(buffer.data());
+ BitTableBase<1> table(reader);
+ EXPECT_EQ(writer.GetBitOffset(), reader.GetBitOffset());
EXPECT_EQ(4u, table.NumRows());
EXPECT_EQ(42u, table.Get(0));
EXPECT_EQ(kNoValue, table.Get(1));
@@ -92,14 +91,14 @@ TEST(BitTableTest, TestUnalignedTable) {
for (size_t start_bit_offset = 0; start_bit_offset <= 32; start_bit_offset++) {
std::vector<uint8_t> buffer;
- size_t encode_bit_offset = start_bit_offset;
- BitTableBuilder<uint32_t> builder(&allocator);
- builder.Add(42u);
- builder.Encode(&buffer, &encode_bit_offset);
-
- size_t decode_bit_offset = start_bit_offset;
- BitTable<1> table(buffer.data(), buffer.size(), &decode_bit_offset);
- EXPECT_EQ(encode_bit_offset, decode_bit_offset) << " start_bit_offset=" << start_bit_offset;
+ BitMemoryWriter<std::vector<uint8_t>> writer(&buffer, start_bit_offset);
+ BitTableBuilderBase<1> builder(&allocator);
+ builder.Add({42u});
+ builder.Encode(writer);
+
+ BitMemoryReader reader(buffer.data(), start_bit_offset);
+ BitTableBase<1> table(reader);
+ EXPECT_EQ(writer.GetBitOffset(), reader.GetBitOffset());
EXPECT_EQ(1u, table.NumRows());
EXPECT_EQ(42u, table.Get(0));
}
@@ -112,21 +111,15 @@ TEST(BitTableTest, TestBigTable) {
constexpr uint32_t kNoValue = -1;
std::vector<uint8_t> buffer;
- size_t encode_bit_offset = 0;
- struct RowData {
- uint32_t a;
- uint32_t b;
- uint32_t c;
- uint32_t d;
- };
- BitTableBuilder<RowData> builder(&allocator);
- builder.Add(RowData{42u, kNoValue, 0u, static_cast<uint32_t>(-2)});
- builder.Add(RowData{62u, kNoValue, 63u, static_cast<uint32_t>(-3)});
- builder.Encode(&buffer, &encode_bit_offset);
-
- size_t decode_bit_offset = 0;
- BitTable<4> table(buffer.data(), buffer.size(), &decode_bit_offset);
- EXPECT_EQ(encode_bit_offset, decode_bit_offset);
+ BitMemoryWriter<std::vector<uint8_t>> writer(&buffer);
+ BitTableBuilderBase<4> builder(&allocator);
+ builder.Add({42u, kNoValue, 0u, static_cast<uint32_t>(-2)});
+ builder.Add({62u, kNoValue, 63u, static_cast<uint32_t>(-3)});
+ builder.Encode(writer);
+
+ BitMemoryReader reader(buffer.data());
+ BitTableBase<4> table(reader);
+ EXPECT_EQ(writer.GetBitOffset(), reader.GetBitOffset());
EXPECT_EQ(2u, table.NumRows());
EXPECT_EQ(42u, table.Get(0, 0));
EXPECT_EQ(kNoValue, table.Get(0, 1));
@@ -147,13 +140,9 @@ TEST(BitTableTest, TestDedup) {
ArenaStack arena_stack(&pool);
ScopedArenaAllocator allocator(&arena_stack);
- struct RowData {
- uint32_t a;
- uint32_t b;
- };
- BitTableBuilder<RowData> builder(&allocator);
- RowData value0{1, 2};
- RowData value1{3, 4};
+ BitTableBuilderBase<2> builder(&allocator);
+ BitTableBuilderBase<2>::Entry value0{1, 2};
+ BitTableBuilderBase<2>::Entry value1{3, 4};
EXPECT_EQ(0u, builder.Dedup(&value0));
EXPECT_EQ(1u, builder.Dedup(&value1));
EXPECT_EQ(0u, builder.Dedup(&value0));
@@ -167,7 +156,7 @@ TEST(BitTableTest, TestBitmapTable) {
ScopedArenaAllocator allocator(&arena_stack);
std::vector<uint8_t> buffer;
- size_t encode_bit_offset = 0;
+ BitMemoryWriter<std::vector<uint8_t>> writer(&buffer);
const uint64_t value = 0xDEADBEEF0BADF00Dull;
BitmapTableBuilder builder(&allocator);
std::multimap<uint64_t, size_t> indicies; // bitmap -> row.
@@ -175,12 +164,12 @@ TEST(BitTableTest, TestBitmapTable) {
uint64_t bitmap = value & MaxInt<uint64_t>(bit_length);
indicies.emplace(bitmap, builder.Dedup(&bitmap, MinimumBitsToStore(bitmap)));
}
- builder.Encode(&buffer, &encode_bit_offset);
+ builder.Encode(writer);
EXPECT_EQ(1 + static_cast<uint32_t>(POPCOUNT(value)), builder.size());
- size_t decode_bit_offset = 0;
- BitTable<1> table(buffer.data(), buffer.size(), &decode_bit_offset);
- EXPECT_EQ(encode_bit_offset, decode_bit_offset);
+ BitMemoryReader reader(buffer.data());
+ BitTableBase<1> table(reader);
+ EXPECT_EQ(writer.GetBitOffset(), reader.GetBitOffset());
for (auto it : indicies) {
uint64_t expected = it.first;
BitMemoryRegion actual = table.GetBitMemoryRegion(it.second);
@@ -197,16 +186,12 @@ TEST(BitTableTest, TestCollisions) {
ScopedArenaAllocator allocator(&arena_stack);
FNVHash<MemoryRegion> hasher;
- struct RowData {
- uint32_t a;
- uint32_t b;
- };
- RowData value0{56948505, 0};
- RowData value1{67108869, 0};
+ BitTableBuilderBase<2>::Entry value0{56948505, 0};
+ BitTableBuilderBase<2>::Entry value1{67108869, 0};
- BitTableBuilder<RowData> builder(&allocator);
- EXPECT_EQ(hasher(MemoryRegion(&value0, sizeof(RowData))),
- hasher(MemoryRegion(&value1, sizeof(RowData))));
+ BitTableBuilderBase<2> builder(&allocator);
+ EXPECT_EQ(hasher(MemoryRegion(&value0, sizeof(value0))),
+ hasher(MemoryRegion(&value1, sizeof(value1))));
EXPECT_EQ(0u, builder.Dedup(&value0));
EXPECT_EQ(1u, builder.Dedup(&value1));
EXPECT_EQ(0u, builder.Dedup(&value0));
@@ -214,12 +199,12 @@ TEST(BitTableTest, TestCollisions) {
EXPECT_EQ(2u, builder.size());
BitmapTableBuilder builder2(&allocator);
- EXPECT_EQ(hasher(MemoryRegion(&value0, BitsToBytesRoundUp(MinimumBitsToStore(value0.a)))),
- hasher(MemoryRegion(&value1, BitsToBytesRoundUp(MinimumBitsToStore(value1.a)))));
- EXPECT_EQ(0u, builder2.Dedup(&value0.a, MinimumBitsToStore(value0.a)));
- EXPECT_EQ(1u, builder2.Dedup(&value1.a, MinimumBitsToStore(value1.a)));
- EXPECT_EQ(0u, builder2.Dedup(&value0.a, MinimumBitsToStore(value0.a)));
- EXPECT_EQ(1u, builder2.Dedup(&value1.a, MinimumBitsToStore(value1.a)));
+ EXPECT_EQ(hasher(MemoryRegion(&value0, BitsToBytesRoundUp(MinimumBitsToStore(value0[0])))),
+ hasher(MemoryRegion(&value1, BitsToBytesRoundUp(MinimumBitsToStore(value1[0])))));
+ EXPECT_EQ(0u, builder2.Dedup(&value0[0], MinimumBitsToStore(value0[0])));
+ EXPECT_EQ(1u, builder2.Dedup(&value1[0], MinimumBitsToStore(value1[0])));
+ EXPECT_EQ(0u, builder2.Dedup(&value0[0], MinimumBitsToStore(value0[0])));
+ EXPECT_EQ(1u, builder2.Dedup(&value1[0], MinimumBitsToStore(value1[0])));
EXPECT_EQ(2u, builder2.size());
}
diff --git a/libartbase/base/common_art_test.h b/libartbase/base/common_art_test.h
index 3998be516d..0ace09de1a 100644
--- a/libartbase/base/common_art_test.h
+++ b/libartbase/base/common_art_test.h
@@ -210,23 +210,11 @@ using CommonArtTestWithParam = CommonArtTestBase<testing::TestWithParam<Param>>;
}
#define TEST_DISABLED_FOR_MEMORY_TOOL() \
- if (RUNNING_ON_MEMORY_TOOL > 0) { \
+ if (kRunningOnMemoryTool) { \
printf("WARNING: TEST DISABLED FOR MEMORY TOOL\n"); \
return; \
}
-#define TEST_DISABLED_FOR_MEMORY_TOOL_VALGRIND() \
- if (RUNNING_ON_MEMORY_TOOL > 0 && kMemoryToolIsValgrind) { \
- printf("WARNING: TEST DISABLED FOR MEMORY TOOL VALGRIND\n"); \
- return; \
- }
-
-#define TEST_DISABLED_FOR_MEMORY_TOOL_ASAN() \
- if (RUNNING_ON_MEMORY_TOOL > 0 && !kMemoryToolIsValgrind) { \
- printf("WARNING: TEST DISABLED FOR MEMORY TOOL ASAN\n"); \
- return; \
- }
-
#define TEST_DISABLED_FOR_HEAP_POISONING() \
if (kPoisonHeapReferences) { \
printf("WARNING: TEST DISABLED FOR HEAP POISONING\n"); \
@@ -234,4 +222,10 @@ using CommonArtTestWithParam = CommonArtTestBase<testing::TestWithParam<Param>>;
}
} // namespace art
+#define TEST_DISABLED_FOR_MEMORY_TOOL_WITH_HEAP_POISONING() \
+ if (kRunningOnMemoryTool && kPoisonHeapReferences) { \
+ printf("WARNING: TEST DISABLED FOR MEMORY TOOL WITH HEAP POISONING\n"); \
+ return; \
+ }
+
#endif // ART_LIBARTBASE_BASE_COMMON_ART_TEST_H_
diff --git a/libartbase/base/data_hash.h b/libartbase/base/data_hash.h
new file mode 100644
index 0000000000..5ad7779b80
--- /dev/null
+++ b/libartbase/base/data_hash.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_LIBARTBASE_BASE_DATA_HASH_H_
+#define ART_LIBARTBASE_BASE_DATA_HASH_H_
+
+#include "base/macros.h"
+
+namespace art {
+
+// Hash bytes using a relatively fast hash.
+static inline size_t HashBytes(const uint8_t* data, size_t len) {
+ size_t hash = 0x811c9dc5;
+ for (uint32_t i = 0; i < len; ++i) {
+ hash = (hash * 16777619) ^ data[i];
+ }
+ hash += hash << 13;
+ hash ^= hash >> 7;
+ hash += hash << 3;
+ hash ^= hash >> 17;
+ hash += hash << 5;
+ return hash;
+}
+
+class DataHash {
+ private:
+ static constexpr bool kUseMurmur3Hash = true;
+
+ public:
+ template <class Container>
+ size_t operator()(const Container& array) const {
+ // Containers that provide the data() function use contiguous storage.
+ const uint8_t* data = reinterpret_cast<const uint8_t*>(array.data());
+ uint32_t len = sizeof(typename Container::value_type) * array.size();
+ if (kUseMurmur3Hash) {
+ static constexpr uint32_t c1 = 0xcc9e2d51;
+ static constexpr uint32_t c2 = 0x1b873593;
+ static constexpr uint32_t r1 = 15;
+ static constexpr uint32_t r2 = 13;
+ static constexpr uint32_t m = 5;
+ static constexpr uint32_t n = 0xe6546b64;
+
+ uint32_t hash = 0;
+
+ const int nblocks = len / 4;
+ typedef __attribute__((__aligned__(1))) uint32_t unaligned_uint32_t;
+ const unaligned_uint32_t *blocks = reinterpret_cast<const uint32_t*>(data);
+ int i;
+ for (i = 0; i < nblocks; i++) {
+ uint32_t k = blocks[i];
+ k *= c1;
+ k = (k << r1) | (k >> (32 - r1));
+ k *= c2;
+
+ hash ^= k;
+ hash = ((hash << r2) | (hash >> (32 - r2))) * m + n;
+ }
+
+ const uint8_t *tail = reinterpret_cast<const uint8_t*>(data + nblocks * 4);
+ uint32_t k1 = 0;
+
+ switch (len & 3) {
+ case 3:
+ k1 ^= tail[2] << 16;
+ FALLTHROUGH_INTENDED;
+ case 2:
+ k1 ^= tail[1] << 8;
+ FALLTHROUGH_INTENDED;
+ case 1:
+ k1 ^= tail[0];
+
+ k1 *= c1;
+ k1 = (k1 << r1) | (k1 >> (32 - r1));
+ k1 *= c2;
+ hash ^= k1;
+ }
+
+ hash ^= len;
+ hash ^= (hash >> 16);
+ hash *= 0x85ebca6b;
+ hash ^= (hash >> 13);
+ hash *= 0xc2b2ae35;
+ hash ^= (hash >> 16);
+
+ return hash;
+ } else {
+ return HashBytes(data, len);
+ }
+ }
+};
+
+} // namespace art
+
+#endif // ART_LIBARTBASE_BASE_DATA_HASH_H_
diff --git a/libartbase/base/file_utils_test.cc b/libartbase/base/file_utils_test.cc
index 56d1c44fc0..2a7273b85e 100644
--- a/libartbase/base/file_utils_test.cc
+++ b/libartbase/base/file_utils_test.cc
@@ -69,12 +69,11 @@ TEST_F(FileUtilsTest, GetAndroidRootSafe) {
EXPECT_EQ(android_root, android_root_env);
// Set ANDROID_ROOT to something else (but the directory must exist). So use dirname.
- char* root_dup = strdup(android_root_env.c_str());
- char* dir = dirname(root_dup);
+ UniqueCPtr<char> root_dup(strdup(android_root_env.c_str()));
+ char* dir = dirname(root_dup.get());
ASSERT_EQ(0, setenv("ANDROID_ROOT", dir, 1 /* overwrite */));
std::string android_root2 = GetAndroidRootSafe(&error_msg);
EXPECT_STREQ(dir, android_root2.c_str());
- free(root_dup);
// Set a bogus value for ANDROID_ROOT. This should be an error.
ASSERT_EQ(0, setenv("ANDROID_ROOT", "/this/is/obviously/bogus", 1 /* overwrite */));
diff --git a/libartbase/base/fuchsia_compat.h b/libartbase/base/fuchsia_compat.h
new file mode 100644
index 0000000000..018bac0528
--- /dev/null
+++ b/libartbase/base/fuchsia_compat.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_LIBARTBASE_BASE_FUCHSIA_COMPAT_H_
+#define ART_LIBARTBASE_BASE_FUCHSIA_COMPAT_H_
+
+// stubs for features lacking in Fuchsia
+
+struct rlimit {
+ int rlim_cur;
+};
+
+#define RLIMIT_FSIZE (1)
+#define RLIM_INFINITY (-1)
+static int getrlimit(int resource, struct rlimit *rlim) {
+ LOG(FATAL) << "getrlimit not available for Fuchsia";
+}
+
+static int ashmem_create_region(const char *name, size_t size) {
+ LOG(FATAL) << "ashmem_create_region not available for Fuchsia";
+}
+
+#endif // ART_LIBARTBASE_BASE_FUCHSIA_COMPAT_H_
diff --git a/libartbase/base/globals.h b/libartbase/base/globals.h
index 39e0c509cd..cd0bf8fafc 100644
--- a/libartbase/base/globals.h
+++ b/libartbase/base/globals.h
@@ -74,7 +74,9 @@ static constexpr bool kIsPGOInstrumentation = false;
// ART_TARGET - Defined for target builds of ART.
// ART_TARGET_LINUX - Defined for target Linux builds of ART.
// ART_TARGET_ANDROID - Defined for target Android builds of ART.
-// Note: Either ART_TARGET_LINUX or ART_TARGET_ANDROID need to be set when ART_TARGET is set.
+// ART_TARGET_FUCHSIA - Defined for Fuchsia builds of ART.
+// Note: Either ART_TARGET_LINUX, ART_TARGET_ANDROID or ART_TARGET_FUCHSIA
+// need to be set when ART_TARGET is set.
// Note: When ART_TARGET_LINUX is defined mem_map.h will not be using Ashmem for memory mappings
// (usually only available on Android kernels).
#if defined(ART_TARGET)
@@ -82,10 +84,16 @@ static constexpr bool kIsPGOInstrumentation = false;
static constexpr bool kIsTargetBuild = true;
# if defined(ART_TARGET_LINUX)
static constexpr bool kIsTargetLinux = true;
+static constexpr bool kIsTargetFuchsia = false;
# elif defined(ART_TARGET_ANDROID)
static constexpr bool kIsTargetLinux = false;
+static constexpr bool kIsTargetFuchsia = false;
+# elif defined(ART_TARGET_FUCHSIA)
+static constexpr bool kIsTargetLinux = false;
+static constexpr bool kIsTargetFuchsia = true;
# else
-# error "Either ART_TARGET_LINUX or ART_TARGET_ANDROID needs to be defined for target builds."
+# error "Either ART_TARGET_LINUX, ART_TARGET_ANDROID or ART_TARGET_FUCHSIA " \
+ "needs to be defined for target builds."
# endif
#else
static constexpr bool kIsTargetBuild = false;
@@ -93,8 +101,11 @@ static constexpr bool kIsTargetBuild = false;
# error "ART_TARGET_LINUX defined for host build."
# elif defined(ART_TARGET_ANDROID)
# error "ART_TARGET_ANDROID defined for host build."
+# elif defined(ART_TARGET_FUCHSIA)
+# error "ART_TARGET_FUCHSIA defined for host build."
# else
static constexpr bool kIsTargetLinux = false;
+static constexpr bool kIsTargetFuchsia = false;
# endif
#endif
diff --git a/libartbase/base/hash_map.h b/libartbase/base/hash_map.h
index 0d7198c0f7..a3bb5b5550 100644
--- a/libartbase/base/hash_map.h
+++ b/libartbase/base/hash_map.h
@@ -48,9 +48,12 @@ class HashMapWrapper {
Fn fn_;
};
-template <class Key, class Value, class EmptyFn,
- class HashFn = std::hash<Key>, class Pred = std::equal_to<Key>,
- class Alloc = std::allocator<std::pair<Key, Value>>>
+template <class Key,
+ class Value,
+ class EmptyFn,
+ class HashFn = DefaultHashFn<Key>,
+ class Pred = DefaultPred<Key>,
+ class Alloc = std::allocator<std::pair<Key, Value>>>
class HashMap : public HashSet<std::pair<Key, Value>,
EmptyFn,
HashMapWrapper<HashFn>,
diff --git a/libartbase/base/hash_set.h b/libartbase/base/hash_set.h
index 2f810eaade..2b1a5eb947 100644
--- a/libartbase/base/hash_set.h
+++ b/libartbase/base/hash_set.h
@@ -22,16 +22,94 @@
#include <functional>
#include <iterator>
#include <memory>
+#include <string>
#include <type_traits>
#include <utility>
#include <android-base/logging.h>
+#include "base/data_hash.h"
#include "bit_utils.h"
#include "macros.h"
namespace art {
+template <class Elem, class HashSetType>
+class HashSetIterator : std::iterator<std::forward_iterator_tag, Elem> {
+ public:
+ HashSetIterator(const HashSetIterator&) = default;
+ HashSetIterator(HashSetIterator&&) = default;
+ HashSetIterator(HashSetType* hash_set, size_t index) : index_(index), hash_set_(hash_set) {}
+
+ // Conversion from iterator to const_iterator.
+ template <class OtherElem,
+ class OtherHashSetType,
+ typename = typename std::enable_if<
+ std::is_same<Elem, const OtherElem>::value &&
+ std::is_same<HashSetType, const OtherHashSetType>::value>::type>
+ HashSetIterator(const HashSetIterator<OtherElem, OtherHashSetType>& other)
+ : index_(other.index_), hash_set_(other.hash_set_) {}
+
+ HashSetIterator& operator=(const HashSetIterator&) = default;
+ HashSetIterator& operator=(HashSetIterator&&) = default;
+
+ bool operator==(const HashSetIterator& other) const {
+ return hash_set_ == other.hash_set_ && this->index_ == other.index_;
+ }
+
+ bool operator!=(const HashSetIterator& other) const {
+ return !(*this == other);
+ }
+
+ HashSetIterator operator++() { // Value after modification.
+ this->index_ = hash_set_->NextNonEmptySlot(index_);
+ return *this;
+ }
+
+ HashSetIterator operator++(int) {
+ HashSetIterator temp = *this;
+ ++*this;
+ return temp;
+ }
+
+ Elem& operator*() const {
+ DCHECK(!hash_set_->IsFreeSlot(this->index_));
+ return hash_set_->ElementForIndex(this->index_);
+ }
+
+ Elem* operator->() const {
+ return &**this;
+ }
+
+ private:
+ size_t index_;
+ HashSetType* hash_set_;
+
+ template <class Elem1, class HashSetType1, class Elem2, class HashSetType2>
+ friend bool operator==(const HashSetIterator<Elem1, HashSetType1>& lhs,
+ const HashSetIterator<Elem2, HashSetType2>& rhs);
+ template <class T, class EmptyFn, class HashFn, class Pred, class Alloc> friend class HashSet;
+ template <class OtherElem, class OtherHashSetType> friend class HashSetIterator;
+};
+
+template <class Elem1, class HashSetType1, class Elem2, class HashSetType2>
+bool operator==(const HashSetIterator<Elem1, HashSetType1>& lhs,
+ const HashSetIterator<Elem2, HashSetType2>& rhs) {
+ static_assert(
+ std::is_convertible<HashSetIterator<Elem1, HashSetType1>,
+ HashSetIterator<Elem2, HashSetType2>>::value ||
+ std::is_convertible<HashSetIterator<Elem2, HashSetType2>,
+ HashSetIterator<Elem1, HashSetType1>>::value, "Bad iterator types.");
+ DCHECK_EQ(lhs.hash_set_, rhs.hash_set_);
+ return lhs.index_ == rhs.index_;
+}
+
+template <class Elem1, class HashSetType1, class Elem2, class HashSetType2>
+bool operator!=(const HashSetIterator<Elem1, HashSetType1>& lhs,
+ const HashSetIterator<Elem2, HashSetType2>& rhs) {
+ return !(lhs == rhs);
+}
+
// Returns true if an item is empty.
template <class T>
class DefaultEmptyFn {
@@ -55,70 +133,35 @@ class DefaultEmptyFn<T*> {
}
};
-// Low memory version of a hash set, uses less memory than std::unordered_set since elements aren't
-// boxed. Uses linear probing to resolve collisions.
+template <class T>
+using DefaultHashFn = typename std::conditional<std::is_same<T, std::string>::value,
+ DataHash,
+ std::hash<T>>::type;
+
+struct DefaultStringEquals {
+ // Allow comparison with anything that can be compared to std::string, for example StringPiece.
+ template <typename T>
+ bool operator()(const std::string& lhs, const T& rhs) const {
+ return lhs == rhs;
+ }
+};
+
+template <class T>
+using DefaultPred = typename std::conditional<std::is_same<T, std::string>::value,
+ DefaultStringEquals,
+ std::equal_to<T>>::type;
+
+// Low memory version of a hash set, uses less memory than std::unordered_multiset since elements
+// aren't boxed. Uses linear probing to resolve collisions.
// EmptyFn needs to implement two functions MakeEmpty(T& item) and IsEmpty(const T& item).
// TODO: We could get rid of this requirement by using a bitmap, though maybe this would be slower
// and more complicated.
-template <class T, class EmptyFn = DefaultEmptyFn<T>, class HashFn = std::hash<T>,
- class Pred = std::equal_to<T>, class Alloc = std::allocator<T>>
+template <class T,
+ class EmptyFn = DefaultEmptyFn<T>,
+ class HashFn = DefaultHashFn<T>,
+ class Pred = DefaultPred<T>,
+ class Alloc = std::allocator<T>>
class HashSet {
- template <class Elem, class HashSetType>
- class BaseIterator : std::iterator<std::forward_iterator_tag, Elem> {
- public:
- BaseIterator(const BaseIterator&) = default;
- BaseIterator(BaseIterator&&) = default;
- BaseIterator(HashSetType* hash_set, size_t index) : index_(index), hash_set_(hash_set) {
- }
- BaseIterator& operator=(const BaseIterator&) = default;
- BaseIterator& operator=(BaseIterator&&) = default;
-
- bool operator==(const BaseIterator& other) const {
- return hash_set_ == other.hash_set_ && this->index_ == other.index_;
- }
-
- bool operator!=(const BaseIterator& other) const {
- return !(*this == other);
- }
-
- BaseIterator operator++() { // Value after modification.
- this->index_ = this->NextNonEmptySlot(this->index_, hash_set_);
- return *this;
- }
-
- BaseIterator operator++(int) {
- BaseIterator temp = *this;
- this->index_ = this->NextNonEmptySlot(this->index_, hash_set_);
- return temp;
- }
-
- Elem& operator*() const {
- DCHECK(!hash_set_->IsFreeSlot(this->index_));
- return hash_set_->ElementForIndex(this->index_);
- }
-
- Elem* operator->() const {
- return &**this;
- }
-
- // TODO: Operator -- --(int) (and use std::bidirectional_iterator_tag)
-
- private:
- size_t index_;
- HashSetType* hash_set_;
-
- size_t NextNonEmptySlot(size_t index, const HashSet* hash_set) const {
- const size_t num_buckets = hash_set->NumBuckets();
- DCHECK_LT(index, num_buckets);
- do {
- ++index;
- } while (index < num_buckets && hash_set->IsFreeSlot(index));
- return index;
- }
-
- friend class HashSet;
- };
-
public:
using value_type = T;
using allocator_type = Alloc;
@@ -126,8 +169,8 @@ class HashSet {
using const_reference = const T&;
using pointer = T*;
using const_pointer = const T*;
- using iterator = BaseIterator<T, HashSet>;
- using const_iterator = BaseIterator<const T, const HashSet>;
+ using iterator = HashSetIterator<T, HashSet>;
+ using const_iterator = HashSetIterator<const T, const HashSet>;
using size_type = size_t;
using difference_type = ptrdiff_t;
@@ -136,7 +179,7 @@ class HashSet {
static constexpr size_t kMinBuckets = 1000;
// If we don't own the data, this will create a new array which owns the data.
- void Clear() {
+ void clear() {
DeallocateStorage();
num_elements_ = 0;
elements_until_expand_ = 0;
@@ -300,13 +343,12 @@ class HashSet {
return const_iterator(this, NumBuckets());
}
- bool Empty() const {
- return Size() == 0;
+ size_t size() const {
+ return num_elements_;
}
- // Return true if the hash set has ownership of the underlying data.
- bool OwnsData() const {
- return owns_data_;
+ bool empty() const {
+ return size() == 0;
}
// Erase algorithm:
@@ -317,7 +359,7 @@ class HashSet {
// and set the empty slot to be the location we just moved from.
// Relies on maintaining the invariant that there's no empty slots from the 'ideal' index of an
// element to its actual location/index.
- iterator Erase(iterator it) {
+ iterator erase(iterator it) {
// empty_index is the index that will become empty.
size_t empty_index = it.index_;
DCHECK(!IsFreeSlot(empty_index));
@@ -368,12 +410,12 @@ class HashSet {
// Set of Class* sorted by name, want to find a class with a name but can't allocate a dummy
// object in the heap for performance solution.
template <typename K>
- iterator Find(const K& key) {
+ iterator find(const K& key) {
return FindWithHash(key, hashfn_(key));
}
template <typename K>
- const_iterator Find(const K& key) const {
+ const_iterator find(const K& key) const {
return FindWithHash(key, hashfn_(key));
}
@@ -387,14 +429,26 @@ class HashSet {
return const_iterator(this, FindIndex(key, hash));
}
+ // Insert an element with hint, allows duplicates.
+ // Note: The hint is not very useful for a HashSet<> unless there are many hash conflicts
+ // and in that case the use of HashSet<> itself should be reconsidered.
+ iterator insert(const_iterator hint ATTRIBUTE_UNUSED, const T& element) {
+ return insert(element);
+ }
+ iterator insert(const_iterator hint ATTRIBUTE_UNUSED, T&& element) {
+ return insert(std::move(element));
+ }
+
// Insert an element, allows duplicates.
- template <typename U, typename = typename std::enable_if<std::is_convertible<U, T>::value>::type>
- void Insert(U&& element) {
- InsertWithHash(std::forward<U>(element), hashfn_(element));
+ iterator insert(const T& element) {
+ return InsertWithHash(element, hashfn_(element));
+ }
+ iterator insert(T&& element) {
+ return InsertWithHash(std::move(element), hashfn_(element));
}
template <typename U, typename = typename std::enable_if<std::is_convertible<U, T>::value>::type>
- void InsertWithHash(U&& element, size_t hash) {
+ iterator InsertWithHash(U&& element, size_t hash) {
DCHECK_EQ(hash, hashfn_(element));
if (num_elements_ >= elements_until_expand_) {
Expand();
@@ -403,10 +457,7 @@ class HashSet {
const size_t index = FirstAvailableSlot(IndexForHash(hash));
data_[index] = std::forward<U>(element);
++num_elements_;
- }
-
- size_t Size() const {
- return num_elements_;
+ return iterator(this, index);
}
void swap(HashSet& other) {
@@ -430,12 +481,12 @@ class HashSet {
}
void ShrinkToMaximumLoad() {
- Resize(Size() / max_load_factor_);
+ Resize(size() / max_load_factor_);
}
// Reserve enough room to insert until Size() == num_elements without requiring to grow the hash
// set. No-op if the hash set is already large enough to do this.
- void Reserve(size_t num_elements) {
+ void reserve(size_t num_elements) {
size_t num_buckets = num_elements / max_load_factor_;
// Deal with rounding errors. Add one for rounding.
while (static_cast<size_t>(num_buckets * max_load_factor_) <= num_elements + 1u) {
@@ -466,7 +517,7 @@ class HashSet {
// Calculate the current load factor and return it.
double CalculateLoadFactor() const {
- return static_cast<double>(Size()) / static_cast<double>(NumBuckets());
+ return static_cast<double>(size()) / static_cast<double>(NumBuckets());
}
// Make sure that everything reinserts in the right spot. Returns the number of errors.
@@ -510,7 +561,7 @@ class HashSet {
// maximum load factor.
const double load_factor = CalculateLoadFactor();
if (load_factor > max_load_factor_) {
- Resize(Size() / ((min_load_factor_ + max_load_factor_) * 0.5));
+ Resize(size() / ((min_load_factor_ + max_load_factor_) * 0.5));
}
}
@@ -605,7 +656,7 @@ class HashSet {
// Expand the set based on the load factors.
void Expand() {
- size_t min_index = static_cast<size_t>(Size() / min_load_factor_);
+ size_t min_index = static_cast<size_t>(size() / min_load_factor_);
// Resize based on the minimum load factor.
Resize(min_index);
}
@@ -615,7 +666,7 @@ class HashSet {
if (new_size < kMinBuckets) {
new_size = kMinBuckets;
}
- DCHECK_GE(new_size, Size());
+ DCHECK_GE(new_size, size());
T* const old_data = data_;
size_t old_num_buckets = num_buckets_;
// Reinsert all of the old elements.
@@ -649,6 +700,15 @@ class HashSet {
return index;
}
+ size_t NextNonEmptySlot(size_t index) const {
+ const size_t num_buckets = NumBuckets();
+ DCHECK_LT(index, num_buckets);
+ do {
+ ++index;
+ } while (index < num_buckets && IsFreeSlot(index));
+ return index;
+ }
+
// Return new offset.
template <typename Elem>
static size_t WriteToBytes(uint8_t* ptr, size_t offset, Elem n) {
@@ -679,6 +739,9 @@ class HashSet {
double min_load_factor_;
double max_load_factor_;
+ template <class Elem, class HashSetType>
+ friend class HashSetIterator;
+
ART_FRIEND_TEST(InternTableTest, CrossHash);
};
diff --git a/libartbase/base/hash_set_test.cc b/libartbase/base/hash_set_test.cc
index ff745b4be5..782a68b5d5 100644
--- a/libartbase/base/hash_set_test.cc
+++ b/libartbase/base/hash_set_test.cc
@@ -24,6 +24,8 @@
#include <vector>
#include <gtest/gtest.h>
+
+#include "base/stringpiece.h"
#include "hash_map.h"
namespace art {
@@ -66,16 +68,16 @@ class HashSetTest : public testing::Test {
TEST_F(HashSetTest, TestSmoke) {
HashSet<std::string, IsEmptyFnString> hash_set;
const std::string test_string = "hello world 1234";
- ASSERT_TRUE(hash_set.Empty());
- ASSERT_EQ(hash_set.Size(), 0U);
- hash_set.Insert(test_string);
- auto it = hash_set.Find(test_string);
+ ASSERT_TRUE(hash_set.empty());
+ ASSERT_EQ(hash_set.size(), 0U);
+ hash_set.insert(test_string);
+ auto it = hash_set.find(test_string);
ASSERT_EQ(*it, test_string);
- auto after_it = hash_set.Erase(it);
+ auto after_it = hash_set.erase(it);
ASSERT_TRUE(after_it == hash_set.end());
- ASSERT_TRUE(hash_set.Empty());
- ASSERT_EQ(hash_set.Size(), 0U);
- it = hash_set.Find(test_string);
+ ASSERT_TRUE(hash_set.empty());
+ ASSERT_EQ(hash_set.size(), 0U);
+ it = hash_set.find(test_string);
ASSERT_TRUE(it == hash_set.end());
}
@@ -86,26 +88,26 @@ TEST_F(HashSetTest, TestInsertAndErase) {
for (size_t i = 0; i < count; ++i) {
// Insert a bunch of elements and make sure we can find them.
strings.push_back(RandomString(10));
- hash_set.Insert(strings[i]);
- auto it = hash_set.Find(strings[i]);
+ hash_set.insert(strings[i]);
+ auto it = hash_set.find(strings[i]);
ASSERT_TRUE(it != hash_set.end());
ASSERT_EQ(*it, strings[i]);
}
- ASSERT_EQ(strings.size(), hash_set.Size());
+ ASSERT_EQ(strings.size(), hash_set.size());
// Try to erase the odd strings.
for (size_t i = 1; i < count; i += 2) {
- auto it = hash_set.Find(strings[i]);
+ auto it = hash_set.find(strings[i]);
ASSERT_TRUE(it != hash_set.end());
ASSERT_EQ(*it, strings[i]);
- hash_set.Erase(it);
+ hash_set.erase(it);
}
// Test removed.
for (size_t i = 1; i < count; i += 2) {
- auto it = hash_set.Find(strings[i]);
+ auto it = hash_set.find(strings[i]);
ASSERT_TRUE(it == hash_set.end());
}
for (size_t i = 0; i < count; i += 2) {
- auto it = hash_set.Find(strings[i]);
+ auto it = hash_set.find(strings[i]);
ASSERT_TRUE(it != hash_set.end());
ASSERT_EQ(*it, strings[i]);
}
@@ -119,7 +121,7 @@ TEST_F(HashSetTest, TestIterator) {
for (size_t i = 0; i < count; ++i) {
// Insert a bunch of elements and make sure we can find them.
strings.push_back(RandomString(10));
- hash_set.Insert(strings[i]);
+ hash_set.insert(strings[i]);
}
// Make sure we visit each string exactly once.
std::map<std::string, size_t> found_count;
@@ -133,7 +135,7 @@ TEST_F(HashSetTest, TestIterator) {
// Remove all the elements with iterator erase.
for (auto it = hash_set.begin(); it != hash_set.end();) {
++found_count[*it];
- it = hash_set.Erase(it);
+ it = hash_set.erase(it);
ASSERT_EQ(hash_set.Verify(), 0U);
}
for (size_t i = 0; i < count; ++i) {
@@ -147,14 +149,14 @@ TEST_F(HashSetTest, TestSwap) {
static constexpr size_t count = 1000;
for (size_t i = 0; i < count; ++i) {
strings.push_back(RandomString(10));
- hash_seta.Insert(strings[i]);
+ hash_seta.insert(strings[i]);
}
std::swap(hash_seta, hash_setb);
- hash_seta.Insert("TEST");
- hash_setb.Insert("TEST2");
+ hash_seta.insert("TEST");
+ hash_setb.insert("TEST2");
for (size_t i = 0; i < count; ++i) {
strings.push_back(RandomString(10));
- hash_seta.Insert(strings[i]);
+ hash_seta.insert(strings[i]);
}
}
@@ -163,7 +165,7 @@ TEST_F(HashSetTest, TestShrink) {
std::vector<std::string> strings = {"a", "b", "c", "d", "e", "f", "g"};
for (size_t i = 0; i < strings.size(); ++i) {
// Insert some strings into the beginning of our hash set to establish an initial size
- hash_set.Insert(strings[i]);
+ hash_set.insert(strings[i]);
}
hash_set.ShrinkToMaximumLoad();
@@ -174,12 +176,12 @@ TEST_F(HashSetTest, TestShrink) {
static constexpr size_t count = 1000;
for (size_t i = 0; i < count; ++i) {
random_strings.push_back(RandomString(10));
- hash_set.Insert(random_strings[i]);
+ hash_set.insert(random_strings[i]);
}
// Erase all the extra strings which guarantees that our load factor will be really bad.
for (size_t i = 0; i < count; ++i) {
- hash_set.Erase(hash_set.Find(random_strings[i]));
+ hash_set.erase(hash_set.find(random_strings[i]));
}
const double bad_load = hash_set.CalculateLoadFactor();
@@ -191,7 +193,7 @@ TEST_F(HashSetTest, TestShrink) {
// Make sure all the initial elements we had are still there
for (const std::string& initial_string : strings) {
- EXPECT_NE(hash_set.end(), hash_set.Find(initial_string))
+ EXPECT_NE(hash_set.end(), hash_set.find(initial_string))
<< "expected to find " << initial_string;
}
}
@@ -201,7 +203,7 @@ TEST_F(HashSetTest, TestLoadFactor) {
static constexpr size_t kStringCount = 1000;
static constexpr double kEpsilon = 0.01;
for (size_t i = 0; i < kStringCount; ++i) {
- hash_set.Insert(RandomString(i % 10 + 1));
+ hash_set.insert(RandomString(i % 10 + 1));
}
// Check that changing the load factor resizes the table to be within the target range.
EXPECT_GE(hash_set.CalculateLoadFactor() + kEpsilon, hash_set.GetMinLoadFactor());
@@ -228,29 +230,29 @@ TEST_F(HashSetTest, TestStress) {
SetSeed(seed);
LOG(INFO) << "Starting stress test with seed " << seed;
for (size_t i = 0; i < operations; ++i) {
- ASSERT_EQ(hash_set.Size(), std_set.size());
+ ASSERT_EQ(hash_set.size(), std_set.size());
size_t delta = std::abs(static_cast<ssize_t>(target_size) -
- static_cast<ssize_t>(hash_set.Size()));
+ static_cast<ssize_t>(hash_set.size()));
size_t n = PRand();
if (n % target_size == 0) {
- hash_set.Clear();
+ hash_set.clear();
std_set.clear();
- ASSERT_TRUE(hash_set.Empty());
+ ASSERT_TRUE(hash_set.empty());
ASSERT_TRUE(std_set.empty());
} else if (n % target_size < delta) {
// Skew towards adding elements until we are at the desired size.
const std::string& s = strings[PRand() % string_count];
- hash_set.Insert(s);
+ hash_set.insert(s);
std_set.insert(s);
- ASSERT_EQ(*hash_set.Find(s), *std_set.find(s));
+ ASSERT_EQ(*hash_set.find(s), *std_set.find(s));
} else {
const std::string& s = strings[PRand() % string_count];
- auto it1 = hash_set.Find(s);
+ auto it1 = hash_set.find(s);
auto it2 = std_set.find(s);
ASSERT_EQ(it1 == hash_set.end(), it2 == std_set.end());
if (it1 != hash_set.end()) {
ASSERT_EQ(*it1, *it2);
- hash_set.Erase(it1);
+ hash_set.erase(it1);
std_set.erase(it2);
}
}
@@ -268,13 +270,13 @@ struct IsEmptyStringPair {
TEST_F(HashSetTest, TestHashMap) {
HashMap<std::string, int, IsEmptyStringPair> hash_map;
- hash_map.Insert(std::make_pair(std::string("abcd"), 123));
- hash_map.Insert(std::make_pair(std::string("abcd"), 124));
- hash_map.Insert(std::make_pair(std::string("bags"), 444));
- auto it = hash_map.Find(std::string("abcd"));
+ hash_map.insert(std::make_pair(std::string("abcd"), 123));
+ hash_map.insert(std::make_pair(std::string("abcd"), 124));
+ hash_map.insert(std::make_pair(std::string("bags"), 444));
+ auto it = hash_map.find(std::string("abcd"));
ASSERT_EQ(it->second, 123);
- hash_map.Erase(it);
- it = hash_map.Find(std::string("abcd"));
+ hash_map.erase(it);
+ it = hash_map.find(std::string("abcd"));
ASSERT_EQ(it->second, 124);
}
@@ -325,33 +327,50 @@ struct VectorIntHashEquals {
TEST_F(HashSetTest, TestLookupByAlternateKeyType) {
HashSet<std::vector<int>, IsEmptyFnVectorInt, VectorIntHashEquals, VectorIntHashEquals> hash_set;
- hash_set.Insert(std::vector<int>({1, 2, 3, 4}));
- hash_set.Insert(std::vector<int>({4, 2}));
- ASSERT_EQ(hash_set.end(), hash_set.Find(std::vector<int>({1, 1, 1, 1})));
- ASSERT_NE(hash_set.end(), hash_set.Find(std::vector<int>({1, 2, 3, 4})));
- ASSERT_EQ(hash_set.end(), hash_set.Find(std::forward_list<int>({1, 1, 1, 1})));
- ASSERT_NE(hash_set.end(), hash_set.Find(std::forward_list<int>({1, 2, 3, 4})));
+ hash_set.insert(std::vector<int>({1, 2, 3, 4}));
+ hash_set.insert(std::vector<int>({4, 2}));
+ ASSERT_EQ(hash_set.end(), hash_set.find(std::vector<int>({1, 1, 1, 1})));
+ ASSERT_NE(hash_set.end(), hash_set.find(std::vector<int>({1, 2, 3, 4})));
+ ASSERT_EQ(hash_set.end(), hash_set.find(std::forward_list<int>({1, 1, 1, 1})));
+ ASSERT_NE(hash_set.end(), hash_set.find(std::forward_list<int>({1, 2, 3, 4})));
}
TEST_F(HashSetTest, TestReserve) {
HashSet<std::string, IsEmptyFnString> hash_set;
std::vector<size_t> sizes = {1, 10, 25, 55, 128, 1024, 4096};
for (size_t size : sizes) {
- hash_set.Reserve(size);
+ hash_set.reserve(size);
const size_t buckets_before = hash_set.NumBuckets();
// Check that we expanded enough.
CHECK_GE(hash_set.ElementsUntilExpand(), size);
// Try inserting elements until we are at our reserve size and ensure the hash set did not
// expand.
- while (hash_set.Size() < size) {
- hash_set.Insert(std::to_string(hash_set.Size()));
+ while (hash_set.size() < size) {
+ hash_set.insert(std::to_string(hash_set.size()));
}
CHECK_EQ(hash_set.NumBuckets(), buckets_before);
}
// Check the behaviour for shrinking, it does not necessarily resize down.
constexpr size_t size = 100;
- hash_set.Reserve(size);
+ hash_set.reserve(size);
CHECK_GE(hash_set.ElementsUntilExpand(), size);
}
+TEST_F(HashSetTest, IteratorConversion) {
+ const char* test_string = "dummy";
+ HashSet<std::string> hash_set;
+ HashSet<std::string>::iterator it = hash_set.insert(test_string);
+ HashSet<std::string>::const_iterator cit = it;
+ ASSERT_TRUE(it == cit);
+ ASSERT_EQ(*it, *cit);
+}
+
+TEST_F(HashSetTest, StringSearchyStringPiece) {
+ const char* test_string = "dummy";
+ HashSet<std::string> hash_set;
+ HashSet<std::string>::iterator insert_pos = hash_set.insert(test_string);
+ HashSet<std::string>::iterator it = hash_set.find(StringPiece(test_string));
+ ASSERT_TRUE(it == insert_pos);
+}
+
} // namespace art
diff --git a/libartbase/base/indenter.h b/libartbase/base/indenter.h
index 06e7340d36..a479b7d650 100644
--- a/libartbase/base/indenter.h
+++ b/libartbase/base/indenter.h
@@ -122,6 +122,10 @@ class VariableIndentationOutputStream {
return indented_os_;
}
+ size_t GetIndentation() const {
+ return indenter_.count_;
+ }
+
void IncreaseIndentation(size_t adjustment) {
indenter_.count_ += adjustment;
}
diff --git a/libartbase/base/iteration_range.h b/libartbase/base/iteration_range.h
index 76049a7e4d..cd87d85f68 100644
--- a/libartbase/base/iteration_range.h
+++ b/libartbase/base/iteration_range.h
@@ -39,9 +39,9 @@ class IterationRange {
iterator cbegin() const { return first_; }
iterator cend() const { return last_; }
- private:
- const iterator first_;
- const iterator last_;
+ protected:
+ iterator first_;
+ iterator last_;
};
template <typename Iter>
diff --git a/libartbase/base/malloc_arena_pool.cc b/libartbase/base/malloc_arena_pool.cc
index 144b06ceb9..15a5d71a6b 100644
--- a/libartbase/base/malloc_arena_pool.cc
+++ b/libartbase/base/malloc_arena_pool.cc
@@ -53,7 +53,7 @@ MallocArena::MallocArena(size_t size) {
memory_ = unaligned_memory_;
} else {
memory_ = AlignUp(unaligned_memory_, ArenaAllocator::kArenaAlignment);
- if (UNLIKELY(RUNNING_ON_MEMORY_TOOL > 0)) {
+ if (kRunningOnMemoryTool) {
size_t head = memory_ - unaligned_memory_;
size_t tail = overallocation - head;
MEMORY_TOOL_MAKE_NOACCESS(unaligned_memory_, head);
@@ -66,7 +66,7 @@ MallocArena::MallocArena(size_t size) {
MallocArena::~MallocArena() {
constexpr size_t overallocation = RequiredOverallocation();
- if (overallocation != 0u && UNLIKELY(RUNNING_ON_MEMORY_TOOL > 0)) {
+ if (overallocation != 0u && kRunningOnMemoryTool) {
size_t head = memory_ - unaligned_memory_;
size_t tail = overallocation - head;
MEMORY_TOOL_MAKE_UNDEFINED(unaligned_memory_, head);
@@ -132,7 +132,7 @@ size_t MallocArenaPool::GetBytesAllocated() const {
}
void MallocArenaPool::FreeArenaChain(Arena* first) {
- if (UNLIKELY(RUNNING_ON_MEMORY_TOOL > 0)) {
+ if (kRunningOnMemoryTool) {
for (Arena* arena = first; arena != nullptr; arena = arena->next_) {
MEMORY_TOOL_MAKE_UNDEFINED(arena->memory_, arena->bytes_allocated_);
}
diff --git a/libartbase/base/mem_map.cc b/libartbase/base/mem_map.cc
index c455fed829..5cea869519 100644
--- a/libartbase/base/mem_map.cc
+++ b/libartbase/base/mem_map.cc
@@ -19,7 +19,7 @@
#include <inttypes.h>
#include <stdlib.h>
#include <sys/mman.h> // For the PROT_* and MAP_* constants.
-#ifndef ANDROID_OS
+#if !defined(ANDROID_OS) && !defined(__Fuchsia__)
#include <sys/resource.h>
#endif
@@ -29,7 +29,12 @@
#include "android-base/stringprintf.h"
#include "android-base/unique_fd.h"
+
+#if !defined(__Fuchsia__)
#include "cutils/ashmem.h"
+#else
+#include "fuchsia_compat.h"
+#endif
#include "allocator.h"
#include "bit_utils.h"
@@ -161,7 +166,7 @@ bool MemMap::ContainedWithinExistingMap(uint8_t* ptr, size_t size, std::string*
// non-null, we check that pointer is the actual_ptr == expected_ptr,
// and if not, report in error_msg what the conflict mapping was if
// found, or a generic error in other cases.
-static bool CheckMapRequest(uint8_t* expected_ptr, void* actual_ptr, size_t byte_count,
+bool MemMap::CheckMapRequest(uint8_t* expected_ptr, void* actual_ptr, size_t byte_count,
std::string* error_msg) {
// Handled first by caller for more specific error messages.
CHECK(actual_ptr != MAP_FAILED);
@@ -178,7 +183,7 @@ static bool CheckMapRequest(uint8_t* expected_ptr, void* actual_ptr, size_t byte
}
// We asked for an address but didn't get what we wanted, all paths below here should fail.
- int result = munmap(actual_ptr, byte_count);
+ int result = TargetMUnmap(actual_ptr, byte_count);
if (result == -1) {
PLOG(WARNING) << StringPrintf("munmap(%p, %zd) failed", actual_ptr, byte_count);
}
@@ -207,18 +212,18 @@ static bool CheckMapRequest(uint8_t* expected_ptr, void* actual_ptr, size_t byte
}
#if USE_ART_LOW_4G_ALLOCATOR
-static inline void* TryMemMapLow4GB(void* ptr,
+void* MemMap::TryMemMapLow4GB(void* ptr,
size_t page_aligned_byte_count,
int prot,
int flags,
int fd,
off_t offset) {
- void* actual = mmap(ptr, page_aligned_byte_count, prot, flags, fd, offset);
+ void* actual = TargetMMap(ptr, page_aligned_byte_count, prot, flags, fd, offset);
if (actual != MAP_FAILED) {
// Since we didn't use MAP_FIXED the kernel may have mapped it somewhere not in the low
// 4GB. If this is the case, unmap and retry.
if (reinterpret_cast<uintptr_t>(actual) + page_aligned_byte_count >= 4 * GB) {
- munmap(actual, page_aligned_byte_count);
+ TargetMUnmap(actual, page_aligned_byte_count);
actual = MAP_FAILED;
}
}
@@ -237,7 +242,7 @@ MemMap* MemMap::MapAnonymous(const char* name,
#ifndef __LP64__
UNUSED(low_4gb);
#endif
- use_ashmem = use_ashmem && !kIsTargetLinux;
+ use_ashmem = use_ashmem && !kIsTargetLinux && !kIsTargetFuchsia;
if (byte_count == 0) {
return new MemMap(name, nullptr, 0, nullptr, 0, prot, false);
}
@@ -460,7 +465,7 @@ MemMap* MemMap::MapFileAtAddress(uint8_t* expected_ptr,
(expected_ptr == nullptr) ? nullptr : (expected_ptr - page_offset);
size_t redzone_size = 0;
- if (RUNNING_ON_MEMORY_TOOL && kMemoryToolAddsRedzones && expected_ptr == nullptr) {
+ if (kRunningOnMemoryTool && kMemoryToolAddsRedzones && expected_ptr == nullptr) {
redzone_size = kPageSize;
page_aligned_byte_count += redzone_size;
}
@@ -521,7 +526,7 @@ MemMap::~MemMap() {
if (!reuse_) {
MEMORY_TOOL_MAKE_UNDEFINED(base_begin_, base_size_);
if (!already_unmapped_) {
- int result = munmap(base_begin_, base_size_);
+ int result = TargetMUnmap(base_begin_, base_size_);
if (result == -1) {
PLOG(FATAL) << "munmap failed";
}
@@ -565,7 +570,7 @@ MemMap::MemMap(const std::string& name, uint8_t* begin, size_t size, void* base_
MemMap* MemMap::RemapAtEnd(uint8_t* new_end, const char* tail_name, int tail_prot,
std::string* error_msg, bool use_ashmem) {
- use_ashmem = use_ashmem && !kIsTargetLinux;
+ use_ashmem = use_ashmem && !kIsTargetLinux && !kIsTargetFuchsia;
DCHECK_GE(new_end, Begin());
DCHECK_LE(new_end, End());
DCHECK_LE(begin_ + size_, reinterpret_cast<uint8_t*>(base_begin_) + base_size_);
@@ -607,7 +612,7 @@ MemMap* MemMap::RemapAtEnd(uint8_t* new_end, const char* tail_name, int tail_pro
MEMORY_TOOL_MAKE_UNDEFINED(tail_base_begin, tail_base_size);
// Unmap/map the tail region.
- int result = munmap(tail_base_begin, tail_base_size);
+ int result = TargetMUnmap(tail_base_begin, tail_base_size);
if (result == -1) {
PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
*error_msg = StringPrintf("munmap(%p, %zd) failed for '%s'. See process maps in the log.",
@@ -618,12 +623,12 @@ MemMap* MemMap::RemapAtEnd(uint8_t* new_end, const char* tail_name, int tail_pro
// calls. Otherwise, libc (or something else) might take this memory
// region. Note this isn't perfect as there's no way to prevent
// other threads to try to take this memory region here.
- uint8_t* actual = reinterpret_cast<uint8_t*>(mmap(tail_base_begin,
- tail_base_size,
- tail_prot,
- flags,
- fd.get(),
- 0));
+ uint8_t* actual = reinterpret_cast<uint8_t*>(TargetMMap(tail_base_begin,
+ tail_base_size,
+ tail_prot,
+ flags,
+ fd.get(),
+ 0));
if (actual == MAP_FAILED) {
PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
*error_msg = StringPrintf("anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0) failed. See process "
@@ -647,19 +652,11 @@ void MemMap::MadviseDontNeedAndZero() {
}
bool MemMap::Sync() {
- bool result;
- if (redzone_size_ != 0) {
- // To avoid valgrind errors, temporarily lift the lower-end noaccess protection before passing
- // it to msync() as it only accepts page-aligned base address, and exclude the higher-end
- // noaccess protection from the msync range. b/27552451.
- uint8_t* base_begin = reinterpret_cast<uint8_t*>(base_begin_);
- MEMORY_TOOL_MAKE_DEFINED(base_begin, begin_ - base_begin);
- result = msync(BaseBegin(), End() - base_begin, MS_SYNC) == 0;
- MEMORY_TOOL_MAKE_NOACCESS(base_begin, begin_ - base_begin);
- } else {
- result = msync(BaseBegin(), BaseSize(), MS_SYNC) == 0;
- }
- return result;
+ // Historical note: To avoid Valgrind errors, we temporarily lifted the lower-end noaccess
+ // protection before passing it to msync() when `redzone_size_` was non-null, as Valgrind
+ // only accepts page-aligned base address, and excludes the higher-end noaccess protection
+ // from the msync range. b/27552451.
+ return msync(BaseBegin(), BaseSize(), MS_SYNC) == 0;
}
bool MemMap::Protect(int prot) {
@@ -798,6 +795,8 @@ void MemMap::Init() {
std::lock_guard<std::mutex> mu(*mem_maps_lock_);
DCHECK(gMaps == nullptr);
gMaps = new Maps;
+
+ TargetMMapInit();
}
void MemMap::Shutdown() {
@@ -829,8 +828,10 @@ void MemMap::SetSize(size_t new_size) {
reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(BaseBegin()) +
new_base_size),
base_size_ - new_base_size);
- CHECK_EQ(munmap(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(BaseBegin()) + new_base_size),
- base_size_ - new_base_size), 0) << new_base_size << " " << base_size_;
+ CHECK_EQ(TargetMUnmap(reinterpret_cast<void*>(
+ reinterpret_cast<uintptr_t>(BaseBegin()) + new_base_size),
+ base_size_ - new_base_size), 0)
+ << new_base_size << " " << base_size_;
base_size_ = new_base_size;
size_ = new_size;
}
@@ -976,7 +977,7 @@ void* MemMap::MapInternal(void* addr,
if (orig_prot != prot_non_exec) {
if (mprotect(actual, length, orig_prot) != 0) {
PLOG(ERROR) << "Could not protect to requested prot: " << orig_prot;
- munmap(actual, length);
+ TargetMUnmap(actual, length);
errno = ENOMEM;
return MAP_FAILED;
}
@@ -984,14 +985,14 @@ void* MemMap::MapInternal(void* addr,
return actual;
}
- actual = mmap(addr, length, prot, flags, fd, offset);
+ actual = TargetMMap(addr, length, prot, flags, fd, offset);
#else
#if defined(__LP64__)
if (low_4gb && addr == nullptr) {
flags |= MAP_32BIT;
}
#endif
- actual = mmap(addr, length, prot, flags, fd, offset);
+ actual = TargetMMap(addr, length, prot, flags, fd, offset);
#endif
return actual;
}
@@ -1067,13 +1068,13 @@ void MemMap::AlignBy(size_t size) {
// Unmap the unaligned parts.
if (base_begin < aligned_base_begin) {
MEMORY_TOOL_MAKE_UNDEFINED(base_begin, aligned_base_begin - base_begin);
- CHECK_EQ(munmap(base_begin, aligned_base_begin - base_begin), 0)
+ CHECK_EQ(TargetMUnmap(base_begin, aligned_base_begin - base_begin), 0)
<< "base_begin=" << reinterpret_cast<void*>(base_begin)
<< " aligned_base_begin=" << reinterpret_cast<void*>(aligned_base_begin);
}
if (aligned_base_end < base_end) {
MEMORY_TOOL_MAKE_UNDEFINED(aligned_base_end, base_end - aligned_base_end);
- CHECK_EQ(munmap(aligned_base_end, base_end - aligned_base_end), 0)
+ CHECK_EQ(TargetMUnmap(aligned_base_end, base_end - aligned_base_end), 0)
<< "base_end=" << reinterpret_cast<void*>(base_end)
<< " aligned_base_end=" << reinterpret_cast<void*>(aligned_base_end);
}
diff --git a/libartbase/base/mem_map.h b/libartbase/base/mem_map.h
index 3a324b2dc5..1979357714 100644
--- a/libartbase/base/mem_map.h
+++ b/libartbase/base/mem_map.h
@@ -29,10 +29,11 @@
namespace art {
-#if defined(__LP64__) && (defined(__aarch64__) || defined(__mips__) || defined(__APPLE__))
+#if defined(__LP64__) && !defined(__Fuchsia__) && \
+ (defined(__aarch64__) || defined(__mips__) || defined(__APPLE__))
#define USE_ART_LOW_4G_ALLOCATOR 1
#else
-#if defined(__LP64__) && !defined(__x86_64__)
+#if defined(__LP64__) && !defined(__Fuchsia__) && !defined(__x86_64__)
#error "Unrecognized 64-bit architecture."
#endif
#define USE_ART_LOW_4G_ALLOCATOR 0
@@ -264,6 +265,12 @@ class MemMap {
off_t offset)
REQUIRES(!MemMap::mem_maps_lock_);
+ // member function to access real_munmap
+ static bool CheckMapRequest(uint8_t* expected_ptr,
+ void* actual_ptr,
+ size_t byte_count,
+ std::string* error_msg);
+
const std::string name_;
uint8_t* begin_; // Start of data. May be changed by AlignBy.
size_t size_; // Length of data.
@@ -284,8 +291,19 @@ class MemMap {
#if USE_ART_LOW_4G_ALLOCATOR
static uintptr_t next_mem_pos_; // Next memory location to check for low_4g extent.
+
+ static void* TryMemMapLow4GB(void* ptr,
+ size_t page_aligned_byte_count,
+ int prot,
+ int flags,
+ int fd,
+ off_t offset);
#endif
+ static void TargetMMapInit();
+ static void* TargetMMap(void* start, size_t len, int prot, int flags, int fd, off_t fd_off);
+ static int TargetMUnmap(void* start, size_t len);
+
static std::mutex* mem_maps_lock_;
friend class MemMapTest; // To allow access to base_begin_ and base_size_.
diff --git a/libartbase/base/mem_map_fuchsia.cc b/libartbase/base/mem_map_fuchsia.cc
new file mode 100644
index 0000000000..db31efb1c0
--- /dev/null
+++ b/libartbase/base/mem_map_fuchsia.cc
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "mem_map.h"
+#include <sys/mman.h>
+#include "logging.h"
+
+#include <zircon/process.h>
+#include <zircon/syscalls.h>
+
+namespace art {
+
+static zx_handle_t fuchsia_lowmem_vmar = ZX_HANDLE_INVALID;
+static zx_vaddr_t fuchsia_lowmem_base = 0;
+static size_t fuchsia_lowmem_size = 0;
+
+static const char map_name[] = "mmap-android";
+static constexpr uintptr_t FUCHSIA_LOWER_MEM_START = 0x80000000;
+static constexpr uintptr_t FUCHSIA_LOWER_MEM_SIZE = 0x60000000;
+
+void MemMap::TargetMMapInit() {
+ if (fuchsia_lowmem_vmar != ZX_HANDLE_INVALID) {
+ return;
+ }
+
+ zx_info_vmar_t vmarinfo;
+ CHECK_EQ(zx_object_get_info(zx_vmar_root_self(),
+ ZX_INFO_VMAR,
+ &vmarinfo,
+ sizeof(vmarinfo),
+ NULL,
+ NULL), ZX_OK) << "could not find info from root vmar";
+
+ uintptr_t lower_mem_start = FUCHSIA_LOWER_MEM_START - vmarinfo.base;
+ fuchsia_lowmem_size = FUCHSIA_LOWER_MEM_SIZE;
+ uint32_t allocflags = ZX_VM_FLAG_CAN_MAP_READ |
+ ZX_VM_FLAG_CAN_MAP_WRITE |
+ ZX_VM_FLAG_CAN_MAP_EXECUTE |
+ ZX_VM_FLAG_SPECIFIC;
+ CHECK_EQ(zx_vmar_allocate(zx_vmar_root_self(),
+ lower_mem_start,
+ fuchsia_lowmem_size,
+ allocflags,
+ &fuchsia_lowmem_vmar,
+ &fuchsia_lowmem_base), ZX_OK) << "could not allocate lowmem vmar";
+}
+
+void* MemMap::TargetMMap(void* start, size_t len, int prot, int flags, int fd, off_t fd_off) {
+ zx_status_t status;
+ uintptr_t mem = 0;
+
+ bool mmap_lower = (flags & MAP_32BIT) != 0;
+
+ // for file-based mapping use system library
+ if ((flags & MAP_ANONYMOUS) == 0) {
+ if (start != nullptr) {
+ flags |= MAP_FIXED;
+ }
+ CHECK(!mmap_lower) << "cannot map files into low memory for Fuchsia";
+ return mmap(start, len, prot, flags, fd, fd_off);
+ }
+
+ uint32_t vmarflags = 0;
+ if ((prot & PROT_READ) != 0) {
+ vmarflags |= ZX_VM_FLAG_PERM_READ;
+ }
+ if ((prot & PROT_WRITE) != 0) {
+ vmarflags |= ZX_VM_FLAG_PERM_WRITE;
+ }
+ if ((prot & PROT_EXEC) != 0) {
+ vmarflags |= ZX_VM_FLAG_PERM_EXECUTE;
+ }
+
+ if (len == 0) {
+ errno = EINVAL;
+ return MAP_FAILED;
+ }
+
+ zx_info_vmar_t vmarinfo;
+ size_t vmaroffset = 0;
+ if (start != nullptr) {
+ vmarflags |= ZX_VM_FLAG_SPECIFIC;
+ status = zx_object_get_info((mmap_lower ? fuchsia_lowmem_vmar : zx_vmar_root_self()),
+ ZX_INFO_VMAR,
+ &vmarinfo,
+ sizeof(vmarinfo),
+ NULL,
+ NULL);
+ if (status < 0 || reinterpret_cast<uintptr_t>(start) < vmarinfo.base) {
+ errno = EINVAL;
+ return MAP_FAILED;
+ }
+ vmaroffset = reinterpret_cast<uintptr_t>(start) - vmarinfo.base;
+ }
+
+ zx_handle_t vmo;
+ if (zx_vmo_create(len, 0, &vmo) < 0) {
+ errno = ENOMEM;
+ return MAP_FAILED;
+ }
+ zx_vmo_get_size(vmo, &len);
+ zx_object_set_property(vmo, ZX_PROP_NAME, map_name, strlen(map_name));
+
+ if (mmap_lower) {
+ status = zx_vmar_map(fuchsia_lowmem_vmar, vmaroffset, vmo, fd_off, len, vmarflags, &mem);
+ } else {
+ status = zx_vmar_map(zx_vmar_root_self(), vmaroffset, vmo, fd_off, len, vmarflags, &mem);
+ }
+ zx_handle_close(vmo);
+ if (status != ZX_OK) {
+ return MAP_FAILED;
+ }
+
+ return reinterpret_cast<void *>(mem);
+}
+
+int MemMap::TargetMUnmap(void* start, size_t len) {
+ uintptr_t addr = reinterpret_cast<uintptr_t>(start);
+ zx_handle_t alloc_vmar = zx_vmar_root_self();
+ if (addr >= fuchsia_lowmem_base && addr < fuchsia_lowmem_base + fuchsia_lowmem_size) {
+ alloc_vmar = fuchsia_lowmem_vmar;
+ }
+ zx_status_t status = zx_vmar_unmap(alloc_vmar, addr, len);
+ if (status < 0) {
+ errno = EINVAL;
+ return -1;
+ }
+ return 0;
+}
+
+} // namespace art
diff --git a/libartbase/base/mem_map_test.cc b/libartbase/base/mem_map_test.cc
index d956126df1..c575c7a31f 100644
--- a/libartbase/base/mem_map_test.cc
+++ b/libartbase/base/mem_map_test.cc
@@ -471,31 +471,32 @@ TEST_F(MemMapTest, MapAnonymousExactAddr32bitHighAddr) {
// cannot allocate in the 2GB-4GB region.
TEST_DISABLED_FOR_MIPS();
+ // This test does not work under AddressSanitizer.
+ // Historical note: This test did not work under Valgrind either.
+ TEST_DISABLED_FOR_MEMORY_TOOL();
+
CommonInit();
- // This test may not work under valgrind.
- if (RUNNING_ON_MEMORY_TOOL == 0) {
- constexpr size_t size = 0x100000;
- // Try all addresses starting from 2GB to 4GB.
- size_t start_addr = 2 * GB;
- std::string error_msg;
- std::unique_ptr<MemMap> map;
- for (; start_addr <= std::numeric_limits<uint32_t>::max() - size; start_addr += size) {
- map.reset(MemMap::MapAnonymous("MapAnonymousExactAddr32bitHighAddr",
- reinterpret_cast<uint8_t*>(start_addr),
- size,
- PROT_READ | PROT_WRITE,
- /*low_4gb*/true,
- false,
- &error_msg));
- if (map != nullptr) {
- break;
- }
+ constexpr size_t size = 0x100000;
+ // Try all addresses starting from 2GB to 4GB.
+ size_t start_addr = 2 * GB;
+ std::string error_msg;
+ std::unique_ptr<MemMap> map;
+ for (; start_addr <= std::numeric_limits<uint32_t>::max() - size; start_addr += size) {
+ map.reset(MemMap::MapAnonymous("MapAnonymousExactAddr32bitHighAddr",
+ reinterpret_cast<uint8_t*>(start_addr),
+ size,
+ PROT_READ | PROT_WRITE,
+ /*low_4gb*/true,
+ false,
+ &error_msg));
+ if (map != nullptr) {
+ break;
}
- ASSERT_TRUE(map.get() != nullptr) << error_msg;
- ASSERT_GE(reinterpret_cast<uintptr_t>(map->End()), 2u * GB);
- ASSERT_TRUE(error_msg.empty());
- ASSERT_EQ(BaseBegin(map.get()), reinterpret_cast<void*>(start_addr));
}
+ ASSERT_TRUE(map.get() != nullptr) << error_msg;
+ ASSERT_GE(reinterpret_cast<uintptr_t>(map->End()), 2u * GB);
+ ASSERT_TRUE(error_msg.empty());
+ ASSERT_EQ(BaseBegin(map.get()), reinterpret_cast<void*>(start_addr));
}
TEST_F(MemMapTest, MapAnonymousOverflow) {
diff --git a/libartbase/base/mem_map_unix.cc b/libartbase/base/mem_map_unix.cc
new file mode 100644
index 0000000000..601b049525
--- /dev/null
+++ b/libartbase/base/mem_map_unix.cc
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "mem_map.h"
+
+#include <sys/mman.h>
+
+namespace art {
+
+void MemMap::TargetMMapInit() {
+ // no-op for unix
+}
+
+void* MemMap::TargetMMap(void* start, size_t len, int prot, int flags, int fd, off_t fd_off) {
+ return mmap(start, len, prot, flags, fd, fd_off);
+}
+
+int MemMap::TargetMUnmap(void* start, size_t len) {
+ return munmap(start, len);
+}
+
+} // namespace art
diff --git a/libartbase/base/memory_tool.h b/libartbase/base/memory_tool.h
index e1df99fed4..d381f010f5 100644
--- a/libartbase/base/memory_tool.h
+++ b/libartbase/base/memory_tool.h
@@ -19,53 +19,53 @@
#include <stddef.h>
+namespace art {
+
#if !defined(__has_feature)
-#define __has_feature(x) 0
+# define __has_feature(x) 0
#endif
#if __has_feature(address_sanitizer)
-#include <sanitizer/asan_interface.h>
-#define ADDRESS_SANITIZER
+# include <sanitizer/asan_interface.h>
+# define ADDRESS_SANITIZER
-#ifdef ART_ENABLE_ADDRESS_SANITIZER
-#define MEMORY_TOOL_MAKE_NOACCESS(p, s) __asan_poison_memory_region(p, s)
-#define MEMORY_TOOL_MAKE_UNDEFINED(p, s) __asan_unpoison_memory_region(p, s)
-#define MEMORY_TOOL_MAKE_DEFINED(p, s) __asan_unpoison_memory_region(p, s)
+# ifdef ART_ENABLE_ADDRESS_SANITIZER
+# define MEMORY_TOOL_MAKE_NOACCESS(p, s) __asan_poison_memory_region(p, s)
+# define MEMORY_TOOL_MAKE_UNDEFINED(p, s) __asan_unpoison_memory_region(p, s)
+# define MEMORY_TOOL_MAKE_DEFINED(p, s) __asan_unpoison_memory_region(p, s)
constexpr bool kMemoryToolIsAvailable = true;
-#else
-#define MEMORY_TOOL_MAKE_NOACCESS(p, s) do { (void)(p); (void)(s); } while (0)
-#define MEMORY_TOOL_MAKE_UNDEFINED(p, s) do { (void)(p); (void)(s); } while (0)
-#define MEMORY_TOOL_MAKE_DEFINED(p, s) do { (void)(p); (void)(s); } while (0)
+# else
+# define MEMORY_TOOL_MAKE_NOACCESS(p, s) do { (void)(p); (void)(s); } while (0)
+# define MEMORY_TOOL_MAKE_UNDEFINED(p, s) do { (void)(p); (void)(s); } while (0)
+# define MEMORY_TOOL_MAKE_DEFINED(p, s) do { (void)(p); (void)(s); } while (0)
constexpr bool kMemoryToolIsAvailable = false;
-#endif
+# endif
extern "C" void __asan_handle_no_return();
-#define ATTRIBUTE_NO_SANITIZE_ADDRESS __attribute__((no_sanitize_address))
-#define MEMORY_TOOL_HANDLE_NO_RETURN __asan_handle_no_return()
-#define RUNNING_ON_MEMORY_TOOL 1U
-constexpr bool kMemoryToolIsValgrind = false;
+# define ATTRIBUTE_NO_SANITIZE_ADDRESS __attribute__((no_sanitize_address))
+# define MEMORY_TOOL_HANDLE_NO_RETURN __asan_handle_no_return()
+constexpr bool kRunningOnMemoryTool = true;
constexpr bool kMemoryToolDetectsLeaks = true;
constexpr bool kMemoryToolAddsRedzones = true;
constexpr size_t kMemoryToolStackGuardSizeScale = 2;
#else
-#include <memcheck/memcheck.h>
-#include <valgrind.h>
-#define MEMORY_TOOL_MAKE_NOACCESS(p, s) VALGRIND_MAKE_MEM_NOACCESS(p, s)
-#define MEMORY_TOOL_MAKE_UNDEFINED(p, s) VALGRIND_MAKE_MEM_UNDEFINED(p, s)
-#define MEMORY_TOOL_MAKE_DEFINED(p, s) VALGRIND_MAKE_MEM_DEFINED(p, s)
-#define ATTRIBUTE_NO_SANITIZE_ADDRESS
-#define MEMORY_TOOL_HANDLE_NO_RETURN do { } while (0)
-#define RUNNING_ON_MEMORY_TOOL RUNNING_ON_VALGRIND
-constexpr bool kMemoryToolIsAvailable = true;
-constexpr bool kMemoryToolIsValgrind = true;
-constexpr bool kMemoryToolDetectsLeaks = true;
-constexpr bool kMemoryToolAddsRedzones = true;
+# define MEMORY_TOOL_MAKE_NOACCESS(p, s) do { (void)(p); (void)(s); } while (0)
+# define MEMORY_TOOL_MAKE_UNDEFINED(p, s) do { (void)(p); (void)(s); } while (0)
+# define MEMORY_TOOL_MAKE_DEFINED(p, s) do { (void)(p); (void)(s); } while (0)
+# define ATTRIBUTE_NO_SANITIZE_ADDRESS
+# define MEMORY_TOOL_HANDLE_NO_RETURN do { } while (0)
+constexpr bool kRunningOnMemoryTool = false;
+constexpr bool kMemoryToolIsAvailable = false;
+constexpr bool kMemoryToolDetectsLeaks = false;
+constexpr bool kMemoryToolAddsRedzones = false;
constexpr size_t kMemoryToolStackGuardSizeScale = 1;
#endif
+} // namespace art
+
#endif // ART_LIBARTBASE_BASE_MEMORY_TOOL_H_
diff --git a/libartbase/base/scoped_arena_containers.h b/libartbase/base/scoped_arena_containers.h
index 44d7ebbc96..80144d2c09 100644
--- a/libartbase/base/scoped_arena_containers.h
+++ b/libartbase/base/scoped_arena_containers.h
@@ -66,15 +66,15 @@ using ScopedArenaSafeMap =
template <typename T,
typename EmptyFn = DefaultEmptyFn<T>,
- typename HashFn = std::hash<T>,
- typename Pred = std::equal_to<T>>
+ typename HashFn = DefaultHashFn<T>,
+ typename Pred = DefaultPred<T>>
using ScopedArenaHashSet = HashSet<T, EmptyFn, HashFn, Pred, ScopedArenaAllocatorAdapter<T>>;
template <typename Key,
typename Value,
typename EmptyFn = DefaultEmptyFn<std::pair<Key, Value>>,
- typename HashFn = std::hash<Key>,
- typename Pred = std::equal_to<Key>>
+ typename HashFn = DefaultHashFn<Key>,
+ typename Pred = DefaultPred<Key>>
using ScopedArenaHashMap = HashMap<Key,
Value,
EmptyFn,
@@ -236,7 +236,7 @@ class ArenaDelete {
protected:
// Used for variable sized objects such as RegisterLine.
ALWAYS_INLINE void ProtectMemory(T* ptr, size_t size) const {
- if (RUNNING_ON_MEMORY_TOOL > 0) {
+ if (kRunningOnMemoryTool) {
// Writing to the memory will fail ift we already destroyed the pointer with
// DestroyOnlyDelete since we make it no access.
memset(ptr, kMagicFill, size);
diff --git a/libartbase/base/stats.h b/libartbase/base/stats.h
new file mode 100644
index 0000000000..4dcbfe81c6
--- /dev/null
+++ b/libartbase/base/stats.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_LIBARTBASE_BASE_STATS_H_
+#define ART_LIBARTBASE_BASE_STATS_H_
+
+#include <unordered_map>
+
+#include "globals.h"
+
+namespace art {
+
+// Simple structure to record tree of statistical values.
+class Stats {
+ public:
+ double Value() const { return value_; }
+ size_t Count() const { return count_; }
+ Stats* Child(const char* name) { return &children_[name]; }
+ const std::unordered_map<const char*, Stats>& Children() const { return children_; }
+
+ void AddBytes(double bytes, size_t count = 1) { Add(bytes, count); }
+ void AddBits(double bits, size_t count = 1) { Add(bits / kBitsPerByte, count); }
+ void AddSeconds(double s, size_t count = 1) { Add(s, count); }
+ void AddNanoSeconds(double ns, size_t count = 1) { Add(ns / 1000000000.0, count); }
+
+ double SumChildrenValues() const {
+ double sum = 0.0;
+ for (auto it : children_) {
+ sum += it.second.Value();
+ }
+ return sum;
+ }
+
+ private:
+ void Add(double value, size_t count = 1) {
+ value_ += value;
+ count_ += count;
+ }
+
+ double value_ = 0.0; // Commutative sum of the collected statistic in basic units.
+ size_t count_ = 0; // The number of samples for this node.
+ std::unordered_map<const char*, Stats> children_;
+};
+
+} // namespace art
+
+#endif // ART_LIBARTBASE_BASE_STATS_H_
diff --git a/libartbase/base/utils.h b/libartbase/base/utils.h
index 73c1c226f9..6e3b78e12c 100644
--- a/libartbase/base/utils.h
+++ b/libartbase/base/utils.h
@@ -244,20 +244,6 @@ static inline void CheckedCall(const Func& function, const char* what, Args... a
}
}
-// Hash bytes using a relatively fast hash.
-static inline size_t HashBytes(const uint8_t* data, size_t len) {
- size_t hash = 0x811c9dc5;
- for (uint32_t i = 0; i < len; ++i) {
- hash = (hash * 16777619) ^ data[i];
- }
- hash += hash << 13;
- hash ^= hash >> 7;
- hash += hash << 3;
- hash ^= hash >> 17;
- hash += hash << 5;
- return hash;
-}
-
} // namespace art
#endif // ART_LIBARTBASE_BASE_UTILS_H_
diff --git a/libartbase/base/variant_map_test.cc b/libartbase/base/variant_map_test.cc
index 4677b6d3b3..f2da3389b1 100644
--- a/libartbase/base/variant_map_test.cc
+++ b/libartbase/base/variant_map_test.cc
@@ -108,7 +108,7 @@ TEST(VariantMaps, RuleOfFive) {
EXPECT_EQ(size_t(2), fmFilled.Size());
// Test copy constructor
- FruitMap fmEmptyCopy(fmEmpty);
+ FruitMap fmEmptyCopy(fmEmpty); // NOLINT
EXPECT_EQ(size_t(0), fmEmptyCopy.Size());
// Test copy constructor
diff --git a/libdexfile/dex/class_accessor-inl.h b/libdexfile/dex/class_accessor-inl.h
index 3bb9e93e5a..dd91438ff7 100644
--- a/libdexfile/dex/class_accessor-inl.h
+++ b/libdexfile/dex/class_accessor-inl.h
@@ -26,12 +26,15 @@
namespace art {
inline ClassAccessor::ClassAccessor(const ClassIteratorData& data)
- : ClassAccessor(data.dex_file_, data.dex_file_.GetClassDef(data.class_def_idx_)) {}
+ : ClassAccessor(data.dex_file_, data.class_def_idx_) {}
inline ClassAccessor::ClassAccessor(const DexFile& dex_file, const DexFile::ClassDef& class_def)
+ : ClassAccessor(dex_file, dex_file.GetIndexForClassDef(class_def)) {}
+
+inline ClassAccessor::ClassAccessor(const DexFile& dex_file, uint32_t class_def_index)
: dex_file_(dex_file),
- descriptor_index_(class_def.class_idx_),
- ptr_pos_(dex_file.GetClassData(class_def)),
+ class_def_index_(class_def_index),
+ ptr_pos_(dex_file.GetClassData(dex_file.GetClassDef(class_def_index))),
num_static_fields_(ptr_pos_ != nullptr ? DecodeUnsignedLeb128(&ptr_pos_) : 0u),
num_instance_fields_(ptr_pos_ != nullptr ? DecodeUnsignedLeb128(&ptr_pos_) : 0u),
num_direct_methods_(ptr_pos_ != nullptr ? DecodeUnsignedLeb128(&ptr_pos_) : 0u),
@@ -107,8 +110,12 @@ inline CodeItemInstructionAccessor ClassAccessor::Method::GetInstructions() cons
return CodeItemInstructionAccessor(dex_file_, dex_file_.GetCodeItem(GetCodeItemOffset()));
}
+inline CodeItemDataAccessor ClassAccessor::Method::GetInstructionsAndData() const {
+ return CodeItemDataAccessor(dex_file_, dex_file_.GetCodeItem(GetCodeItemOffset()));
+}
+
inline const char* ClassAccessor::GetDescriptor() const {
- return dex_file_.StringByTypeIdx(descriptor_index_);
+ return dex_file_.StringByTypeIdx(GetClassIdx());
}
inline const DexFile::CodeItem* ClassAccessor::Method::GetCodeItem() const {
@@ -175,6 +182,10 @@ inline void ClassAccessor::Method::UnHideAccessFlags() const {
DexFile::UnHideAccessFlags(const_cast<uint8_t*>(ptr_pos_), GetAccessFlags(), /*is_method*/ true);
}
+inline dex::TypeIndex ClassAccessor::GetClassIdx() const {
+ return dex_file_.GetClassDef(class_def_index_).class_idx_;
+}
+
} // namespace art
#endif // ART_LIBDEXFILE_DEX_CLASS_ACCESSOR_INL_H_
diff --git a/libdexfile/dex/class_accessor.h b/libdexfile/dex/class_accessor.h
index 4f0fd32e31..0d87f93d60 100644
--- a/libdexfile/dex/class_accessor.h
+++ b/libdexfile/dex/class_accessor.h
@@ -78,6 +78,7 @@ class ClassAccessor {
}
CodeItemInstructionAccessor GetInstructions() const;
+ CodeItemDataAccessor GetInstructionsAndData() const;
const DexFile::CodeItem* GetCodeItem() const;
@@ -248,6 +249,8 @@ class ClassAccessor {
ClassAccessor(const DexFile& dex_file, const DexFile::ClassDef& class_def);
+ ClassAccessor(const DexFile& dex_file, uint32_t class_def_index);
+
// Return the code item for a method.
const DexFile::CodeItem* GetCodeItem(const Method& method) const;
@@ -315,9 +318,7 @@ class ClassAccessor {
const char* GetDescriptor() const;
- dex::TypeIndex GetClassIdx() const {
- return descriptor_index_;
- }
+ dex::TypeIndex GetClassIdx() const;
const DexFile& GetDexFile() const {
return dex_file_;
@@ -327,6 +328,10 @@ class ClassAccessor {
return ptr_pos_ != nullptr;
}
+ uint32_t GetClassDefIndex() const {
+ return class_def_index_;
+ }
+
protected:
// Template visitor to reduce copy paste for visiting elements.
// No thread safety analysis since the visitor may require capabilities.
@@ -341,7 +346,7 @@ class ClassAccessor {
IterationRange<DataIterator<Method>> GetMethodsInternal(size_t count) const;
const DexFile& dex_file_;
- const dex::TypeIndex descriptor_index_ = {};
+ const uint32_t class_def_index_;
const uint8_t* ptr_pos_ = nullptr; // Pointer into stream of class_data_item.
const uint32_t num_static_fields_ = 0u;
const uint32_t num_instance_fields_ = 0u;
diff --git a/libdexfile/dex/class_accessor_test.cc b/libdexfile/dex/class_accessor_test.cc
index d0533c1811..1f30ae54d6 100644
--- a/libdexfile/dex/class_accessor_test.cc
+++ b/libdexfile/dex/class_accessor_test.cc
@@ -30,8 +30,9 @@ TEST_F(ClassAccessorTest, TestVisiting) {
uint32_t class_def_idx = 0u;
ASSERT_GT(dex_file->NumClassDefs(), 0u);
for (ClassAccessor accessor : dex_file->GetClasses()) {
- const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_idx);
+ const DexFile::ClassDef& class_def = dex_file->GetClassDef(accessor.GetClassDefIndex());
EXPECT_EQ(accessor.GetDescriptor(), dex_file->StringByTypeIdx(class_def.class_idx_));
+ EXPECT_EQ(class_def_idx, accessor.GetClassDefIndex());
++class_def_idx;
// Check iterators against visitors.
auto methods = accessor.GetMethods();
diff --git a/libdexfile/dex/dex_file-inl.h b/libdexfile/dex/dex_file-inl.h
index f5dd374253..09668594dd 100644
--- a/libdexfile/dex/dex_file-inl.h
+++ b/libdexfile/dex/dex_file-inl.h
@@ -17,12 +17,14 @@
#ifndef ART_LIBDEXFILE_DEX_DEX_FILE_INL_H_
#define ART_LIBDEXFILE_DEX_DEX_FILE_INL_H_
+#include "dex_file.h"
+
#include "base/casts.h"
#include "base/leb128.h"
#include "base/stringpiece.h"
#include "class_iterator.h"
#include "compact_dex_file.h"
-#include "dex_file.h"
+#include "dex_instruction_iterator.h"
#include "invoke_type.h"
#include "standard_dex_file.h"
diff --git a/libdexfile/dex/dex_file.cc b/libdexfile/dex/dex_file.cc
index f570158dfb..f1f896058c 100644
--- a/libdexfile/dex/dex_file.cc
+++ b/libdexfile/dex/dex_file.cc
@@ -605,6 +605,15 @@ std::string DexFile::PrettyType(dex::TypeIndex type_idx) const {
return PrettyDescriptor(GetTypeDescriptor(type_id));
}
+dex::ProtoIndex DexFile::GetProtoIndexForCallSite(uint32_t call_site_idx) const {
+ const DexFile::CallSiteIdItem& csi = GetCallSiteId(call_site_idx);
+ CallSiteArrayValueIterator it(*this, csi);
+ it.Next();
+ it.Next();
+ DCHECK_EQ(EncodedArrayValueIterator::ValueType::kMethodType, it.GetValueType());
+ return dex::ProtoIndex(it.GetJavaValue().i);
+}
+
// Checks that visibility is as expected. Includes special behavior for M and
// before to allow runtime and build visibility when expecting runtime.
std::ostream& operator<<(std::ostream& os, const DexFile& dex_file) {
diff --git a/libdexfile/dex/dex_file.h b/libdexfile/dex/dex_file.h
index ed219808d2..4e88ef6985 100644
--- a/libdexfile/dex/dex_file.h
+++ b/libdexfile/dex/dex_file.h
@@ -29,7 +29,6 @@
#include "base/value_object.h"
#include "class_iterator.h"
#include "dex_file_types.h"
-#include "dex_instruction_iterator.h"
#include "hidden_api_access_flags.h"
#include "jni.h"
#include "modifiers.h"
@@ -38,6 +37,7 @@ namespace art {
class ClassDataItemIterator;
class CompactDexFile;
+class DexInstructionIterator;
enum InvokeType : uint32_t;
class MemMap;
class OatDexFile;
@@ -737,6 +737,8 @@ class DexFile {
return DataBegin() + call_site_id.data_off_;
}
+ dex::ProtoIndex GetProtoIndexForCallSite(uint32_t call_site_idx) const;
+
static const TryItem* GetTryItems(const DexInstructionIterator& code_item_end, uint32_t offset);
// Get the base of the encoded data for the given DexCode.
@@ -1198,6 +1200,9 @@ class ClassDataItemIterator {
bool IsAtMethod() const {
return pos_ >= EndOfInstanceFieldsPos();
}
+ bool IsAtVirtualMethod() const {
+ return pos_ >= EndOfDirectMethodsPos();
+ }
bool HasNextStaticField() const {
return pos_ < EndOfStaticFieldsPos();
}
diff --git a/libdexfile/dex/dex_file_tracking_registrar.cc b/libdexfile/dex/dex_file_tracking_registrar.cc
index 78ea9c16cb..551bea108c 100644
--- a/libdexfile/dex/dex_file_tracking_registrar.cc
+++ b/libdexfile/dex/dex_file_tracking_registrar.cc
@@ -130,7 +130,8 @@ inline void SetRegistrationRange(const void* begin, size_t size, bool should_poi
MEMORY_TOOL_MAKE_NOACCESS(begin, size);
} else {
// Note: MEMORY_TOOL_MAKE_UNDEFINED has the same functionality with Address
- // Sanitizer. The difference has not been tested with Valgrind
+ // Sanitizer.
+ // Historical note: The difference has not been tested with Valgrind.
MEMORY_TOOL_MAKE_DEFINED(begin, size);
}
}
diff --git a/libdexfile/dex/dex_file_verifier.cc b/libdexfile/dex/dex_file_verifier.cc
index 78db8b9a35..d4359458d3 100644
--- a/libdexfile/dex/dex_file_verifier.cc
+++ b/libdexfile/dex/dex_file_verifier.cc
@@ -1746,8 +1746,8 @@ bool DexFileVerifier::CheckIntraSectionIterate(size_t offset, uint32_t section_c
ErrorStringPrintf("Item %d offset is 0", i);
return false;
}
- DCHECK(offset_to_type_map_.Find(aligned_offset) == offset_to_type_map_.end());
- offset_to_type_map_.Insert(std::pair<uint32_t, uint16_t>(aligned_offset, kType));
+ DCHECK(offset_to_type_map_.find(aligned_offset) == offset_to_type_map_.end());
+ offset_to_type_map_.insert(std::pair<uint32_t, uint16_t>(aligned_offset, kType));
}
aligned_offset = ptr_ - begin_;
@@ -1951,7 +1951,7 @@ bool DexFileVerifier::CheckIntraSection() {
bool DexFileVerifier::CheckOffsetToTypeMap(size_t offset, uint16_t type) {
DCHECK_NE(offset, 0u);
- auto it = offset_to_type_map_.Find(offset);
+ auto it = offset_to_type_map_.find(offset);
if (UNLIKELY(it == offset_to_type_map_.end())) {
ErrorStringPrintf("No data map entry found @ %zx; expected %x", offset, type);
return false;
diff --git a/libdexfile/dex/dex_instruction_test.cc b/libdexfile/dex/dex_instruction_test.cc
index c944085b9e..6ce9dbafc8 100644
--- a/libdexfile/dex/dex_instruction_test.cc
+++ b/libdexfile/dex/dex_instruction_test.cc
@@ -135,7 +135,7 @@ TEST(Instruction, PropertiesOf4rcc) {
static void Build35c(uint16_t* out,
Instruction::Code code,
uint16_t method_idx,
- std::vector<uint16_t> args) {
+ const std::vector<uint16_t>& args) {
out[0] = 0;
out[0] |= (args.size() << 12);
out[0] |= static_cast<uint16_t>(code);
@@ -152,7 +152,7 @@ static void Build35c(uint16_t* out,
static std::string DumpInst35c(Instruction::Code code,
uint16_t method_idx,
- std::vector<uint16_t> args) {
+ const std::vector<uint16_t>& args) {
uint16_t inst[6] = {};
Build35c(inst, code, method_idx, args);
return Instruction::At(inst)->DumpString(nullptr);
diff --git a/libdexfile/dex/invoke_type.h b/libdexfile/dex/invoke_type.h
index 9b3af673a8..1740c079bb 100644
--- a/libdexfile/dex/invoke_type.h
+++ b/libdexfile/dex/invoke_type.h
@@ -28,7 +28,8 @@ enum InvokeType : uint32_t {
kSuper, // <<super>>
kInterface, // <<interface>>
kPolymorphic, // <<polymorphic>>
- kMaxInvokeType = kPolymorphic
+ kCustom, // <<custom>>
+ kMaxInvokeType = kCustom
};
std::ostream& operator<<(std::ostream& os, const InvokeType& rhs);
diff --git a/libprofile/profile/profile_compilation_info.cc b/libprofile/profile/profile_compilation_info.cc
index 748e24e27c..6f49adf718 100644
--- a/libprofile/profile/profile_compilation_info.cc
+++ b/libprofile/profile/profile_compilation_info.cc
@@ -57,7 +57,7 @@ const uint8_t ProfileCompilationInfo::kProfileVersion[] = { '0', '1', '0', '\0'
// The name of the profile entry in the dex metadata file.
// DO NOT CHANGE THIS! (it's similar to classes.dex in the apk files).
-const char* ProfileCompilationInfo::kDexMetadataProfileEntry = "primary.prof";
+const char ProfileCompilationInfo::kDexMetadataProfileEntry[] = "primary.prof";
static constexpr uint16_t kMaxDexFileKeyLength = PATH_MAX;
@@ -1181,8 +1181,8 @@ ProfileCompilationInfo::ProfileLoadStatus ProfileCompilationInfo::OpenSource(
// Allow archives without the profile entry. In this case, create an empty profile.
// This gives more flexible when ure-using archives that may miss the entry.
// (e.g. dex metadata files)
- LOG(WARNING) << std::string("Could not find entry ") + kDexMetadataProfileEntry +
- " in the zip archive. Creating an empty profile.";
+ LOG(WARNING) << "Could not find entry " << kDexMetadataProfileEntry
+ << " in the zip archive. Creating an empty profile.";
source->reset(ProfileSource::Create(nullptr));
return kProfileLoadSuccess;
}
@@ -1383,7 +1383,7 @@ bool ProfileCompilationInfo::RemapProfileIndex(
// the current profile info.
// Note that the number of elements should be very small, so this should not
// be a performance issue.
- for (const ProfileLineHeader other_profile_line_header : profile_line_headers) {
+ for (const ProfileLineHeader& other_profile_line_header : profile_line_headers) {
if (!filter_fn(other_profile_line_header.dex_location, other_profile_line_header.checksum)) {
continue;
}
@@ -2021,9 +2021,9 @@ ProfileCompilationInfo::FindOrAddDexPc(InlineCacheMap* inline_cache, uint32_t de
return &(inline_cache->FindOrAdd(dex_pc, DexPcData(&allocator_))->second);
}
-std::unordered_set<std::string> ProfileCompilationInfo::GetClassDescriptors(
+HashSet<std::string> ProfileCompilationInfo::GetClassDescriptors(
const std::vector<const DexFile*>& dex_files) {
- std::unordered_set<std::string> ret;
+ HashSet<std::string> ret;
for (const DexFile* dex_file : dex_files) {
const DexFileData* data = FindDexData(dex_file);
if (data != nullptr) {
@@ -2032,7 +2032,7 @@ std::unordered_set<std::string> ProfileCompilationInfo::GetClassDescriptors(
// Something went bad. The profile is probably corrupted. Abort and return an emtpy set.
LOG(WARNING) << "Corrupted profile: invalid type index "
<< type_idx.index_ << " in dex " << dex_file->GetLocation();
- return std::unordered_set<std::string>();
+ return HashSet<std::string>();
}
const DexFile::TypeId& type_id = dex_file->GetTypeId(type_idx);
ret.insert(dex_file->GetTypeDescriptor(type_id));
diff --git a/libprofile/profile/profile_compilation_info.h b/libprofile/profile/profile_compilation_info.h
index e28c5f17b6..3596f3e5a6 100644
--- a/libprofile/profile/profile_compilation_info.h
+++ b/libprofile/profile/profile_compilation_info.h
@@ -24,6 +24,7 @@
#include "base/arena_object.h"
#include "base/atomic.h"
#include "base/bit_memory_region.h"
+#include "base/hash_set.h"
#include "base/malloc_arena_pool.h"
#include "base/mem_map.h"
#include "base/safe_map.h"
@@ -73,7 +74,7 @@ class ProfileCompilationInfo {
static const uint8_t kProfileMagic[];
static const uint8_t kProfileVersion[];
- static const char* kDexMetadataProfileEntry;
+ static const char kDexMetadataProfileEntry[];
static constexpr uint8_t kIndividualInlineCacheSize = 5;
@@ -426,7 +427,7 @@ class ProfileCompilationInfo {
ArenaAllocator* GetAllocator() { return &allocator_; }
// Return all of the class descriptors in the profile for a set of dex files.
- std::unordered_set<std::string> GetClassDescriptors(const std::vector<const DexFile*>& dex_files);
+ HashSet<std::string> GetClassDescriptors(const std::vector<const DexFile*>& dex_files);
// Return true if the fd points to a profile file.
bool IsProfileFile(int fd);
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 25676f736c..21ce8c84c4 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -18,6 +18,7 @@
#include <stdlib.h>
#include <fstream>
+#include <iomanip>
#include <iostream>
#include <map>
#include <set>
@@ -37,6 +38,7 @@
#include "base/indenter.h"
#include "base/os.h"
#include "base/safe_map.h"
+#include "base/stats.h"
#include "base/stl_util.h"
#include "base/unix_file/fd_file.h"
#include "class_linker-inl.h"
@@ -113,10 +115,9 @@ const char* image_roots_descriptions_[] = {
};
// Map is so that we don't allocate multiple dex files for the same OatDexFile.
-static std::map<const OatFile::OatDexFile*,
- std::unique_ptr<const DexFile>> opened_dex_files;
+static std::map<const OatDexFile*, std::unique_ptr<const DexFile>> opened_dex_files;
-const DexFile* OpenDexFile(const OatFile::OatDexFile* oat_dex_file, std::string* error_msg) {
+const DexFile* OpenDexFile(const OatDexFile* oat_dex_file, std::string* error_msg) {
DCHECK(oat_dex_file != nullptr);
auto it = opened_dex_files.find(oat_dex_file);
if (it != opened_dex_files.end()) {
@@ -238,15 +239,15 @@ class OatSymbolizer FINAL {
}
void Walk() {
- std::vector<const OatFile::OatDexFile*> oat_dex_files = oat_file_->GetOatDexFiles();
+ std::vector<const OatDexFile*> oat_dex_files = oat_file_->GetOatDexFiles();
for (size_t i = 0; i < oat_dex_files.size(); i++) {
- const OatFile::OatDexFile* oat_dex_file = oat_dex_files[i];
+ const OatDexFile* oat_dex_file = oat_dex_files[i];
CHECK(oat_dex_file != nullptr);
WalkOatDexFile(oat_dex_file);
}
}
- void WalkOatDexFile(const OatFile::OatDexFile* oat_dex_file) {
+ void WalkOatDexFile(const OatDexFile* oat_dex_file) {
std::string error_msg;
const DexFile* const dex_file = OpenDexFile(oat_dex_file, &error_msg);
if (dex_file == nullptr) {
@@ -274,7 +275,7 @@ class OatSymbolizer FINAL {
void WalkOatClass(const OatFile::OatClass& oat_class,
const DexFile& dex_file,
uint32_t class_def_index) {
- ClassAccessor accessor(dex_file, dex_file.GetClassDef(class_def_index));
+ ClassAccessor accessor(dex_file, class_def_index);
// Note: even if this is an interface or a native class, we still have to walk it, as there
// might be a static initializer.
uint32_t class_method_idx = 0;
@@ -527,7 +528,7 @@ class OatDumper {
// Dumping the dex file overview is compact enough to do even if header only.
for (size_t i = 0; i < oat_dex_files_.size(); i++) {
- const OatFile::OatDexFile* oat_dex_file = oat_dex_files_[i];
+ const OatDexFile* oat_dex_file = oat_dex_files_[i];
CHECK(oat_dex_file != nullptr);
std::string error_msg;
const DexFile* const dex_file = OpenDexFile(oat_dex_file, &error_msg);
@@ -544,28 +545,30 @@ class OatDumper {
os << "\n";
}
- // Dump .bss entries.
- DumpBssEntries(
- os,
- "ArtMethod",
- oat_dex_file->GetMethodBssMapping(),
- dex_file->NumMethodIds(),
- static_cast<size_t>(GetInstructionSetPointerSize(instruction_set_)),
- [=](uint32_t index) { return dex_file->PrettyMethod(index); });
- DumpBssEntries(
- os,
- "Class",
- oat_dex_file->GetTypeBssMapping(),
- dex_file->NumTypeIds(),
- sizeof(GcRoot<mirror::Class>),
- [=](uint32_t index) { return dex_file->PrettyType(dex::TypeIndex(index)); });
- DumpBssEntries(
- os,
- "String",
- oat_dex_file->GetStringBssMapping(),
- dex_file->NumStringIds(),
- sizeof(GcRoot<mirror::Class>),
- [=](uint32_t index) { return dex_file->StringDataByIdx(dex::StringIndex(index)); });
+ if (!options_.dump_header_only_) {
+ // Dump .bss entries.
+ DumpBssEntries(
+ os,
+ "ArtMethod",
+ oat_dex_file->GetMethodBssMapping(),
+ dex_file->NumMethodIds(),
+ static_cast<size_t>(GetInstructionSetPointerSize(instruction_set_)),
+ [=](uint32_t index) { return dex_file->PrettyMethod(index); });
+ DumpBssEntries(
+ os,
+ "Class",
+ oat_dex_file->GetTypeBssMapping(),
+ dex_file->NumTypeIds(),
+ sizeof(GcRoot<mirror::Class>),
+ [=](uint32_t index) { return dex_file->PrettyType(dex::TypeIndex(index)); });
+ DumpBssEntries(
+ os,
+ "String",
+ oat_dex_file->GetStringBssMapping(),
+ dex_file->NumStringIds(),
+ sizeof(GcRoot<mirror::Class>),
+ [=](uint32_t index) { return dex_file->StringDataByIdx(dex::StringIndex(index)); });
+ }
}
if (!options_.dump_header_only_) {
@@ -594,7 +597,7 @@ class OatDumper {
<< "\n";
}
for (size_t i = 0; i < oat_dex_files_.size(); i++) {
- const OatFile::OatDexFile* oat_dex_file = oat_dex_files_[i];
+ const OatDexFile* oat_dex_file = oat_dex_files_[i];
CHECK(oat_dex_file != nullptr);
if (!DumpOatDexFile(os, *oat_dex_file)) {
success = false;
@@ -626,7 +629,7 @@ class OatDumper {
size_t i = 0;
for (const auto& vdex_dex_file : vdex_dex_files) {
- const OatFile::OatDexFile* oat_dex_file = oat_dex_files_[i];
+ const OatDexFile* oat_dex_file = oat_dex_files_[i];
CHECK(oat_dex_file != nullptr);
CHECK(vdex_dex_file != nullptr);
if (!ExportDexFile(os, *oat_dex_file, vdex_dex_file.get())) {
@@ -639,7 +642,8 @@ class OatDumper {
{
os << "OAT FILE STATS:\n";
VariableIndentationOutputStream vios(&os);
- stats_.Dump(vios);
+ stats_.AddBytes(oat_file_.Size());
+ DumpStats(vios, "OatFile", stats_, stats_.Value());
}
os << std::flush;
@@ -665,7 +669,7 @@ class OatDumper {
const void* GetQuickOatCode(ArtMethod* m) REQUIRES_SHARED(Locks::mutator_lock_) {
for (size_t i = 0; i < oat_dex_files_.size(); i++) {
- const OatFile::OatDexFile* oat_dex_file = oat_dex_files_[i];
+ const OatDexFile* oat_dex_file = oat_dex_files_[i];
CHECK(oat_dex_file != nullptr);
std::string error_msg;
const DexFile* const dex_file = OpenDexFile(oat_dex_file, &error_msg);
@@ -737,156 +741,42 @@ class OatDumper {
return vdex_file;
}
- struct Stats {
- enum ByteKind {
- kByteKindCode,
- kByteKindQuickMethodHeader,
- kByteKindCodeInfoLocationCatalog,
- kByteKindCodeInfoDexRegisterMask,
- kByteKindCodeInfoDexRegisterMap,
- kByteKindCodeInfo,
- kByteKindCodeInfoInvokeInfo,
- kByteKindCodeInfoStackMasks,
- kByteKindCodeInfoRegisterMasks,
- kByteKindStackMapNativePc,
- kByteKindStackMapDexPc,
- kByteKindStackMapDexRegisterMap,
- kByteKindStackMapInlineInfoIndex,
- kByteKindStackMapRegisterMaskIndex,
- kByteKindStackMapStackMaskIndex,
- kByteKindInlineInfoMethodIndexIdx,
- kByteKindInlineInfoDexPc,
- kByteKindInlineInfoArtMethod,
- kByteKindInlineInfoDexRegisterMap,
- kByteKindInlineInfoIsLast,
- kByteKindCount,
- // Special ranges for std::accumulate convenience.
- kByteKindStackMapFirst = kByteKindStackMapNativePc,
- kByteKindStackMapLast = kByteKindStackMapStackMaskIndex,
- kByteKindInlineInfoFirst = kByteKindInlineInfoMethodIndexIdx,
- kByteKindInlineInfoLast = kByteKindInlineInfoIsLast,
- };
- int64_t bits[kByteKindCount] = {};
- // Since code has deduplication, seen tracks already seen pointers to avoid double counting
- // deduplicated code and tables.
- std::unordered_set<const void*> seen;
-
- // Returns true if it was newly added.
- bool AddBitsIfUnique(ByteKind kind, int64_t count, const void* address) {
- if (seen.insert(address).second == true) {
- // True means the address was not already in the set.
- AddBits(kind, count);
- return true;
+ bool AddStatsObject(const void* address) {
+ return seen_stats_objects_.insert(address).second; // Inserted new entry.
+ }
+
+ void DumpStats(VariableIndentationOutputStream& os,
+ const std::string& name,
+ const Stats& stats,
+ double total) {
+ if (std::fabs(stats.Value()) > 0 || !stats.Children().empty()) {
+ double percent = 100.0 * stats.Value() / total;
+ os.Stream()
+ << std::setw(40 - os.GetIndentation()) << std::left << name << std::right << " "
+ << std::setw(8) << stats.Count() << " "
+ << std::setw(12) << std::fixed << std::setprecision(3) << stats.Value() / KB << "KB "
+ << std::setw(8) << std::fixed << std::setprecision(1) << percent << "%\n";
+
+ // Sort all children by largest value first, than by name.
+ std::map<std::pair<double, std::string>, const Stats&> sorted_children;
+ for (const auto& it : stats.Children()) {
+ sorted_children.emplace(std::make_pair(-it.second.Value(), it.first), it.second);
}
- return false;
- }
- void AddBits(ByteKind kind, int64_t count) {
- bits[kind] += count;
- }
-
- void Dump(VariableIndentationOutputStream& os) {
- const int64_t sum = std::accumulate(bits, bits + kByteKindCount, 0u);
- os.Stream() << "Dumping cumulative use of " << sum / kBitsPerByte << " accounted bytes\n";
- if (sum > 0) {
- Dump(os, "Code ", bits[kByteKindCode], sum);
- Dump(os, "QuickMethodHeader ", bits[kByteKindQuickMethodHeader], sum);
- Dump(os, "CodeInfo ", bits[kByteKindCodeInfo], sum);
- Dump(os, "CodeInfoLocationCatalog ", bits[kByteKindCodeInfoLocationCatalog], sum);
- Dump(os, "CodeInfoDexRegisterMask ", bits[kByteKindCodeInfoDexRegisterMask], sum);
- Dump(os, "CodeInfoDexRegisterMap ", bits[kByteKindCodeInfoDexRegisterMap], sum);
- Dump(os, "CodeInfoStackMasks ", bits[kByteKindCodeInfoStackMasks], sum);
- Dump(os, "CodeInfoRegisterMasks ", bits[kByteKindCodeInfoRegisterMasks], sum);
- Dump(os, "CodeInfoInvokeInfo ", bits[kByteKindCodeInfoInvokeInfo], sum);
- // Stack map section.
- const int64_t stack_map_bits = std::accumulate(bits + kByteKindStackMapFirst,
- bits + kByteKindStackMapLast + 1,
- 0u);
- Dump(os, "CodeInfoStackMap ", stack_map_bits, sum);
- {
- ScopedIndentation indent1(&os);
- Dump(os,
- "StackMapNativePc ",
- bits[kByteKindStackMapNativePc],
- stack_map_bits,
- "stack map");
- Dump(os,
- "StackMapDexPc ",
- bits[kByteKindStackMapDexPc],
- stack_map_bits,
- "stack map");
- Dump(os,
- "StackMapDexRegisterMap ",
- bits[kByteKindStackMapDexRegisterMap],
- stack_map_bits,
- "stack map");
- Dump(os,
- "StackMapInlineInfoIndex ",
- bits[kByteKindStackMapInlineInfoIndex],
- stack_map_bits,
- "stack map");
- Dump(os,
- "StackMapRegisterMaskIndex ",
- bits[kByteKindStackMapRegisterMaskIndex],
- stack_map_bits,
- "stack map");
- Dump(os,
- "StackMapStackMaskIndex ",
- bits[kByteKindStackMapStackMaskIndex],
- stack_map_bits,
- "stack map");
- }
- // Inline info section.
- const int64_t inline_info_bits = std::accumulate(bits + kByteKindInlineInfoFirst,
- bits + kByteKindInlineInfoLast + 1,
- 0u);
- Dump(os, "CodeInfoInlineInfo ", inline_info_bits, sum);
- {
- ScopedIndentation indent1(&os);
- Dump(os,
- "InlineInfoMethodIndexIdx ",
- bits[kByteKindInlineInfoMethodIndexIdx],
- inline_info_bits,
- "inline info");
- Dump(os,
- "InlineInfoDexPc ",
- bits[kByteKindStackMapDexPc],
- inline_info_bits,
- "inline info");
- Dump(os,
- "InlineInfoArtMethod ",
- bits[kByteKindInlineInfoArtMethod],
- inline_info_bits,
- "inline info");
- Dump(os,
- "InlineInfoDexRegisterMap ",
- bits[kByteKindInlineInfoDexRegisterMap],
- inline_info_bits,
- "inline info");
- Dump(os,
- "InlineInfoIsLast ",
- bits[kByteKindInlineInfoIsLast],
- inline_info_bits,
- "inline info");
- }
+ // Add "other" row to represent any amount not account for by the children.
+ Stats other;
+ other.AddBytes(stats.Value() - stats.SumChildrenValues(), stats.Count());
+ if (std::fabs(other.Value()) > 0 && !stats.Children().empty()) {
+ sorted_children.emplace(std::make_pair(-other.Value(), "(other)"), other);
}
- os.Stream() << "\n" << std::flush;
- }
- private:
- void Dump(VariableIndentationOutputStream& os,
- const char* name,
- int64_t size,
- int64_t total,
- const char* sum_of = "total") {
- const double percent = (static_cast<double>(size) / static_cast<double>(total)) * 100;
- os.Stream() << StringPrintf("%s = %8" PRId64 " (%2.0f%% of %s)\n",
- name,
- size / kBitsPerByte,
- percent,
- sum_of);
+ // Print the data.
+ ScopedIndentation indent1(&os);
+ for (const auto& it : sorted_children) {
+ DumpStats(os, it.first.second, it.second, total);
+ }
}
- };
+ }
private:
void AddAllOffsets() {
@@ -895,7 +785,7 @@ class OatDumper {
// region, so if we keep a sorted sequence of the start of each region, we can infer the length
// of a piece of code by using upper_bound to find the start of the next region.
for (size_t i = 0; i < oat_dex_files_.size(); i++) {
- const OatFile::OatDexFile* oat_dex_file = oat_dex_files_[i];
+ const OatDexFile* oat_dex_file = oat_dex_files_[i];
CHECK(oat_dex_file != nullptr);
std::string error_msg;
const DexFile* const dex_file = OpenDexFile(oat_dex_file, &error_msg);
@@ -905,15 +795,13 @@ class OatDumper {
continue;
}
offsets_.insert(reinterpret_cast<uintptr_t>(&dex_file->GetHeader()));
- uint32_t class_def_index = 0u;
for (ClassAccessor accessor : dex_file->GetClasses()) {
- const OatFile::OatClass oat_class = oat_dex_file->GetOatClass(class_def_index);
+ const OatFile::OatClass oat_class = oat_dex_file->GetOatClass(accessor.GetClassDefIndex());
for (uint32_t class_method_index = 0;
class_method_index < accessor.NumMethods();
++class_method_index) {
AddOffsets(oat_class.GetOatMethod(class_method_index));
}
- ++class_def_index;
}
}
@@ -936,7 +824,7 @@ class OatDumper {
offsets_.insert(oat_method.GetVmapTableOffset());
}
- bool DumpOatDexFile(std::ostream& os, const OatFile::OatDexFile& oat_dex_file) {
+ bool DumpOatDexFile(std::ostream& os, const OatDexFile& oat_dex_file) {
bool success = true;
bool stop_analysis = false;
os << "OatDexFile:\n";
@@ -1017,9 +905,7 @@ class OatDumper {
// Dex resource is extracted from the oat_dex_file and its checksum is repaired since it's not
// unquickened. Otherwise the dex_file has been fully unquickened and is expected to verify the
// original checksum.
- bool ExportDexFile(std::ostream& os,
- const OatFile::OatDexFile& oat_dex_file,
- const DexFile* dex_file) {
+ bool ExportDexFile(std::ostream& os, const OatDexFile& oat_dex_file, const DexFile* dex_file) {
std::string error_msg;
std::string dex_file_location = oat_dex_file.GetDexFileLocation();
size_t fsize = oat_dex_file.FileSize();
@@ -1268,9 +1154,9 @@ class OatDumper {
vios->Stream() << "OatQuickMethodHeader ";
uint32_t method_header_offset = oat_method.GetOatQuickMethodHeaderOffset();
const OatQuickMethodHeader* method_header = oat_method.GetOatQuickMethodHeader();
- stats_.AddBitsIfUnique(Stats::kByteKindQuickMethodHeader,
- sizeof(*method_header) * kBitsPerByte,
- method_header);
+ if (AddStatsObject(method_header)) {
+ stats_.Child("QuickMethodHeader")->AddBytes(sizeof(*method_header));
+ }
if (options_.absolute_addresses_) {
vios->Stream() << StringPrintf("%p ", method_header);
}
@@ -1342,7 +1228,9 @@ class OatDumper {
const void* code = oat_method.GetQuickCode();
uint32_t aligned_code_begin = AlignCodeOffset(code_offset);
uint64_t aligned_code_end = aligned_code_begin + code_size;
- stats_.AddBitsIfUnique(Stats::kByteKindCode, code_size * kBitsPerByte, code);
+ if (AddStatsObject(code)) {
+ stats_.Child("Code")->AddBytes(code_size);
+ }
if (options_.absolute_addresses_) {
vios->Stream() << StringPrintf("%p ", code);
@@ -1429,7 +1317,7 @@ class OatDumper {
DCHECK(code_item_accessor.HasCodeItem());
ScopedIndentation indent1(vios);
MethodInfo method_info = oat_method.GetOatQuickMethodHeader()->GetOptimizedMethodInfo();
- DumpCodeInfo(vios, code_info, oat_method, code_item_accessor, method_info);
+ DumpCodeInfo(vios, code_info, oat_method, method_info);
}
} else if (IsMethodGeneratedByDexToDexCompiler(oat_method, code_item_accessor)) {
// We don't encode the size in the table, so just emit that we have quickened
@@ -1445,11 +1333,9 @@ class OatDumper {
void DumpCodeInfo(VariableIndentationOutputStream* vios,
const CodeInfo& code_info,
const OatFile::OatMethod& oat_method,
- const CodeItemDataAccessor& code_item_accessor,
const MethodInfo& method_info) {
code_info.Dump(vios,
oat_method.GetCodeOffset(),
- code_item_accessor.RegistersSize(),
options_.dump_code_info_stack_maps_,
instruction_set_,
method_info);
@@ -1694,78 +1580,15 @@ class OatDumper {
} else if (!bad_input && IsMethodGeneratedByOptimizingCompiler(oat_method,
code_item_accessor)) {
// The optimizing compiler outputs its CodeInfo data in the vmap table.
+ const OatQuickMethodHeader* method_header = oat_method.GetOatQuickMethodHeader();
StackMapsHelper helper(oat_method.GetVmapTable(), instruction_set_);
- MethodInfo method_info(oat_method.GetOatQuickMethodHeader()->GetOptimizedMethodInfo());
- {
- const CodeInfo code_info = helper.GetCodeInfo();
- const BitTable<StackMap::kCount>& stack_maps = code_info.stack_maps_;
- const size_t num_stack_maps = stack_maps.NumRows();
- if (stats_.AddBitsIfUnique(Stats::kByteKindCodeInfo,
- code_info.size_ * kBitsPerByte,
- oat_method.GetVmapTable())) {
- // Stack maps
- stats_.AddBits(
- Stats::kByteKindStackMapNativePc,
- stack_maps.NumColumnBits(StackMap::kPackedNativePc) * num_stack_maps);
- stats_.AddBits(
- Stats::kByteKindStackMapDexPc,
- stack_maps.NumColumnBits(StackMap::kDexPc) * num_stack_maps);
- stats_.AddBits(
- Stats::kByteKindStackMapDexRegisterMap,
- stack_maps.NumColumnBits(StackMap::kDexRegisterMapIndex) * num_stack_maps);
- stats_.AddBits(
- Stats::kByteKindStackMapInlineInfoIndex,
- stack_maps.NumColumnBits(StackMap::kInlineInfoIndex) * num_stack_maps);
- stats_.AddBits(
- Stats::kByteKindStackMapRegisterMaskIndex,
- stack_maps.NumColumnBits(StackMap::kRegisterMaskIndex) * num_stack_maps);
- stats_.AddBits(
- Stats::kByteKindStackMapStackMaskIndex,
- stack_maps.NumColumnBits(StackMap::kStackMaskIndex) * num_stack_maps);
-
- // Stack masks
- stats_.AddBits(
- Stats::kByteKindCodeInfoStackMasks,
- code_info.stack_masks_.DataBitSize());
-
- // Register masks
- stats_.AddBits(
- Stats::kByteKindCodeInfoRegisterMasks,
- code_info.register_masks_.DataBitSize());
-
- // Invoke infos
- stats_.AddBits(
- Stats::kByteKindCodeInfoInvokeInfo,
- code_info.invoke_infos_.DataBitSize());
-
- // Location catalog
- stats_.AddBits(Stats::kByteKindCodeInfoLocationCatalog,
- code_info.dex_register_catalog_.DataBitSize());
- stats_.AddBits(Stats::kByteKindCodeInfoDexRegisterMask,
- code_info.dex_register_masks_.DataBitSize());
- stats_.AddBits(Stats::kByteKindCodeInfoDexRegisterMap,
- code_info.dex_register_maps_.DataBitSize());
-
- // Inline infos.
- const BitTable<InlineInfo::kCount>& inline_infos = code_info.inline_infos_;
- const size_t num_inline_infos = inline_infos.NumRows();
- if (num_inline_infos > 0u) {
- stats_.AddBits(
- Stats::kByteKindInlineInfoMethodIndexIdx,
- inline_infos.NumColumnBits(InlineInfo::kMethodInfoIndex) * num_inline_infos);
- stats_.AddBits(
- Stats::kByteKindInlineInfoDexPc,
- inline_infos.NumColumnBits(InlineInfo::kDexPc) * num_inline_infos);
- stats_.AddBits(
- Stats::kByteKindInlineInfoArtMethod,
- inline_infos.NumColumnBits(InlineInfo::kArtMethodHi) * num_inline_infos +
- inline_infos.NumColumnBits(InlineInfo::kArtMethodLo) * num_inline_infos);
- stats_.AddBits(
- Stats::kByteKindInlineInfoDexRegisterMap,
- inline_infos.NumColumnBits(InlineInfo::kDexRegisterMapIndex) * num_inline_infos);
- stats_.AddBits(Stats::kByteKindInlineInfoIsLast, num_inline_infos);
- }
- }
+ if (AddStatsObject(oat_method.GetVmapTable())) {
+ helper.GetCodeInfo().AddSizeStats(&stats_);
+ }
+ MethodInfo method_info(method_header->GetOptimizedMethodInfo());
+ if (AddStatsObject(method_header->GetOptimizedMethodInfoPtr())) {
+ size_t method_info_size = MethodInfo::ComputeSize(method_info.NumMethodIndices());
+ stats_.Child("MethodInfo")->AddBytes(method_info_size);
}
const uint8_t* quick_native_pc = reinterpret_cast<const uint8_t*>(quick_code);
size_t offset = 0;
@@ -1779,7 +1602,6 @@ class OatDumper {
helper.GetCodeInfo(),
method_info,
oat_method.GetCodeOffset(),
- code_item_accessor.RegistersSize(),
instruction_set_);
do {
helper.Next();
@@ -1892,13 +1714,14 @@ class OatDumper {
}
const OatFile& oat_file_;
- const std::vector<const OatFile::OatDexFile*> oat_dex_files_;
+ const std::vector<const OatDexFile*> oat_dex_files_;
const OatDumperOptions& options_;
uint32_t resolved_addr2instr_;
const InstructionSet instruction_set_;
std::set<uintptr_t> offsets_;
Disassembler* disassembler_;
Stats stats_;
+ std::unordered_set<const void*> seen_stats_objects_;
};
class ImageDumper {
@@ -2030,7 +1853,7 @@ class ImageDumper {
oat_dumper_.reset(new OatDumper(*oat_file, *oat_dumper_options_));
- for (const OatFile::OatDexFile* oat_dex_file : oat_file->GetOatDexFiles()) {
+ for (const OatDexFile* oat_dex_file : oat_file->GetOatDexFiles()) {
CHECK(oat_dex_file != nullptr);
stats_.oat_dex_file_sizes.push_back(std::make_pair(oat_dex_file->GetDexFileLocation(),
oat_dex_file->FileSize()));
@@ -2956,7 +2779,7 @@ static jobject InstallOatFile(Runtime* runtime,
OatFile* oat_file_ptr = oat_file.get();
ClassLinker* class_linker = runtime->GetClassLinker();
runtime->GetOatFileManager().RegisterOatFile(std::move(oat_file));
- for (const OatFile::OatDexFile* odf : oat_file_ptr->GetOatDexFiles()) {
+ for (const OatDexFile* odf : oat_file_ptr->GetOatDexFiles()) {
std::string error_msg;
const DexFile* const dex_file = OpenDexFile(odf, &error_msg);
CHECK(dex_file != nullptr) << error_msg;
diff --git a/oatdump/oatdump_test.h b/oatdump/oatdump_test.h
index 293acdc3a6..231163b674 100644
--- a/oatdump/oatdump_test.h
+++ b/oatdump/oatdump_test.h
@@ -158,7 +158,7 @@ class OatDumpTest : public CommonRuntimeTest {
// Code and dex code do not show up if list only.
expected_prefixes.push_back("DEX CODE:");
expected_prefixes.push_back("CODE:");
- expected_prefixes.push_back("CodeInfoInlineInfo");
+ expected_prefixes.push_back("InlineInfos");
}
if (mode == kModeArt) {
exec_argv.push_back("--image=" + core_art_location_);
diff --git a/openjdkjvmti/ti_ddms.cc b/openjdkjvmti/ti_ddms.cc
index 0b4906d798..bf063faf7b 100644
--- a/openjdkjvmti/ti_ddms.cc
+++ b/openjdkjvmti/ti_ddms.cc
@@ -60,7 +60,7 @@ jvmtiError DDMSUtil::HandleChunk(jvmtiEnv* env,
*data_out = nullptr;
art::Thread* self = art::Thread::Current();
- art::ScopedThreadStateChange(self, art::ThreadState::kNative);
+ art::ScopedThreadStateChange stcs(self, art::ThreadState::kNative);
art::ArrayRef<const jbyte> data_arr(data_in, length_in);
std::vector<uint8_t> out_data;
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index 01418b0b79..97b315e85c 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -539,7 +539,7 @@ bool PatchOat::Patch(const std::string& image_location,
space_to_memmap_map.emplace(space, std::move(image));
PatchOat p = PatchOat(isa,
- space_to_memmap_map.at(space).get(),
+ space_to_memmap_map[space].get(),
space->GetLiveBitmap(),
space->GetMemMap(),
delta,
@@ -615,7 +615,7 @@ bool PatchOat::Patch(const std::string& image_location,
}
}
- if (!kIsDebugBuild && !(RUNNING_ON_MEMORY_TOOL && kMemoryToolDetectsLeaks)) {
+ if (!kIsDebugBuild && !(kRunningOnMemoryTool && kMemoryToolDetectsLeaks)) {
// We want to just exit on non-debug builds, not bringing the runtime down
// in an orderly fashion. So release the following fields.
runtime.release();
@@ -695,7 +695,7 @@ bool PatchOat::Verify(const std::string& image_location,
}
}
- if (!kIsDebugBuild && !(RUNNING_ON_MEMORY_TOOL && kMemoryToolDetectsLeaks)) {
+ if (!kIsDebugBuild && !(kRunningOnMemoryTool && kMemoryToolDetectsLeaks)) {
// We want to just exit on non-debug builds, not bringing the runtime down
// in an orderly fashion. So release the following fields.
runtime.release();
diff --git a/patchoat/patchoat_test.cc b/patchoat/patchoat_test.cc
index 934936d4b3..08bf31c4bd 100644
--- a/patchoat/patchoat_test.cc
+++ b/patchoat/patchoat_test.cc
@@ -445,19 +445,15 @@ TEST_F(PatchoatTest, PatchoatRelocationSameAsDex2oatRelocation) {
#endif
}
+// These tests check that a boot image relocated using patchoat can be unrelocated
+// using the .rel file created by patchoat.
+//
+// The tests don't work when heap poisoning is enabled because some of the
+// references are negated. b/72117833 is tracking the effort to have patchoat
+// and its tests support heap poisoning.
class PatchoatVerificationTest : public PatchoatTest {
protected:
- virtual void SetUp() {
- PatchoatTest::SetUp();
-
- // This test checks that a boot image relocated using patchoat can be unrelocated using the .rel
- // file created by patchoat.
-
- // This test doesn't work when heap poisoning is enabled because some of the
- // references are negated. b/72117833 is tracking the effort to have patchoat
- // and its tests support heap poisoning.
- TEST_DISABLED_FOR_HEAP_POISONING();
-
+ void CreateRelocatedBootImage() {
// Compile boot image into a random directory using dex2oat
ScratchFile dex2oat_orig_scratch;
dex2oat_orig_scratch.Unlink();
@@ -534,12 +530,14 @@ class PatchoatVerificationTest : public PatchoatTest {
}
virtual void TearDown() {
- ClearDirectory(dex2oat_orig_dir_.c_str(), /*recursive*/ true);
- ClearDirectory(relocated_dir_.c_str(), /*recursive*/ true);
-
- rmdir(dex2oat_orig_dir_.c_str());
- rmdir(relocated_dir_.c_str());
-
+ if (!dex2oat_orig_dir_.empty()) {
+ ClearDirectory(dex2oat_orig_dir_.c_str(), /*recursive*/ true);
+ rmdir(dex2oat_orig_dir_.c_str());
+ }
+ if (!relocated_dir_.empty()) {
+ ClearDirectory(relocated_dir_.c_str(), /*recursive*/ true);
+ rmdir(relocated_dir_.c_str());
+ }
PatchoatTest::TearDown();
}
@@ -550,6 +548,9 @@ class PatchoatVerificationTest : public PatchoatTest {
// Assert that verification works with the .rel files.
TEST_F(PatchoatVerificationTest, Sucessful) {
+ TEST_DISABLED_FOR_HEAP_POISONING();
+ CreateRelocatedBootImage();
+
std::string error_msg;
if (!VerifyBootImage(
dex2oat_orig_dir_ + "/boot.art",
@@ -562,6 +563,9 @@ TEST_F(PatchoatVerificationTest, Sucessful) {
// Corrupt the image file and check that the verification fails gracefully.
TEST_F(PatchoatVerificationTest, CorruptedImage) {
+ TEST_DISABLED_FOR_HEAP_POISONING();
+ CreateRelocatedBootImage();
+
std::string error_msg;
std::string relocated_image_filename;
if (!GetDalvikCacheFilename((dex2oat_orig_dir_ + "/boot.art").c_str(),
@@ -584,6 +588,9 @@ TEST_F(PatchoatVerificationTest, CorruptedImage) {
// Corrupt the relocation file and check that the verification fails gracefully.
TEST_F(PatchoatVerificationTest, CorruptedRelFile) {
+ TEST_DISABLED_FOR_HEAP_POISONING();
+ CreateRelocatedBootImage();
+
std::string error_msg;
std::string art_filename = dex2oat_orig_dir_ + "/boot.art";
std::string rel_filename = dex2oat_orig_dir_ + "/boot.art.rel";
diff --git a/profman/profman.cc b/profman/profman.cc
index 096e5dc3bd..9b470973c6 100644
--- a/profman/profman.cc
+++ b/profman/profman.cc
@@ -389,7 +389,7 @@ class ProfMan FINAL {
}
bool OpenApkFilesFromLocations(
- std::function<void(std::unique_ptr<const DexFile>&&)> process_fn) {
+ const std::function<void(std::unique_ptr<const DexFile>&&)>& process_fn) {
bool use_apk_fd_list = !apks_fd_.empty();
if (use_apk_fd_list) {
// Get the APKs from the collection of FDs.
@@ -930,7 +930,9 @@ class ProfMan FINAL {
dex_resolved_classes.first->AddClass(class_ref.TypeIndex());
std::vector<ProfileMethodInfo> methods;
if (method_str == kClassAllMethods) {
- ClassAccessor accessor(*dex_file, *dex_file->FindClassDef(class_ref.TypeIndex()));
+ ClassAccessor accessor(
+ *dex_file,
+ dex_file->GetIndexForClassDef(*dex_file->FindClassDef(class_ref.TypeIndex())));
for (const ClassAccessor::Method& method : accessor.GetMethods()) {
if (method.GetCodeItemOffset() != 0) {
// Add all of the methods that have code to the profile.
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 777a1fc5ee..0345c2f4f7 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -37,7 +37,6 @@ libart_cc_defaults {
"base/quasi_atomic.cc",
"base/timing_logger.cc",
"cha.cc",
- "check_jni.cc",
"class_linker.cc",
"class_loader_context.cc",
"class_root.cc",
@@ -47,6 +46,7 @@ libart_cc_defaults {
"debug_print.cc",
"debugger.cc",
"dex/dex_file_annotations.cc",
+ "dex_register_location.cc",
"dex_to_dex_decompiler.cc",
"elf_file.cc",
"exec_utils.cc",
@@ -111,6 +111,7 @@ libart_cc_defaults {
"jit/jit_code_cache.cc",
"jit/profiling_info.cc",
"jit/profile_saver.cc",
+ "jni/check_jni.cc",
"jni/java_vm_ext.cc",
"jni/jni_env_ext.cc",
"jni/jni_internal.cc",
@@ -337,14 +338,10 @@ libart_cc_defaults {
"thread_android.cc",
],
shared_libs: [
- // For android::FileMap used by libziparchive.
- "libutils",
"libtombstoned_client",
],
static_libs: [
- // ZipArchive support, the order matters here to get all symbols.
- "libziparchive",
- "libz",
+ "libz", // For adler32.
],
},
android_arm: {
@@ -366,8 +363,7 @@ libart_cc_defaults {
"thread_linux.cc",
],
shared_libs: [
- "libziparchive",
- "libz",
+ "libz", // For adler32.
],
},
},
@@ -599,7 +595,6 @@ art_cc_test {
],
shared_libs: [
"libbacktrace",
- "libziparchive",
],
header_libs: [
"art_cmdlineparser_headers", // For parsed_options_test.
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 311e838fb3..ccff9f6a7b 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -2686,84 +2686,31 @@ END art_quick_read_barrier_mark_introspection
.extern artInvokePolymorphic
ENTRY art_quick_invoke_polymorphic
SETUP_SAVE_REFS_AND_ARGS_FRAME r2
- mov r2, rSELF @ pass Thread::Current
- mov r3, sp @ pass SP
- mov r0, #0 @ initialize 64-bit JValue as zero.
- str r0, [sp, #-4]!
- .cfi_adjust_cfa_offset 4
- str r0, [sp, #-4]!
- .cfi_adjust_cfa_offset 4
- mov r0, sp @ pass JValue for return result as first argument.
- bl artInvokePolymorphic @ artInvokePolymorphic(JValue, receiver, Thread*, SP)
- sub r0, 'A' @ return value is descriptor of handle's return type.
- cmp r0, 'Z' - 'A' @ check if value is in bounds of handler table
- bgt .Lcleanup_and_return @ and clean-up if not.
- adr r1, .Lhandler_table
- tbb [r0, r1] @ branch to handler for return value based on return type.
-
-.Lstart_of_handlers:
-.Lstore_boolean_result:
- ldrb r0, [sp] @ Copy boolean value to return value of this function.
- b .Lcleanup_and_return
-.Lstore_char_result:
- ldrh r0, [sp] @ Copy char value to return value of this function.
- b .Lcleanup_and_return
-.Lstore_float_result:
- vldr s0, [sp] @ Copy float value from JValue result to the context restored by
- vstr s0, [sp, #16] @ RESTORE_SAVE_REFS_AND_ARGS_FRAME.
- b .Lcleanup_and_return
-.Lstore_double_result:
- vldr d0, [sp] @ Copy double value from JValue result to the context restored by
- vstr d0, [sp, #16] @ RESTORE_SAVE_REFS_AND_ARGS_FRAME.
- b .Lcleanup_and_return
-.Lstore_long_result:
- ldr r1, [sp, #4] @ Copy the upper bits from JValue result to the context restored by
- str r1, [sp, #80] @ RESTORE_SAVE_REFS_AND_ARGS_FRAME.
- // Fall-through for lower bits.
-.Lstore_int_result:
- ldr r0, [sp] @ Copy int value to return value of this function.
- // Fall-through to clean up and return.
-.Lcleanup_and_return:
- add sp, #8
- .cfi_adjust_cfa_offset -8
+ mov r0, r1 @ r0 := receiver
+ mov r1, rSELF @ r1 := Thread::Current
+ mov r2, sp @ r2 := SP
+ bl artInvokePolymorphic @ artInvokePolymorphic(receiver, Thread*, SP)
+ str r1, [sp, 72] @ r0:r1 := Result. Copy r1 to context.
RESTORE_SAVE_REFS_AND_ARGS_FRAME
REFRESH_MARKING_REGISTER
+ vmov d0, r0, r1 @ Put result r0:r1 into floating point return register.
RETURN_OR_DELIVER_PENDING_EXCEPTION_REG r2
-
-.macro HANDLER_TABLE_OFFSET handler_label
- .byte (\handler_label - .Lstart_of_handlers) / 2
-.endm
-
-.Lhandler_table:
- HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // A
- HANDLER_TABLE_OFFSET(.Lstore_int_result) // B (byte)
- HANDLER_TABLE_OFFSET(.Lstore_char_result) // C (char)
- HANDLER_TABLE_OFFSET(.Lstore_double_result) // D (double)
- HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // E
- HANDLER_TABLE_OFFSET(.Lstore_float_result) // F (float)
- HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // G
- HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // H
- HANDLER_TABLE_OFFSET(.Lstore_int_result) // I (int)
- HANDLER_TABLE_OFFSET(.Lstore_long_result) // J (long)
- HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // K
- HANDLER_TABLE_OFFSET(.Lstore_int_result) // L (object)
- HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // M
- HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // N
- HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // O
- HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // P
- HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // Q
- HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // R
- HANDLER_TABLE_OFFSET(.Lstore_int_result) // S (short)
- HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // T
- HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // U
- HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // V (void)
- HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // W
- HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // X
- HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // Y
- HANDLER_TABLE_OFFSET(.Lstore_boolean_result) // Z (boolean)
-.purgem HANDLER_TABLE_OFFSET
END art_quick_invoke_polymorphic
+.extern artInvokeCustom
+ENTRY art_quick_invoke_custom
+ SETUP_SAVE_REFS_AND_ARGS_FRAME r1
+ @ r0 := call_site_idx
+ mov r1, rSELF @ r1 := Thread::Current
+ mov r2, sp @ r2 := SP
+ bl artInvokeCustom @ artInvokeCustom(call_site_idx, Thread*, SP)
+ str r1, [sp, #72] @ Save r1 to context (r0:r1 = result)
+ RESTORE_SAVE_REFS_AND_ARGS_FRAME
+ REFRESH_MARKING_REGISTER
+ vmov d0, r0, r1 @ Put result r0:r1 into floating point return register.
+ RETURN_OR_DELIVER_PENDING_EXCEPTION_REG r2
+END art_quick_invoke_custom
+
// Wrap ExecuteSwitchImpl in assembly method which specifies DEX PC for unwinding.
// Argument 0: r0: The context pointer for ExecuteSwitchImpl.
// Argument 1: r1: Pointer to the templated ExecuteSwitchImpl to call.
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 14d0cc7db4..80d5fce423 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -2844,84 +2844,30 @@ END art_quick_read_barrier_mark_introspection
.extern artInvokePolymorphic
ENTRY art_quick_invoke_polymorphic
- SETUP_SAVE_REFS_AND_ARGS_FRAME // Save callee saves in case allocation triggers GC.
- mov x2, xSELF
- mov x3, sp
- INCREASE_FRAME 16 // Reserve space for JValue result.
- str xzr, [sp, #0] // Initialize result to zero.
- mov x0, sp // Set r0 to point to result.
- bl artInvokePolymorphic // artInvokePolymorphic(result, receiver, thread, save_area)
- uxtb w0, w0 // Result is the return type descriptor as a char.
- sub w0, w0, 'A' // Convert to zero based index.
- cmp w0, 'Z' - 'A'
- bhi .Lcleanup_and_return // Clean-up if out-of-bounds.
- adrp x1, .Lhandler_table // Compute address of handler table.
- add x1, x1, :lo12:.Lhandler_table
- ldrb w0, [x1, w0, uxtw] // Lookup handler offset in handler table.
- adr x1, .Lstart_of_handlers
- add x0, x1, w0, sxtb #2 // Convert relative offset to absolute address.
- br x0 // Branch to handler.
-
-.Lstart_of_handlers:
-.Lstore_boolean_result:
- ldrb w0, [sp]
- b .Lcleanup_and_return
-.Lstore_char_result:
- ldrh w0, [sp]
- b .Lcleanup_and_return
-.Lstore_float_result:
- ldr s0, [sp]
- str s0, [sp, #32]
- b .Lcleanup_and_return
-.Lstore_double_result:
- ldr d0, [sp]
- str d0, [sp, #32]
- b .Lcleanup_and_return
-.Lstore_long_result:
- ldr x0, [sp]
- // Fall-through
-.Lcleanup_and_return:
- DECREASE_FRAME 16
+ SETUP_SAVE_REFS_AND_ARGS_FRAME // Save callee saves in case allocation triggers GC.
+ mov x0, x1 // x0 := receiver
+ mov x1, xSELF // x1 := Thread::Current()
+ mov x2, sp // x2 := SP
+ bl artInvokePolymorphic // artInvokePolymorphic(receiver, thread, save_area)
RESTORE_SAVE_REFS_AND_ARGS_FRAME
REFRESH_MARKING_REGISTER
- RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
-
- .section .rodata // Place handler table in read-only section away from text.
- .align 2
-.macro HANDLER_TABLE_OFFSET handler_label
- .byte (\handler_label - .Lstart_of_handlers) / 4
-.endm
-.Lhandler_table:
- HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // A
- HANDLER_TABLE_OFFSET(.Lstore_long_result) // B (byte)
- HANDLER_TABLE_OFFSET(.Lstore_char_result) // C (char)
- HANDLER_TABLE_OFFSET(.Lstore_double_result) // D (double)
- HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // E
- HANDLER_TABLE_OFFSET(.Lstore_float_result) // F (float)
- HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // G
- HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // H
- HANDLER_TABLE_OFFSET(.Lstore_long_result) // I (int)
- HANDLER_TABLE_OFFSET(.Lstore_long_result) // J (long)
- HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // K
- HANDLER_TABLE_OFFSET(.Lstore_long_result) // L (object - references are compressed and only 32-bits)
- HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // M
- HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // N
- HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // O
- HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // P
- HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // Q
- HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // R
- HANDLER_TABLE_OFFSET(.Lstore_long_result) // S (short)
- HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // T
- HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // U
- HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // V (void)
- HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // W
- HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // X
- HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // Y
- HANDLER_TABLE_OFFSET(.Lstore_boolean_result) // Z (boolean)
- .text
-
+ fmov d0, x0 // Result is in x0. Copy to floating return register.
+ RETURN_OR_DELIVER_PENDING_EXCEPTION
END art_quick_invoke_polymorphic
+.extern artInvokeCustom
+ENTRY art_quick_invoke_custom
+ SETUP_SAVE_REFS_AND_ARGS_FRAME // Save callee saves in case allocation triggers GC.
+ // x0 := call_site_idx
+ mov x1, xSELF // x1 := Thread::Current()
+ mov x2, sp // x2 := SP
+ bl artInvokeCustom // artInvokeCustom(call_site_idx, thread, save_area)
+ RESTORE_SAVE_REFS_AND_ARGS_FRAME
+ REFRESH_MARKING_REGISTER
+ fmov d0, x0 // Copy result to double result register.
+ RETURN_OR_DELIVER_PENDING_EXCEPTION
+END art_quick_invoke_custom
+
// Wrap ExecuteSwitchImpl in assembly method which specifies DEX PC for unwinding.
// Argument 0: x0: The context pointer for ExecuteSwitchImpl.
// Argument 1: x1: Pointer to the templated ExecuteSwitchImpl to call.
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
index 5d6e410101..2b69c1753b 100644
--- a/runtime/arch/mips/entrypoints_init_mips.cc
+++ b/runtime/arch/mips/entrypoints_init_mips.cc
@@ -410,6 +410,9 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
static_assert(!IsDirectEntrypoint(kQuickInvokeVirtualTrampolineWithAccessCheck),
"Non-direct C stub marked direct.");
qpoints->pInvokePolymorphic = art_quick_invoke_polymorphic;
+ static_assert(!IsDirectEntrypoint(kQuickInvokePolymorphic), "Non-direct C stub marked direct.");
+ qpoints->pInvokeCustom = art_quick_invoke_custom;
+ static_assert(!IsDirectEntrypoint(kQuickInvokeCustom), "Non-direct C stub marked direct.");
// Thread
qpoints->pTestSuspend = art_quick_test_suspend;
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index c367ea60c2..508a2013b7 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -3246,59 +3246,48 @@ art_quick_read_barrier_mark_introspection_end_of_entries:
BRB_FIELD_EXIT_BREAK
END art_quick_read_barrier_mark_introspection
+ /*
+ * Polymorphic method invocation.
+ * On entry:
+ * a0 = unused
+ * a1 = receiver
+ */
.extern artInvokePolymorphic
ENTRY art_quick_invoke_polymorphic
SETUP_SAVE_REFS_AND_ARGS_FRAME
- move $a2, rSELF # Make $a2 an alias for the current Thread.
- addiu $a3, $sp, ARG_SLOT_SIZE # Make $a3 a pointer to the saved frame context.
- sw $zero, 20($sp) # Initialize JValue result.
- sw $zero, 16($sp)
- la $t9, artInvokePolymorphic
- jalr $t9 # artInvokePolymorphic(result, receiver, Thread*, context)
- addiu $a0, $sp, 16 # Make $a0 a pointer to the JValue result
-.macro MATCH_RETURN_TYPE c, handler
- li $t0, \c
- beq $v0, $t0, \handler
-.endm
- MATCH_RETURN_TYPE 'V', .Lcleanup_and_return
- MATCH_RETURN_TYPE 'L', .Lstore_int_result
- MATCH_RETURN_TYPE 'I', .Lstore_int_result
- MATCH_RETURN_TYPE 'J', .Lstore_long_result
- MATCH_RETURN_TYPE 'B', .Lstore_int_result
- MATCH_RETURN_TYPE 'C', .Lstore_char_result
- MATCH_RETURN_TYPE 'D', .Lstore_double_result
- MATCH_RETURN_TYPE 'F', .Lstore_float_result
- MATCH_RETURN_TYPE 'S', .Lstore_int_result
- MATCH_RETURN_TYPE 'Z', .Lstore_boolean_result
-.purgem MATCH_RETURN_TYPE
- nop
- b .Lcleanup_and_return
- nop
-.Lstore_boolean_result:
- b .Lcleanup_and_return
- lbu $v0, 16($sp) # Move byte from JValue result to return value register.
-.Lstore_char_result:
- b .Lcleanup_and_return
- lhu $v0, 16($sp) # Move char from JValue result to return value register.
-.Lstore_double_result:
-.Lstore_float_result:
- CHECK_ALIGNMENT $sp, $t0
- ldc1 $f0, 16($sp) # Move double/float from JValue result to return value register.
- b .Lcleanup_and_return
- nop
-.Lstore_long_result:
- lw $v1, 20($sp) # Move upper bits from JValue result to return value register.
- // Fall-through for lower bits.
-.Lstore_int_result:
- lw $v0, 16($sp) # Move lower bits from JValue result to return value register.
- // Fall-through to clean up and return.
-.Lcleanup_and_return:
- lw $t7, THREAD_EXCEPTION_OFFSET(rSELF) # Load Thread::Current()->exception_
+ move $a0, $a1 # Make $a0 the receiver.
+ move $a1, rSELF # Make $a1 an alias for the current Thread.
+ la $t9, artInvokePolymorphic # Invoke artInvokePolymorphic
+ jalr $t9 # with args (receiver, Thread*, context).
+ addiu $a2, $sp, ARG_SLOT_SIZE # Make $a2 a pointer to the saved frame context.
+ lw $t7, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
RESTORE_SAVE_REFS_AND_ARGS_FRAME
- bnez $t7, 1f # Success if no exception is pending.
- nop
- jalr $zero, $ra
+ bnez $t7, 1f
+ # don't care if $v0 and/or $v1 are modified, when exception branch taken
+ MTD $v0, $v1, $f0, $f1 # move float value to return value
+ jalr $zero, $ra
nop
1:
DELIVER_PENDING_EXCEPTION
END art_quick_invoke_polymorphic
+
+ /*
+ * InvokeCustom invocation.
+ * On entry:
+ * a0 = call_site_idx
+ */
+.extern artInvokeCustom
+ENTRY art_quick_invoke_custom
+ SETUP_SAVE_REFS_AND_ARGS_FRAME
+ move $a1, rSELF # Make $a1 an alias for the current Thread.
+ la $t9, artInvokeCustom # Invoke artInvokeCustom
+ jalr $t9 # with args (call_site_idx, Thread*, context).
+ addiu $a2, $sp, ARG_SLOT_SIZE # Make $a2 a pointer to the saved frame context.
+ lw $t7, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
+ RESTORE_SAVE_REFS_AND_ARGS_FRAME
+ bnez $t7, 1f
+ # don't care if $v0 and/or $v1 are modified, when exception branch taken
+ MTD $v0, $v1, $f0, $f1 # move float value to return value
+ jalr $zero, $ra
+ nop
+END art_quick_invoke_custom
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index 1f4f174e26..258acddd47 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -3046,61 +3046,49 @@ art_quick_read_barrier_mark_introspection_end_of_entries:
BRB_FIELD_EXIT_BREAK
END art_quick_read_barrier_mark_introspection
+ /*
+ * Polymorphic method invocation.
+ * On entry:
+ * a0 = unused
+ * a1 = receiver
+ */
.extern artInvokePolymorphic
ENTRY art_quick_invoke_polymorphic
SETUP_SAVE_REFS_AND_ARGS_FRAME
- move $a2, rSELF # Make $a2 an alias for the current Thread.
- move $a3, $sp # Make $a3 a pointer to the saved frame context.
- daddiu $sp, $sp, -8 # Reserve space for JValue result.
- .cfi_adjust_cfa_offset 8
- sd $zero, 0($sp) # Initialize JValue result.
- jal artInvokePolymorphic # artInvokePolymorphic(result, receiver, Thread*, context)
- move $a0, $sp # Make $a0 a pointer to the JValue result
-.macro MATCH_RETURN_TYPE c, handler
- li $t0, \c
- beq $v0, $t0, \handler
-.endm
- MATCH_RETURN_TYPE 'V', .Lcleanup_and_return
- MATCH_RETURN_TYPE 'L', .Lstore_ref_result
- MATCH_RETURN_TYPE 'I', .Lstore_long_result
- MATCH_RETURN_TYPE 'J', .Lstore_long_result
- MATCH_RETURN_TYPE 'B', .Lstore_long_result
- MATCH_RETURN_TYPE 'C', .Lstore_char_result
- MATCH_RETURN_TYPE 'D', .Lstore_double_result
- MATCH_RETURN_TYPE 'F', .Lstore_float_result
- MATCH_RETURN_TYPE 'S', .Lstore_long_result
- MATCH_RETURN_TYPE 'Z', .Lstore_boolean_result
-.purgem MATCH_RETURN_TYPE
- nop
- b .Lcleanup_and_return
- nop
-.Lstore_boolean_result:
- b .Lcleanup_and_return
- lbu $v0, 0($sp) # Move byte from JValue result to return value register.
-.Lstore_char_result:
- b .Lcleanup_and_return
- lhu $v0, 0($sp) # Move char from JValue result to return value register.
-.Lstore_double_result:
-.Lstore_float_result:
- b .Lcleanup_and_return
- l.d $f0, 0($sp) # Move double/float from JValue result to return value register.
-.Lstore_ref_result:
- b .Lcleanup_and_return
- lwu $v0, 0($sp) # Move zero extended lower 32-bits to return value register.
-.Lstore_long_result:
- ld $v0, 0($sp) # Move long from JValue result to return value register.
- // Fall-through to clean up and return.
-.Lcleanup_and_return:
- daddiu $sp, $sp, 8 # Remove space for JValue result.
- .cfi_adjust_cfa_offset -8
- ld $t0, THREAD_EXCEPTION_OFFSET(rSELF) # Load Thread::Current()->exception_
- RESTORE_SAVE_REFS_AND_ARGS_FRAME
- bnez $t0, 1f # Success if no exception is pending.
- nop
- jalr $zero, $ra
- nop
+ move $a0, $a1 # Make $a0 the receiver
+ move $a1, rSELF # Make $a1 an alias for the current Thread.
+ jal artInvokePolymorphic # artInvokePolymorphic(receiver, Thread*, context)
+ move $a2, $sp # Make $a3 a pointer to the saved frame context.
+ ld $t0, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
+ daddiu $sp, $sp, REFS_AND_ARGS_MINUS_REFS_SIZE # skip a0-a7 and f12-f19
+ RESTORE_SAVE_REFS_ONLY_FRAME
+ bne $t0, $zero, 1f
+ dmtc1 $v0, $f0 # place return value to FP return value
+ jalr $zero, $ra
+ dmtc1 $v1, $f1 # place return value to FP return value
1:
DELIVER_PENDING_EXCEPTION
END art_quick_invoke_polymorphic
+ /*
+ * InvokeCustom invocation.
+ * On entry:
+ * a0 = call_site_idx
+ */
+.extern artInvokeCustom
+ENTRY art_quick_invoke_custom
+ SETUP_SAVE_REFS_AND_ARGS_FRAME
+ move $a1, rSELF # Make $a1 an alias for the current Thread.
+ jal artInvokeCustom # Call artInvokeCustom(call_site_idx, Thread*, context).
+ move $a2, $sp # Make $a1 a pointer to the saved frame context.
+ ld $t0, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
+ daddiu $sp, $sp, REFS_AND_ARGS_MINUS_REFS_SIZE # skip a0-a7 and f12-f19
+ RESTORE_SAVE_REFS_ONLY_FRAME
+ bne $t0, $zero, 1f
+ dmtc1 $v0, $f0 # place return value to FP return value
+ jalr $zero, $ra
+ dmtc1 $v1, $f1 # place return value to FP return value
+1:
+ DELIVER_PENDING_EXCEPTION
+END art_quick_invoke_polymorphic
.set pop
diff --git a/runtime/arch/x86/instruction_set_features_x86.cc b/runtime/arch/x86/instruction_set_features_x86.cc
index 98462512da..745e925611 100644
--- a/runtime/arch/x86/instruction_set_features_x86.cc
+++ b/runtime/arch/x86/instruction_set_features_x86.cc
@@ -35,6 +35,7 @@ static constexpr const char* x86_known_variants[] = {
"atom",
"sandybridge",
"silvermont",
+ "kabylake"
};
static constexpr const char* x86_variants_with_ssse3[] = {
@@ -46,16 +47,27 @@ static constexpr const char* x86_variants_with_ssse3[] = {
static constexpr const char* x86_variants_with_sse4_1[] = {
"sandybridge",
"silvermont",
+ "kabylake"
};
static constexpr const char* x86_variants_with_sse4_2[] = {
"sandybridge",
"silvermont",
+ "kabylake"
};
static constexpr const char* x86_variants_with_popcnt[] = {
"sandybridge",
"silvermont",
+ "kabylake"
+};
+
+static constexpr const char* x86_variants_with_avx[] = {
+ "kabylake",
+};
+
+static constexpr const char* x86_variants_with_avx2[] = {
+ "kabylake",
};
X86FeaturesUniquePtr X86InstructionSetFeatures::Create(bool x86_64,
@@ -93,9 +105,12 @@ X86FeaturesUniquePtr X86InstructionSetFeatures::FromVariant(
bool has_SSE4_2 = FindVariantInArray(x86_variants_with_sse4_2,
arraysize(x86_variants_with_sse4_2),
variant);
- bool has_AVX = false;
- bool has_AVX2 = false;
-
+ bool has_AVX = FindVariantInArray(x86_variants_with_avx,
+ arraysize(x86_variants_with_avx),
+ variant);
+ bool has_AVX2 = FindVariantInArray(x86_variants_with_avx2,
+ arraysize(x86_variants_with_avx2),
+ variant);
bool has_POPCNT = FindVariantInArray(x86_variants_with_popcnt,
arraysize(x86_variants_with_popcnt),
variant);
diff --git a/runtime/arch/x86/instruction_set_features_x86.h b/runtime/arch/x86/instruction_set_features_x86.h
index 57cf4b2741..f5974cc2e1 100644
--- a/runtime/arch/x86/instruction_set_features_x86.h
+++ b/runtime/arch/x86/instruction_set_features_x86.h
@@ -67,6 +67,8 @@ class X86InstructionSetFeatures : public InstructionSetFeatures {
bool HasPopCnt() const { return has_POPCNT_; }
+ bool HasAVX2() const { return has_AVX2_; }
+
protected:
// Parse a string of the form "ssse3" adding these to a new InstructionSetFeatures.
virtual std::unique_ptr<const InstructionSetFeatures>
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index b89d45f617..e1b3df8621 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -2434,99 +2434,49 @@ DEFINE_FUNCTION art_quick_osr_stub
END_FUNCTION art_quick_osr_stub
DEFINE_FUNCTION art_quick_invoke_polymorphic
- SETUP_SAVE_REFS_AND_ARGS_FRAME ebx, ebx // Save frame.
- mov %esp, %edx // Remember SP.
- subl LITERAL(16), %esp // Make space for JValue result.
- CFI_ADJUST_CFA_OFFSET(16)
- movl LITERAL(0), (%esp) // Initialize result to zero.
- movl LITERAL(0), 4(%esp)
- mov %esp, %eax // Store pointer to JValue result in eax.
- PUSH edx // pass SP
- pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ // On entry: EAX := unused, ECX := receiver
+ SETUP_SAVE_REFS_AND_ARGS_FRAME ebx, ebx // Save frame.
+ mov %esp, %edx // Remember SP
+ sub LITERAL(4), %esp // Alignment padding
CFI_ADJUST_CFA_OFFSET(4)
- PUSH ecx // pass receiver (method handle)
- PUSH eax // pass JResult
- call SYMBOL(artInvokePolymorphic) // artInvokePolymorphic(result, receiver, Thread*, SP)
- subl LITERAL('A'), %eax // Eliminate out of bounds options
- cmpb LITERAL('Z' - 'A'), %al
- ja .Lcleanup_and_return
- movzbl %al, %eax
- call .Lput_eip_in_ecx
-.Lbranch_start:
- movl %ecx, %edx
- add $(.Lhandler_table - .Lbranch_start), %edx // Make EDX point to handler_table.
- leal (%edx, %eax, 2), %eax // Calculate address of entry in table.
- movzwl (%eax), %eax // Lookup relative branch in table.
- addl %ecx, %eax // Add EIP relative offset.
- jmp *%eax // Branch to handler.
-
- // Handlers for different return types.
-.Lstore_boolean_result:
- movzbl 16(%esp), %eax // Copy boolean result to the accumulator.
- jmp .Lcleanup_and_return
-.Lstore_char_result:
- movzwl 16(%esp), %eax // Copy char result to the accumulator.
- jmp .Lcleanup_and_return
-.Lstore_float_result:
- movd 16(%esp), %xmm0 // Copy float result to the context restored by
- movd %xmm0, 36(%esp) // RESTORE_SAVE_REFS_ONLY_FRAME.
- jmp .Lcleanup_and_return
-.Lstore_double_result:
- movsd 16(%esp), %xmm0 // Copy double result to the context restored by
- movsd %xmm0, 36(%esp) // RESTORE_SAVE_REFS_ONLY_FRAME.
- jmp .Lcleanup_and_return
-.Lstore_long_result:
- movl 20(%esp), %edx // Copy upper-word of result to the context restored by
- movl %edx, 72(%esp) // RESTORE_SAVE_REFS_ONLY_FRAME.
- // Fall-through for lower bits.
-.Lstore_int_result:
- movl 16(%esp), %eax // Copy int result to the accumulator.
- // Fall-through to clean up and return.
-.Lcleanup_and_return:
- addl LITERAL(32), %esp // Pop arguments and stack allocated JValue result.
- CFI_ADJUST_CFA_OFFSET(-32)
+ push %edx // Push SP
+ CFI_ADJUST_CFA_OFFSET(4)
+ pushl %fs:THREAD_SELF_OFFSET // Push Thread::Current()
+ CFI_ADJUST_CFA_OFFSET(4)
+ push %ecx // Push receiver (method handle)
+ CFI_ADJUST_CFA_OFFSET(4)
+ call SYMBOL(artInvokePolymorphic) // invoke with (receiver, thread, SP)
+ addl LITERAL(16), %esp // Pop arguments.
+ CFI_ADJUST_CFA_OFFSET(-16)
+ mov %eax, 4(%esp) // Result is in EAX:EDX. Copy to saved FP state.
+ mov %edx, 8(%esp)
+ mov %edx, 40(%esp) // Copy EDX to saved context
RESTORE_SAVE_REFS_AND_ARGS_FRAME
RETURN_OR_DELIVER_PENDING_EXCEPTION
-
-.Lput_eip_in_ecx: // Internal function that puts address of
- movl 0(%esp), %ecx // next instruction into ECX when CALL
- ret
-
- // Handler table to handlers for given type.
-.Lhandler_table:
-MACRO1(HANDLER_TABLE_ENTRY, handler_label)
- // NB some tools require 16-bits for relocations. Shouldn't need adjusting.
- .word RAW_VAR(handler_label) - .Lbranch_start
-END_MACRO
- HANDLER_TABLE_ENTRY(.Lcleanup_and_return) // A
- HANDLER_TABLE_ENTRY(.Lstore_int_result) // B (byte)
- HANDLER_TABLE_ENTRY(.Lstore_char_result) // C (char)
- HANDLER_TABLE_ENTRY(.Lstore_double_result) // D (double)
- HANDLER_TABLE_ENTRY(.Lcleanup_and_return) // E
- HANDLER_TABLE_ENTRY(.Lstore_float_result) // F (float)
- HANDLER_TABLE_ENTRY(.Lcleanup_and_return) // G
- HANDLER_TABLE_ENTRY(.Lcleanup_and_return) // H
- HANDLER_TABLE_ENTRY(.Lstore_int_result) // I (int)
- HANDLER_TABLE_ENTRY(.Lstore_long_result) // J (long)
- HANDLER_TABLE_ENTRY(.Lcleanup_and_return) // K
- HANDLER_TABLE_ENTRY(.Lstore_int_result) // L (object)
- HANDLER_TABLE_ENTRY(.Lcleanup_and_return) // M
- HANDLER_TABLE_ENTRY(.Lcleanup_and_return) // N
- HANDLER_TABLE_ENTRY(.Lcleanup_and_return) // O
- HANDLER_TABLE_ENTRY(.Lcleanup_and_return) // P
- HANDLER_TABLE_ENTRY(.Lcleanup_and_return) // Q
- HANDLER_TABLE_ENTRY(.Lcleanup_and_return) // R
- HANDLER_TABLE_ENTRY(.Lstore_int_result) // S (short)
- HANDLER_TABLE_ENTRY(.Lcleanup_and_return) // T
- HANDLER_TABLE_ENTRY(.Lcleanup_and_return) // U
- HANDLER_TABLE_ENTRY(.Lcleanup_and_return) // V (void)
- HANDLER_TABLE_ENTRY(.Lcleanup_and_return) // W
- HANDLER_TABLE_ENTRY(.Lcleanup_and_return) // X
- HANDLER_TABLE_ENTRY(.Lcleanup_and_return) // Y
- HANDLER_TABLE_ENTRY(.Lstore_boolean_result) // Z (boolean)
-
END_FUNCTION art_quick_invoke_polymorphic
+DEFINE_FUNCTION art_quick_invoke_custom
+ SETUP_SAVE_REFS_AND_ARGS_FRAME ebx, ebx // Save frame.
+ // EAX := call_site_index
+ mov %esp, %ecx // Remember SP.
+ subl LITERAL(4), %esp // Alignment padding.
+ CFI_ADJUST_CFA_OFFSET(4)
+ push %ecx // pass SP
+ CFI_ADJUST_CFA_OFFSET(4)
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ CFI_ADJUST_CFA_OFFSET(4)
+ push %eax // pass call_site_index
+ CFI_ADJUST_CFA_OFFSET(4)
+ call SYMBOL(artInvokeCustom) // artInvokeCustom(call_site_index, Thread*, SP)
+ addl LITERAL(16), %esp // Pop arguments.
+ CFI_ADJUST_CFA_OFFSET(-16)
+ mov %eax, 4(%esp) // Result is in EAX:EDX. Copy to saved FP state.
+ mov %edx, 8(%esp)
+ mov %edx, 40(%esp) // Copy EDX to saved context
+ RESTORE_SAVE_REFS_AND_ARGS_FRAME
+ RETURN_OR_DELIVER_PENDING_EXCEPTION
+END_FUNCTION art_quick_invoke_custom
+
// Wrap ExecuteSwitchImpl in assembly method which specifies DEX PC for unwinding.
// Argument 0: ESP+4: The context pointer for ExecuteSwitchImpl.
// Argument 1: ESP+8: Pointer to the templated ExecuteSwitchImpl to call.
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index c179033e6b..9980966967 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -2418,81 +2418,29 @@ DEFINE_FUNCTION art_quick_osr_stub
END_FUNCTION art_quick_osr_stub
DEFINE_FUNCTION art_quick_invoke_polymorphic
+ // On entry: RDI := unused, RSI := receiver
SETUP_SAVE_REFS_AND_ARGS_FRAME // save callee saves
- movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread
- movq %rsp, %rcx // pass SP
- subq LITERAL(16), %rsp // make space for JValue result
- CFI_ADJUST_CFA_OFFSET(16)
- movq LITERAL(0), (%rsp) // initialize result
- movq %rsp, %rdi // store pointer to JValue result
- call SYMBOL(artInvokePolymorphic) // artInvokePolymorphic(result, receiver, Thread*, SP)
+ movq %rsi, %rdi // RDI := receiver
+ movq %gs:THREAD_SELF_OFFSET, %rsi // RSI := Thread (self)
+ movq %rsp, %rdx // RDX := pass SP
+ call SYMBOL(artInvokePolymorphic) // invoke with (receiver, self, SP)
// save the code pointer
- subq LITERAL('A'), %rax // Convert type descriptor character value to a zero based index.
- cmpb LITERAL('Z' - 'A'), %al // Eliminate out of bounds options
- ja .Lcleanup_and_return
- movzbq %al, %rax
- leaq .Lhandler_table(%rip), %rcx // Get the address of the handler table
- movslq (%rcx, %rax, 4), %rax // Lookup handler offset relative to table
- addq %rcx, %rax // Add table address to yield handler address.
- jmpq *%rax // Jump to handler.
-
-.align 4
-.Lhandler_table: // Table of type descriptor to handlers.
-MACRO1(HANDLER_TABLE_OFFSET, handle_label)
- // NB some tools require 32-bits for relocations. Shouldn't need adjusting.
- .long RAW_VAR(handle_label) - .Lhandler_table
-END_MACRO
- HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // A
- HANDLER_TABLE_OFFSET(.Lstore_long_result) // B (byte)
- HANDLER_TABLE_OFFSET(.Lstore_char_result) // C (char)
- HANDLER_TABLE_OFFSET(.Lstore_double_result) // D (double)
- HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // E
- HANDLER_TABLE_OFFSET(.Lstore_float_result) // F (float)
- HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // G
- HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // H
- HANDLER_TABLE_OFFSET(.Lstore_long_result) // I (int)
- HANDLER_TABLE_OFFSET(.Lstore_long_result) // J (long)
- HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // K
- HANDLER_TABLE_OFFSET(.Lstore_long_result) // L (object - references are compressed and only 32-bits)
- HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // M
- HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // N
- HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // O
- HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // P
- HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // Q
- HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // R
- HANDLER_TABLE_OFFSET(.Lstore_long_result) // S (short)
- HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // T
- HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // U
- HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // V (void)
- HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // W
- HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // X
- HANDLER_TABLE_OFFSET(.Lcleanup_and_return) // Y
- HANDLER_TABLE_OFFSET(.Lstore_boolean_result) // Z (boolean)
-
-.Lstore_boolean_result:
- movzbq (%rsp), %rax // Copy boolean result to the accumulator
- jmp .Lcleanup_and_return
-.Lstore_char_result:
- movzwq (%rsp), %rax // Copy char result to the accumulator
- jmp .Lcleanup_and_return
-.Lstore_float_result:
- movd (%rsp), %xmm0 // Copy float result to the context restored by
- movd %xmm0, 32(%rsp) // RESTORE_SAVE_REFS_AND_ARGS_FRAME.
- jmp .Lcleanup_and_return
-.Lstore_double_result:
- movsd (%rsp), %xmm0 // Copy double result to the context restored by
- movsd %xmm0, 32(%rsp) // RESTORE_SAVE_REFS_AND_ARGS_FRAME.
- jmp .Lcleanup_and_return
-.Lstore_long_result:
- movq (%rsp), %rax // Copy long result to the accumulator.
- // Fall-through
-.Lcleanup_and_return:
- addq LITERAL(16), %rsp // Pop space for JValue result.
- CFI_ADJUST_CFA_OFFSET(16)
RESTORE_SAVE_REFS_AND_ARGS_FRAME
+ movq %rax, %xmm0 // Result is in RAX. Copy to FP result register.
RETURN_OR_DELIVER_PENDING_EXCEPTION
END_FUNCTION art_quick_invoke_polymorphic
+DEFINE_FUNCTION art_quick_invoke_custom
+ SETUP_SAVE_REFS_AND_ARGS_FRAME // save callee saves
+ // RDI := call_site_index
+ movq %gs:THREAD_SELF_OFFSET, %rsi // RSI := Thread::Current()
+ movq %rsp, %rdx // RDX := SP
+ call SYMBOL(artInvokeCustom) // artInvokeCustom(Thread*, SP)
+ RESTORE_SAVE_REFS_AND_ARGS_FRAME
+ movq %rax, %xmm0 // Result is in RAX. Copy to FP result register.
+ RETURN_OR_DELIVER_PENDING_EXCEPTION
+END_FUNCTION art_quick_invoke_custom
+
// Wrap ExecuteSwitchImpl in assembly method which specifies DEX PC for unwinding.
// Argument 0: RDI: The context pointer for ExecuteSwitchImpl.
// Argument 1: RSI: Pointer to the templated ExecuteSwitchImpl to call.
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index 45bf66446a..80b6921c8a 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -425,7 +425,7 @@ bool ArtMethod::IsPolymorphicSignature() {
static uint32_t GetOatMethodIndexFromMethodIndex(const DexFile& dex_file,
uint16_t class_def_idx,
uint32_t method_idx) {
- ClassAccessor accessor(dex_file, dex_file.GetClassDef(class_def_idx));
+ ClassAccessor accessor(dex_file, class_def_idx);
uint32_t class_def_method_index = 0u;
for (const ClassAccessor::Method& method : accessor.GetMethods()) {
if (method.GetIndex() == method_idx) {
@@ -550,7 +550,7 @@ bool ArtMethod::EqualParameters(Handle<mirror::ObjectArray<mirror::Class>> param
ArrayRef<const uint8_t> ArtMethod::GetQuickenedInfo() {
const DexFile& dex_file = GetDeclaringClass()->GetDexFile();
- const OatFile::OatDexFile* oat_dex_file = dex_file.GetOatDexFile();
+ const OatDexFile* oat_dex_file = dex_file.GetOatDexFile();
if (oat_dex_file == nullptr || (oat_dex_file->GetOatFile() == nullptr)) {
return ArrayRef<const uint8_t>();
}
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index 70ff40d32c..ffbff88421 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -73,7 +73,7 @@ ADD_TEST_EQ(THREAD_LOCAL_OBJECTS_OFFSET,
// Offset of field Thread::tlsPtr_.mterp_current_ibase.
#define THREAD_CURRENT_IBASE_OFFSET \
- (THREAD_LOCAL_OBJECTS_OFFSET + __SIZEOF_SIZE_T__ + (1 + 164) * __SIZEOF_POINTER__)
+ (THREAD_LOCAL_OBJECTS_OFFSET + __SIZEOF_SIZE_T__ + (1 + 165) * __SIZEOF_POINTER__)
ADD_TEST_EQ(THREAD_CURRENT_IBASE_OFFSET,
art::Thread::MterpCurrentIBaseOffset<POINTER_SIZE>().Int32Value())
// Offset of field Thread::tlsPtr_.mterp_default_ibase.
diff --git a/runtime/base/mem_map_arena_pool.cc b/runtime/base/mem_map_arena_pool.cc
index 9ac7886e5d..702f0e453b 100644
--- a/runtime/base/mem_map_arena_pool.cc
+++ b/runtime/base/mem_map_arena_pool.cc
@@ -125,7 +125,7 @@ size_t MemMapArenaPool::GetBytesAllocated() const {
}
void MemMapArenaPool::FreeArenaChain(Arena* first) {
- if (UNLIKELY(RUNNING_ON_MEMORY_TOOL > 0)) {
+ if (kRunningOnMemoryTool) {
for (Arena* arena = first; arena != nullptr; arena = arena->next_) {
MEMORY_TOOL_MAKE_UNDEFINED(arena->memory_, arena->bytes_allocated_);
}
diff --git a/runtime/base/mutex-inl.h b/runtime/base/mutex-inl.h
index dfa14b91f0..51ca274cbb 100644
--- a/runtime/base/mutex-inl.h
+++ b/runtime/base/mutex-inl.h
@@ -290,10 +290,6 @@ inline ReaderMutexLock::~ReaderMutexLock() {
mu_.SharedUnlock(self_);
}
-// Catch bug where variable name is omitted. "ReaderMutexLock (lock);" instead of
-// "ReaderMutexLock mu(lock)".
-#define ReaderMutexLock(x) static_assert(0, "ReaderMutexLock declaration missing variable name")
-
} // namespace art
#endif // ART_RUNTIME_BASE_MUTEX_INL_H_
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 602d183bbb..ee47e7ce56 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -525,8 +525,6 @@ class SCOPED_CAPABILITY MutexLock {
Mutex& mu_;
DISALLOW_COPY_AND_ASSIGN(MutexLock);
};
-// Catch bug where variable name is omitted. "MutexLock (lock);" instead of "MutexLock mu(lock)".
-#define MutexLock(x) static_assert(0, "MutexLock declaration missing variable name")
// Scoped locker/unlocker for a ReaderWriterMutex that acquires read access to mu upon
// construction and releases it upon destruction.
@@ -560,9 +558,6 @@ class SCOPED_CAPABILITY WriterMutexLock {
ReaderWriterMutex& mu_;
DISALLOW_COPY_AND_ASSIGN(WriterMutexLock);
};
-// Catch bug where variable name is omitted. "WriterMutexLock (lock);" instead of
-// "WriterMutexLock mu(lock)".
-#define WriterMutexLock(x) static_assert(0, "WriterMutexLock declaration missing variable name")
// For StartNoThreadSuspension and EndNoThreadSuspension.
class CAPABILITY("role") Role {
diff --git a/runtime/check_reference_map_visitor.h b/runtime/check_reference_map_visitor.h
index acdb235f8c..8f9f45c30b 100644
--- a/runtime/check_reference_map_visitor.h
+++ b/runtime/check_reference_map_visitor.h
@@ -68,14 +68,14 @@ class CheckReferenceMapVisitor : public StackVisitor {
StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
CodeItemDataAccessor accessor(m->DexInstructionData());
uint16_t number_of_dex_registers = accessor.RegistersSize();
- DexRegisterMap dex_register_map =
- code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
+ DexRegisterMap dex_register_map = code_info.GetDexRegisterMapOf(stack_map);
+ DCHECK_EQ(dex_register_map.size(), number_of_dex_registers);
uint32_t register_mask = code_info.GetRegisterMaskOf(stack_map);
BitMemoryRegion stack_mask = code_info.GetStackMaskOf(stack_map);
for (int i = 0; i < number_of_references; ++i) {
int reg = registers[i];
CHECK_LT(reg, accessor.RegistersSize());
- DexRegisterLocation location = dex_register_map.GetDexRegisterLocation(reg);
+ DexRegisterLocation location = dex_register_map[reg];
switch (location.GetKind()) {
case DexRegisterLocation::Kind::kNone:
// Not set, should not be a reference.
@@ -98,7 +98,7 @@ class CheckReferenceMapVisitor : public StackVisitor {
CHECK_EQ(location.GetValue(), 0);
break;
default:
- LOG(FATAL) << "Unexpected location kind " << location.GetInternalKind();
+ LOG(FATAL) << "Unexpected location kind " << location.GetKind();
}
}
}
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index be636d80a8..4a5da1f1e9 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -805,7 +805,7 @@ void ClassLinker::FinishInit(Thread* self) {
// Note: we hard code the field indexes here rather than using FindInstanceField
// as the types of the field can't be resolved prior to the runtime being
// fully initialized
- StackHandleScope<2> hs(self);
+ StackHandleScope<3> hs(self);
Handle<mirror::Class> java_lang_ref_Reference =
hs.NewHandle(GetClassRoot<mirror::Reference>(this));
Handle<mirror::Class> java_lang_ref_FinalizerReference =
@@ -847,11 +847,24 @@ void ClassLinker::FinishInit(Thread* self) {
// that Object, Class, and Object[] are setup
init_done_ = true;
+ // Under sanitization, the small carve-out to handle stack overflow might not be enough to
+ // initialize the StackOverflowError class (as it might require running the verifier). Instead,
+ // ensure that the class will be initialized.
+ if (kMemoryToolIsAvailable && !Runtime::Current()->IsAotCompiler()) {
+ verifier::MethodVerifier::Init(); // Need to prepare the verifier.
+
+ ObjPtr<mirror::Class> soe_klass = FindSystemClass(self, "Ljava/lang/StackOverflowError;");
+ if (soe_klass == nullptr || !EnsureInitialized(self, hs.NewHandle(soe_klass), true, true)) {
+ // Strange, but don't crash.
+ LOG(WARNING) << "Could not prepare StackOverflowError.";
+ self->ClearException();
+ }
+ }
+
VLOG(startup) << "ClassLinker::FinishInit exiting";
}
-void ClassLinker::RunRootClinits() {
- Thread* self = Thread::Current();
+void ClassLinker::RunRootClinits(Thread* self) {
for (size_t i = 0; i < static_cast<size_t>(ClassRoot::kMax); ++i) {
ObjPtr<mirror::Class> c = GetClassRoot(ClassRoot(i), this);
if (!c->IsArrayClass() && !c->IsPrimitive()) {
@@ -859,6 +872,8 @@ void ClassLinker::RunRootClinits() {
Handle<mirror::Class> h_class(hs.NewHandle(c));
EnsureInitialized(self, h_class, true, true);
self->AssertNoPendingException();
+ } else {
+ DCHECK(c->IsInitialized());
}
}
}
@@ -990,8 +1005,7 @@ bool ClassLinker::InitFromBootImage(std::string* error_msg) {
class_roots_ = GcRoot<mirror::ObjectArray<mirror::Class>>(
ObjPtr<mirror::ObjectArray<mirror::Class>>::DownCast(MakeObjPtr(
spaces[0]->GetImageHeader().GetImageRoot(ImageHeader::kClassRoots))));
- DCHECK_EQ(GetClassRoot(ClassRoot::kJavaLangClass, this)->GetClassFlags(),
- mirror::kClassFlagClass);
+ DCHECK_EQ(GetClassRoot<mirror::Class>(this)->GetClassFlags(), mirror::kClassFlagClass);
ObjPtr<mirror::Class> java_lang_Object = GetClassRoot<mirror::Object>(this);
java_lang_Object->SetObjectSize(sizeof(mirror::Object));
@@ -1243,12 +1257,12 @@ void AppImageClassLoadersAndDexCachesHelper::Update(
ObjPtr<mirror::Class> klass = types[j].load(std::memory_order_relaxed).object.Read();
if (space->HasAddress(klass.Ptr())) {
DCHECK(!klass->IsErroneous()) << klass->GetStatus();
- auto it = new_class_set->Find(ClassTable::TableSlot(klass));
+ auto it = new_class_set->find(ClassTable::TableSlot(klass));
DCHECK(it != new_class_set->end());
DCHECK_EQ(it->Read(), klass);
ObjPtr<mirror::Class> super_class = klass->GetSuperClass();
if (super_class != nullptr && !heap->ObjectIsInBootImageSpace(super_class)) {
- auto it2 = new_class_set->Find(ClassTable::TableSlot(super_class));
+ auto it2 = new_class_set->find(ClassTable::TableSlot(super_class));
DCHECK(it2 != new_class_set->end());
DCHECK_EQ(it2->Read(), super_class);
}
@@ -1325,7 +1339,7 @@ static std::unique_ptr<const DexFile> OpenOatDexFile(const OatFile* oat_file,
REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(error_msg != nullptr);
std::unique_ptr<const DexFile> dex_file;
- const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(location, nullptr, error_msg);
+ const OatDexFile* oat_dex_file = oat_file->GetOatDexFile(location, nullptr, error_msg);
if (oat_dex_file == nullptr) {
return std::unique_ptr<const DexFile>();
}
@@ -1610,10 +1624,9 @@ bool ClassLinker::AddImageSpace(
hs.NewHandle(dex_caches_object->AsObjectArray<mirror::DexCache>()));
Handle<mirror::ObjectArray<mirror::Class>> class_roots(hs.NewHandle(
header.GetImageRoot(ImageHeader::kClassRoots)->AsObjectArray<mirror::Class>()));
- static_assert(ImageHeader::kClassLoader + 1u == ImageHeader::kImageRootsMax,
- "Class loader should be the last image root.");
MutableHandle<mirror::ClassLoader> image_class_loader(hs.NewHandle(
- app_image ? header.GetImageRoot(ImageHeader::kClassLoader)->AsClassLoader() : nullptr));
+ app_image ? header.GetImageRoot(ImageHeader::kAppImageClassLoader)->AsClassLoader()
+ : nullptr));
DCHECK(class_roots != nullptr);
if (class_roots->GetLength() != static_cast<int32_t>(ClassRoot::kMax)) {
*error_msg = StringPrintf("Expected %d class roots but got %d",
@@ -2530,7 +2543,7 @@ ObjPtr<mirror::Class> ClassLinker::FindClass(Thread* self,
old = result_ptr; // For the comparison below, after releasing the lock.
if (descriptor_equals) {
class_table->InsertWithHash(result_ptr, hash);
- Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader.Get());
+ WriteBarrier::ForEveryFieldWrite(class_loader.Get());
} // else throw below, after releasing the lock.
}
}
@@ -2863,9 +2876,9 @@ void ClassLinker::FixupStaticTrampolines(ObjPtr<mirror::Class> klass) {
}
const DexFile& dex_file = klass->GetDexFile();
- const DexFile::ClassDef* dex_class_def = klass->GetClassDef();
- CHECK(dex_class_def != nullptr);
- ClassAccessor accessor(dex_file, *dex_class_def);
+ const uint16_t class_def_idx = klass->GetDexClassDefIndex();
+ CHECK_NE(class_def_idx, DexFile::kDexNoIndex16);
+ ClassAccessor accessor(dex_file, class_def_idx);
// There should always be class data if there were direct methods.
CHECK(accessor.HasClassData()) << klass->PrettyDescriptor();
bool has_oat_class;
@@ -3145,7 +3158,7 @@ void ClassLinker::LoadClass(Thread* self,
DCHECK_EQ(klass->NumInstanceFields(), num_ifields);
}
// Ensure that the card is marked so that remembered sets pick up native roots.
- Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(klass.Get());
+ WriteBarrier::ForEveryFieldWrite(klass.Get());
self->AllowThreadSuspension();
}
@@ -3332,7 +3345,7 @@ void ClassLinker::RegisterDexFileLocked(const DexFile& dex_file,
if (class_loader != nullptr) {
// Since we added a strong root to the class table, do the write barrier as required for
// remembered sets and generational GCs.
- Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader);
+ WriteBarrier::ForEveryFieldWrite(class_loader);
}
dex_caches_.push_back(data);
}
@@ -3392,7 +3405,7 @@ void ClassLinker::RegisterExistingDexCache(ObjPtr<mirror::DexCache> dex_cache,
if (h_class_loader.Get() != nullptr) {
// Since we added a strong root to the class table, do the write barrier as required for
// remembered sets and generational GCs.
- Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(h_class_loader.Get());
+ WriteBarrier::ForEveryFieldWrite(h_class_loader.Get());
}
}
@@ -3463,7 +3476,7 @@ ObjPtr<mirror::DexCache> ClassLinker::RegisterDexFile(const DexFile& dex_file,
if (h_class_loader.Get() != nullptr) {
// Since we added a strong root to the class table, do the write barrier as required for
// remembered sets and generational GCs.
- Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(h_class_loader.Get());
+ WriteBarrier::ForEveryFieldWrite(h_class_loader.Get());
}
return h_dex_cache.Get();
}
@@ -3763,7 +3776,7 @@ ObjPtr<mirror::Class> ClassLinker::InsertClass(const char* descriptor,
class_table->InsertWithHash(klass, hash);
if (class_loader != nullptr) {
// This is necessary because we need to have the card dirtied for remembered sets.
- Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader);
+ WriteBarrier::ForEveryFieldWrite(class_loader);
}
if (log_new_roots_) {
new_class_roots_.push_back(GcRoot<mirror::Class>(klass));
@@ -3793,7 +3806,7 @@ void ClassLinker::UpdateClassMethods(ObjPtr<mirror::Class> klass,
klass->NumDirectMethods(),
klass->NumDeclaredVirtualMethods());
// Need to mark the card so that the remembered sets and mod union tables get updated.
- Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(klass);
+ WriteBarrier::ForEveryFieldWrite(klass);
}
ObjPtr<mirror::Class> ClassLinker::LookupClass(Thread* self,
@@ -4169,7 +4182,7 @@ bool ClassLinker::VerifyClassUsingOatFile(const DexFile& dex_file,
}
}
- const OatFile::OatDexFile* oat_dex_file = dex_file.GetOatDexFile();
+ const OatDexFile* oat_dex_file = dex_file.GetOatDexFile();
// In case we run without an image there won't be a backing oat file.
if (oat_dex_file == nullptr || oat_dex_file->GetOatFile() == nullptr) {
return false;
@@ -4442,8 +4455,8 @@ void ClassLinker::CreateProxyConstructor(Handle<mirror::Class> klass, ArtMethod*
// Find the <init>(InvocationHandler)V method. The exact method offset varies depending
// on which front-end compiler was used to build the libcore DEX files.
- ArtMethod* proxy_constructor = proxy_class->FindConstructor(
- "(Ljava/lang/reflect/InvocationHandler;)V", image_pointer_size_);
+ ArtMethod* proxy_constructor =
+ jni::DecodeArtMethod(WellKnownClasses::java_lang_reflect_Proxy_init);
DCHECK(proxy_constructor != nullptr)
<< "Could not find <init> method in java.lang.reflect.Proxy";
@@ -5197,7 +5210,7 @@ void ClassLinker::FixupTemporaryDeclaringClass(ObjPtr<mirror::Class> temp_class,
// Make sure the remembered set and mod-union tables know that we updated some of the native
// roots.
- Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(new_class);
+ WriteBarrier::ForEveryFieldWrite(new_class);
}
void ClassLinker::RegisterClassLoader(ObjPtr<mirror::ClassLoader> class_loader) {
@@ -5355,7 +5368,7 @@ bool ClassLinker::LinkClass(Thread* self,
if (class_loader != nullptr) {
// We updated the class in the class table, perform the write barrier so that the GC knows
// about the change.
- Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader);
+ WriteBarrier::ForEveryFieldWrite(class_loader);
}
CHECK_EQ(existing, klass.Get());
if (log_new_roots_) {
@@ -8762,7 +8775,7 @@ void ClassLinker::InsertDexFileInToClassLoader(ObjPtr<mirror::Object> dex_file,
if (table->InsertStrongRoot(dex_file) && class_loader != nullptr) {
// It was not already inserted, perform the write barrier to let the GC know the class loader's
// class table was modified.
- Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader);
+ WriteBarrier::ForEveryFieldWrite(class_loader);
}
}
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 30c242399d..e4d9c96696 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -405,7 +405,7 @@ class ClassLinker {
// Initializes classes that have instances in the image but that have
// <clinit> methods so they could not be initialized by the compiler.
- void RunRootClinits()
+ void RunRootClinits(Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::dex_lock_, !Roles::uninterruptible_);
diff --git a/runtime/class_loader_utils.h b/runtime/class_loader_utils.h
index af42878e97..78ad568d25 100644
--- a/runtime/class_loader_utils.h
+++ b/runtime/class_loader_utils.h
@@ -147,8 +147,14 @@ inline void VisitClassLoaderDexFiles(ScopedObjectAccessAlreadyRunnable& soa,
Handle<mirror::ClassLoader> class_loader,
Visitor fn)
REQUIRES_SHARED(Locks::mutator_lock_) {
- auto helper = [&fn](const art::DexFile* dex_file, void** ATTRIBUTE_UNUSED)
+ auto helper = [&fn](const art::DexFile* dex_file, void** ret)
REQUIRES_SHARED(Locks::mutator_lock_) {
+#ifdef __clang_analyzer__
+ *ret = nullptr;
+#else
+ UNUSED(ret);
+#endif
+
return fn(dex_file);
};
VisitClassLoaderDexFiles<decltype(helper), void*>(soa,
diff --git a/runtime/class_table.cc b/runtime/class_table.cc
index e313ec5dd7..a233357249 100644
--- a/runtime/class_table.cc
+++ b/runtime/class_table.cc
@@ -37,7 +37,7 @@ bool ClassTable::Contains(ObjPtr<mirror::Class> klass) {
ReaderMutexLock mu(Thread::Current(), lock_);
TableSlot slot(klass);
for (ClassSet& class_set : classes_) {
- auto it = class_set.Find(slot);
+ auto it = class_set.find(slot);
if (it != class_set.end()) {
return it->Read() == klass;
}
@@ -49,7 +49,7 @@ mirror::Class* ClassTable::LookupByDescriptor(ObjPtr<mirror::Class> klass) {
ReaderMutexLock mu(Thread::Current(), lock_);
TableSlot slot(klass);
for (ClassSet& class_set : classes_) {
- auto it = class_set.Find(slot);
+ auto it = class_set.find(slot);
if (it != class_set.end()) {
return it->Read();
}
@@ -119,14 +119,14 @@ size_t ClassTable::NumReferencedZygoteClasses() const {
ReaderMutexLock mu(Thread::Current(), lock_);
size_t sum = 0;
for (size_t i = 0; i < classes_.size() - 1; ++i) {
- sum += classes_[i].Size();
+ sum += classes_[i].size();
}
return sum;
}
size_t ClassTable::NumReferencedNonZygoteClasses() const {
ReaderMutexLock mu(Thread::Current(), lock_);
- return classes_.back().Size();
+ return classes_.back().size();
}
mirror::Class* ClassTable::Lookup(const char* descriptor, size_t hash) {
@@ -145,12 +145,12 @@ ObjPtr<mirror::Class> ClassTable::TryInsert(ObjPtr<mirror::Class> klass) {
TableSlot slot(klass);
WriterMutexLock mu(Thread::Current(), lock_);
for (ClassSet& class_set : classes_) {
- auto it = class_set.Find(slot);
+ auto it = class_set.find(slot);
if (it != class_set.end()) {
return it->Read();
}
}
- classes_.back().Insert(slot);
+ classes_.back().insert(slot);
return klass;
}
@@ -163,12 +163,12 @@ void ClassTable::Insert(ObjPtr<mirror::Class> klass) {
void ClassTable::CopyWithoutLocks(const ClassTable& source_table) {
if (kIsDebugBuild) {
for (ClassSet& class_set : classes_) {
- CHECK(class_set.Empty());
+ CHECK(class_set.empty());
}
}
for (const ClassSet& class_set : source_table.classes_) {
for (const TableSlot& slot : class_set) {
- classes_.back().Insert(slot);
+ classes_.back().insert(slot);
}
}
}
@@ -187,9 +187,9 @@ bool ClassTable::Remove(const char* descriptor) {
DescriptorHashPair pair(descriptor, ComputeModifiedUtf8Hash(descriptor));
WriterMutexLock mu(Thread::Current(), lock_);
for (ClassSet& class_set : classes_) {
- auto it = class_set.Find(pair);
+ auto it = class_set.find(pair);
if (it != class_set.end()) {
- class_set.Erase(it);
+ class_set.erase(it);
return true;
}
}
@@ -268,7 +268,7 @@ size_t ClassTable::WriteToMemory(uint8_t* ptr) const {
// default in case classes were pruned.
for (const ClassSet& class_set : classes_) {
for (const TableSlot& root : class_set) {
- combined.Insert(root);
+ combined.insert(root);
}
}
const size_t ret = combined.WriteToMemory(ptr);
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index 75b091d98f..be39631e44 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -182,7 +182,7 @@ void CommonRuntimeTestImpl::FinalizeSetup() {
{
ScopedObjectAccess soa(Thread::Current());
- class_linker_->RunRootClinits();
+ runtime_->RunRootClinits(soa.Self());
}
// We're back in native, take the opportunity to initialize well known classes.
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index 892abb4dbb..d21973ec7e 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -220,6 +220,13 @@ class CheckJniAbortCatcher {
printf("WARNING: TEST DISABLED FOR HEAP POISONING\n"); \
return; \
}
+
+#define TEST_DISABLED_FOR_MEMORY_TOOL_WITH_HEAP_POISONING_WITHOUT_READ_BARRIERS() \
+ if (kRunningOnMemoryTool && kPoisonHeapReferences && !kEmitCompilerReadBarrier) { \
+ printf("WARNING: TEST DISABLED FOR MEMORY TOOL WITH HEAP POISONING WITHOUT READ BARRIERS\n"); \
+ return; \
+ }
+
} // namespace art
#endif // ART_RUNTIME_COMMON_RUNTIME_TEST_H_
diff --git a/runtime/dex_register_location.cc b/runtime/dex_register_location.cc
new file mode 100644
index 0000000000..f3b09733b9
--- /dev/null
+++ b/runtime/dex_register_location.cc
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "dex_register_location.h"
+
+namespace art {
+
+std::ostream& operator<<(std::ostream& stream, DexRegisterLocation::Kind kind) {
+ return stream << "Kind<" << static_cast<int32_t>(kind) << ">";
+}
+
+std::ostream& operator<<(std::ostream& stream, const DexRegisterLocation& reg) {
+ using Kind = DexRegisterLocation::Kind;
+ switch (reg.GetKind()) {
+ case Kind::kInvalid:
+ return stream << "Invalid";
+ case Kind::kNone:
+ return stream << "None";
+ case Kind::kInStack:
+ return stream << "sp+" << reg.GetValue();
+ case Kind::kInRegister:
+ return stream << "r" << reg.GetValue();
+ case Kind::kInRegisterHigh:
+ return stream << "r" << reg.GetValue() << "/hi";
+ case Kind::kInFpuRegister:
+ return stream << "f" << reg.GetValue();
+ case Kind::kInFpuRegisterHigh:
+ return stream << "f" << reg.GetValue() << "/hi";
+ case Kind::kConstant:
+ return stream << "#" << reg.GetValue();
+ default:
+ return stream << "DexRegisterLocation(" << static_cast<uint32_t>(reg.GetKind())
+ << "," << reg.GetValue() << ")";
+ }
+}
+
+} // namespace art
diff --git a/runtime/dex_register_location.h b/runtime/dex_register_location.h
index c6d4ad2feb..98b4d41e2d 100644
--- a/runtime/dex_register_location.h
+++ b/runtime/dex_register_location.h
@@ -29,6 +29,7 @@ namespace art {
class DexRegisterLocation {
public:
enum class Kind : int32_t {
+ kInvalid = -2, // only used internally during register map decoding.
kNone = -1, // vreg has not been set.
kInStack, // vreg is on the stack, value holds the stack offset.
kConstant, // vreg is a constant value.
@@ -40,17 +41,13 @@ class DexRegisterLocation {
DexRegisterLocation(Kind kind, int32_t value) : kind_(kind), value_(value) {}
- static DexRegisterLocation None() {
- return DexRegisterLocation(Kind::kNone, 0);
- }
+ static DexRegisterLocation None() { return DexRegisterLocation(Kind::kNone, 0); }
+ static DexRegisterLocation Invalid() { return DexRegisterLocation(Kind::kInvalid, 0); }
bool IsLive() const { return kind_ != Kind::kNone; }
Kind GetKind() const { return kind_; }
- // TODO: Remove.
- Kind GetInternalKind() const { return kind_; }
-
int32_t GetValue() const { return value_; }
bool operator==(DexRegisterLocation other) const {
@@ -61,6 +58,24 @@ class DexRegisterLocation {
return !(*this == other);
}
+ int32_t GetStackOffsetInBytes() const {
+ DCHECK(kind_ == Kind::kInStack);
+ return value_;
+ }
+
+ int32_t GetConstant() const {
+ DCHECK(kind_ == Kind::kConstant);
+ return value_;
+ }
+
+ int32_t GetMachineRegister() const {
+ DCHECK(kind_ == Kind::kInRegister ||
+ kind_ == Kind::kInRegisterHigh ||
+ kind_ == Kind::kInFpuRegister ||
+ kind_ == Kind::kInFpuRegisterHigh);
+ return value_;
+ }
+
private:
DexRegisterLocation() {}
@@ -70,9 +85,8 @@ class DexRegisterLocation {
friend class DexRegisterMap; // Allow creation of uninitialized array of locations.
};
-static inline std::ostream& operator<<(std::ostream& stream, DexRegisterLocation::Kind kind) {
- return stream << "Kind<" << static_cast<int32_t>(kind) << ">";
-}
+std::ostream& operator<<(std::ostream& stream, DexRegisterLocation::Kind kind);
+std::ostream& operator<<(std::ostream& stream, const DexRegisterLocation& reg);
} // namespace art
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 4a3d3b0da6..40ef10f904 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -46,38 +46,38 @@ namespace art {
inline ArtMethod* GetResolvedMethod(ArtMethod* outer_method,
const MethodInfo& method_info,
- const CodeInfo& code_info,
- const StackMap& stack_map,
- uint8_t inlining_depth)
+ const BitTableRange<InlineInfo>& inline_infos)
REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(!outer_method->IsObsolete());
- InlineInfo inline_info = code_info.GetInlineInfoAtDepth(stack_map, inlining_depth);
// This method is being used by artQuickResolutionTrampoline, before it sets up
// the passed parameters in a GC friendly way. Therefore we must never be
// suspended while executing it.
ScopedAssertNoThreadSuspension sants(__FUNCTION__);
- if (inline_info.EncodesArtMethod()) {
- return inline_info.GetArtMethod();
- }
+ {
+ InlineInfo inline_info = inline_infos.back();
+
+ if (inline_info.EncodesArtMethod()) {
+ return inline_info.GetArtMethod();
+ }
- uint32_t method_index = inline_info.GetMethodIndex(method_info);
- if (inline_info.GetDexPc() == static_cast<uint32_t>(-1)) {
- // "charAt" special case. It is the only non-leaf method we inline across dex files.
- ArtMethod* inlined_method = jni::DecodeArtMethod(WellKnownClasses::java_lang_String_charAt);
- DCHECK_EQ(inlined_method->GetDexMethodIndex(), method_index);
- return inlined_method;
+ uint32_t method_index = inline_info.GetMethodIndex(method_info);
+ if (inline_info.GetDexPc() == static_cast<uint32_t>(-1)) {
+ // "charAt" special case. It is the only non-leaf method we inline across dex files.
+ ArtMethod* inlined_method = jni::DecodeArtMethod(WellKnownClasses::java_lang_String_charAt);
+ DCHECK_EQ(inlined_method->GetDexMethodIndex(), method_index);
+ return inlined_method;
+ }
}
// Find which method did the call in the inlining hierarchy.
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
ArtMethod* method = outer_method;
- for (uint32_t depth = 0, end = inlining_depth + 1u; depth != end; ++depth) {
- inline_info = code_info.GetInlineInfoAtDepth(stack_map, depth);
+ for (InlineInfo inline_info : inline_infos) {
DCHECK(!inline_info.EncodesArtMethod());
DCHECK_NE(inline_info.GetDexPc(), static_cast<uint32_t>(-1));
- method_index = inline_info.GetMethodIndex(method_info);
+ uint32_t method_index = inline_info.GetMethodIndex(method_info);
ArtMethod* inlined_method = class_linker->LookupResolvedMethod(method_index,
method->GetDexCache(),
method->GetClassLoader());
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index e71d1fa38a..0c61965908 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -74,11 +74,11 @@ JValue InvokeProxyInvocationHandler(ScopedObjectAccessAlreadyRunnable& soa, cons
}
for (size_t i = 0; i < args.size(); ++i) {
if (shorty[i + 1] == 'L') {
- jobject val = args.at(i).l;
+ jobject val = args[i].l;
soa.Env()->SetObjectArrayElement(args_jobj, i, val);
} else {
JValue jv;
- jv.SetJ(args.at(i).j);
+ jv.SetJ(args[i].j);
mirror::Object* val = BoxPrimitive(Primitive::GetType(shorty[i + 1]), jv).Ptr();
if (val == nullptr) {
CHECK(soa.Self()->IsExceptionPending());
@@ -205,9 +205,9 @@ static inline ArtMethod* DoGetCalleeSaveMethodCaller(ArtMethod* outer_method,
MethodInfo method_info = current_code->GetOptimizedMethodInfo();
StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
DCHECK(stack_map.IsValid());
- uint32_t depth = code_info.GetInlineDepthOf(stack_map);
- if (depth != 0) {
- caller = GetResolvedMethod(outer_method, method_info, code_info, stack_map, depth - 1);
+ BitTableRange<InlineInfo> inline_infos = code_info.GetInlineInfosOf(stack_map);
+ if (!inline_infos.empty()) {
+ caller = GetResolvedMethod(outer_method, method_info, inline_infos);
}
}
if (kIsDebugBuild && do_caller_check) {
diff --git a/runtime/entrypoints/quick/quick_default_externs.h b/runtime/entrypoints/quick/quick_default_externs.h
index 1804d9e64d..938489b730 100644
--- a/runtime/entrypoints/quick/quick_default_externs.h
+++ b/runtime/entrypoints/quick/quick_default_externs.h
@@ -114,9 +114,9 @@ extern "C" void art_quick_invoke_super_trampoline_with_access_check(uint32_t, vo
extern "C" void art_quick_invoke_virtual_trampoline_with_access_check(uint32_t, void*);
-// Invoke polymorphic entrypoint. Return type is dynamic and may be void, a primitive value, or
-// reference return type.
+// Polymorphic invoke entrypoints.
extern "C" void art_quick_invoke_polymorphic(uint32_t, void*);
+extern "C" void art_quick_invoke_custom(uint32_t, void*);
// Thread entrypoints.
extern "C" void art_quick_test_suspend();
diff --git a/runtime/entrypoints/quick/quick_default_init_entrypoints.h b/runtime/entrypoints/quick/quick_default_init_entrypoints.h
index 3f66045576..5dcece4208 100644
--- a/runtime/entrypoints/quick/quick_default_init_entrypoints.h
+++ b/runtime/entrypoints/quick/quick_default_init_entrypoints.h
@@ -106,6 +106,7 @@ static void DefaultInitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qp
qpoints->pInvokeVirtualTrampolineWithAccessCheck =
art_quick_invoke_virtual_trampoline_with_access_check;
qpoints->pInvokePolymorphic = art_quick_invoke_polymorphic;
+ qpoints->pInvokeCustom = art_quick_invoke_custom;
// Thread
qpoints->pTestSuspend = art_quick_test_suspend;
diff --git a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
index 62756123e1..85d633f6a6 100644
--- a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
@@ -64,7 +64,7 @@ static void StoreObjectInBss(ArtMethod* outer_method,
<< oat_file->GetLocation();
}
if (class_loader != nullptr) {
- runtime->GetHeap()->WriteBarrierEveryFieldOf(class_loader);
+ WriteBarrier::ForEveryFieldWrite(class_loader);
} else {
runtime->GetClassLinker()->WriteBarrierForBootOatFileBssRoots(oat_file);
}
diff --git a/runtime/entrypoints/quick/quick_entrypoints_list.h b/runtime/entrypoints/quick/quick_entrypoints_list.h
index 3a8faca11d..415a158326 100644
--- a/runtime/entrypoints/quick/quick_entrypoints_list.h
+++ b/runtime/entrypoints/quick/quick_entrypoints_list.h
@@ -134,6 +134,7 @@
V(InvokeSuperTrampolineWithAccessCheck, void, uint32_t, void*) \
V(InvokeVirtualTrampolineWithAccessCheck, void, uint32_t, void*) \
V(InvokePolymorphic, void, uint32_t, void*) \
+ V(InvokeCustom, void, uint32_t, void*) \
\
V(TestSuspend, void, void) \
\
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index af6a936d40..c894406e98 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -35,6 +35,7 @@
#include "index_bss_mapping.h"
#include "instrumentation.h"
#include "interpreter/interpreter.h"
+#include "interpreter/interpreter_common.h"
#include "interpreter/shadow_frame-inl.h"
#include "jit/jit.h"
#include "linear_alloc.h"
@@ -344,10 +345,9 @@ class QuickArgumentVisitor {
CodeInfo code_info(current_code);
StackMap stack_map = code_info.GetStackMapForNativePcOffset(outer_pc_offset);
DCHECK(stack_map.IsValid());
- uint32_t depth = code_info.GetInlineDepthOf(stack_map);
- if (depth != 0) {
- InlineInfo inline_info = code_info.GetInlineInfoAtDepth(stack_map, depth - 1);
- return inline_info.GetDexPc();
+ BitTableRange<InlineInfo> inline_infos = code_info.GetInlineInfosOf(stack_map);
+ if (!inline_infos.empty()) {
+ return inline_infos.back().GetDexPc();
} else {
return stack_map.GetDexPc();
}
@@ -1100,11 +1100,14 @@ extern "C" const void* artInstrumentationMethodEntryFromCode(ArtMethod* method,
// that part.
ScopedQuickEntrypointChecks sqec(self, kIsDebugBuild, false);
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
+ DCHECK(!method->IsProxyMethod())
+ << "Proxy method " << method->PrettyMethod()
+ << " (declaring class: " << method->GetDeclaringClass()->PrettyClass() << ")"
+ << " should not hit instrumentation entrypoint.";
if (instrumentation->IsDeoptimized(method)) {
result = GetQuickToInterpreterBridge();
} else {
result = instrumentation->GetQuickCodeFor(method, kRuntimePointerSize);
- DCHECK(!Runtime::Current()->GetClassLinker()->IsQuickToInterpreterBridge(result));
}
bool interpreter_entry = (result == GetQuickToInterpreterBridge());
@@ -1232,37 +1235,35 @@ static void DumpB74410240DebugData(ArtMethod** sp) REQUIRES_SHARED(Locks::mutato
LOG(FATAL_WITHOUT_ABORT) << " instruction: " << DumpInstruction(outer_method, dex_pc);
ArtMethod* caller = outer_method;
- uint32_t depth = code_info.GetInlineDepthOf(stack_map);
- if (depth != 0) {
- for (size_t d = 0; d < depth; ++d) {
- InlineInfo inline_info = code_info.GetInlineInfoAtDepth(stack_map, d);
- const char* tag = "";
- dex_pc = inline_info.GetDexPc();
- if (inline_info.EncodesArtMethod()) {
- tag = "encoded ";
- caller = inline_info.GetArtMethod();
+ BitTableRange<InlineInfo> inline_infos = code_info.GetInlineInfosOf(stack_map);
+ for (InlineInfo inline_info : inline_infos) {
+ const char* tag = "";
+ dex_pc = inline_info.GetDexPc();
+ if (inline_info.EncodesArtMethod()) {
+ tag = "encoded ";
+ caller = inline_info.GetArtMethod();
+ } else {
+ uint32_t method_index = inline_info.GetMethodIndex(method_info);
+ if (dex_pc == static_cast<uint32_t>(-1)) {
+ tag = "special ";
+ CHECK(inline_info.Equals(inline_infos.back()));
+ caller = jni::DecodeArtMethod(WellKnownClasses::java_lang_String_charAt);
+ CHECK_EQ(caller->GetDexMethodIndex(), method_index);
} else {
- uint32_t method_index = inline_info.GetMethodIndex(method_info);
- if (dex_pc == static_cast<uint32_t>(-1)) {
- tag = "special ";
- CHECK_EQ(d + 1u, depth);
- caller = jni::DecodeArtMethod(WellKnownClasses::java_lang_String_charAt);
- CHECK_EQ(caller->GetDexMethodIndex(), method_index);
- } else {
- ObjPtr<mirror::DexCache> dex_cache = caller->GetDexCache();
- ObjPtr<mirror::ClassLoader> class_loader = caller->GetClassLoader();
- caller = class_linker->LookupResolvedMethod(method_index, dex_cache, class_loader);
- CHECK(caller != nullptr);
- }
+ ObjPtr<mirror::DexCache> dex_cache = caller->GetDexCache();
+ ObjPtr<mirror::ClassLoader> class_loader = caller->GetClassLoader();
+ caller = class_linker->LookupResolvedMethod(method_index, dex_cache, class_loader);
+ CHECK(caller != nullptr);
}
- LOG(FATAL_WITHOUT_ABORT) << "Inlined method #" << d << ": " << tag << caller->PrettyMethod()
- << " dex pc: " << dex_pc
- << " dex file: " << caller->GetDexFile()->GetLocation()
- << " class table: "
- << class_linker->ClassTableForClassLoader(caller->GetClassLoader());
- DumpB74410240ClassData(caller->GetDeclaringClass());
- LOG(FATAL_WITHOUT_ABORT) << " instruction: " << DumpInstruction(caller, dex_pc);
}
+ LOG(FATAL_WITHOUT_ABORT) << "InlineInfo #" << inline_info.Row()
+ << ": " << tag << caller->PrettyMethod()
+ << " dex pc: " << dex_pc
+ << " dex file: " << caller->GetDexFile()->GetLocation()
+ << " class table: "
+ << class_linker->ClassTableForClassLoader(caller->GetClassLoader());
+ DumpB74410240ClassData(caller->GetDeclaringClass());
+ LOG(FATAL_WITHOUT_ABORT) << " instruction: " << DumpInstruction(caller, dex_pc);
}
}
@@ -2722,18 +2723,11 @@ extern "C" TwoWordReturn artInvokeInterfaceTrampoline(ArtMethod* interface_metho
reinterpret_cast<uintptr_t>(method));
}
-// Returns shorty type so the caller can determine how to put |result|
-// into expected registers. The shorty type is static so the compiler
-// could call different flavors of this code path depending on the
-// shorty type though this would require different entry points for
-// each type.
-extern "C" uintptr_t artInvokePolymorphic(
- JValue* result,
- mirror::Object* raw_receiver,
- Thread* self,
- ArtMethod** sp)
+// Returns uint64_t representing raw bits from JValue.
+extern "C" uint64_t artInvokePolymorphic(mirror::Object* raw_receiver, Thread* self, ArtMethod** sp)
REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
+ DCHECK(raw_receiver != nullptr);
DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs));
// Start new JNI local reference state
@@ -2766,18 +2760,12 @@ extern "C" uintptr_t artInvokePolymorphic(
ArtMethod* resolved_method = linker->ResolveMethod<ClassLinker::ResolveMode::kCheckICCEAndIAE>(
self, inst.VRegB(), caller_method, kVirtual);
- if (UNLIKELY(receiver_handle.IsNull())) {
- ThrowNullPointerExceptionForMethodAccess(resolved_method, InvokeType::kVirtual);
- return static_cast<uintptr_t>('V');
- }
-
Handle<mirror::MethodType> method_type(
hs.NewHandle(linker->ResolveMethodType(self, proto_idx, caller_method)));
-
- // This implies we couldn't resolve one or more types in this method handle.
if (UNLIKELY(method_type.IsNull())) {
+ // This implies we couldn't resolve one or more types in this method handle.
CHECK(self->IsExceptionPending());
- return static_cast<uintptr_t>('V');
+ return 0UL;
}
DCHECK_EQ(ArtMethod::NumArgRegisters(shorty) + 1u, (uint32_t)inst.VRegA());
@@ -2811,6 +2799,7 @@ extern "C" uintptr_t artInvokePolymorphic(
// consecutive order.
RangeInstructionOperands operands(first_arg + 1, num_vregs - 1);
Intrinsics intrinsic = static_cast<Intrinsics>(resolved_method->GetIntrinsic());
+ JValue result;
bool success = false;
if (resolved_method->GetDeclaringClass() == GetClassRoot<mirror::MethodHandle>(linker)) {
Handle<mirror::MethodHandle> method_handle(hs.NewHandle(
@@ -2821,7 +2810,7 @@ extern "C" uintptr_t artInvokePolymorphic(
method_handle,
method_type,
&operands,
- result);
+ &result);
} else {
DCHECK_EQ(static_cast<uint32_t>(intrinsic),
static_cast<uint32_t>(Intrinsics::kMethodHandleInvoke));
@@ -2830,7 +2819,7 @@ extern "C" uintptr_t artInvokePolymorphic(
method_handle,
method_type,
&operands,
- result);
+ &result);
}
} else {
DCHECK_EQ(GetClassRoot<mirror::VarHandle>(linker), resolved_method->GetDeclaringClass());
@@ -2844,7 +2833,7 @@ extern "C" uintptr_t artInvokePolymorphic(
method_type,
access_mode,
&operands,
- result);
+ &result);
}
DCHECK(success || self->IsExceptionPending());
@@ -2852,7 +2841,65 @@ extern "C" uintptr_t artInvokePolymorphic(
// Pop transition record.
self->PopManagedStackFragment(fragment);
- return static_cast<uintptr_t>(shorty[0]);
+ return result.GetJ();
+}
+
+// Returns uint64_t representing raw bits from JValue.
+extern "C" uint64_t artInvokeCustom(uint32_t call_site_idx, Thread* self, ArtMethod** sp)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ ScopedQuickEntrypointChecks sqec(self);
+ DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs));
+
+ // invoke-custom is effectively a static call (no receiver).
+ static constexpr bool kMethodIsStatic = true;
+
+ // Start new JNI local reference state
+ JNIEnvExt* env = self->GetJniEnv();
+ ScopedObjectAccessUnchecked soa(env);
+ ScopedJniEnvLocalRefState env_state(env);
+
+ const char* old_cause = self->StartAssertNoThreadSuspension("Making stack arguments safe.");
+
+ // From the instruction, get the |callsite_shorty| and expose arguments on the stack to the GC.
+ ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp);
+ uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp);
+ const DexFile* dex_file = caller_method->GetDexFile();
+ const dex::ProtoIndex proto_idx(dex_file->GetProtoIndexForCallSite(call_site_idx));
+ const char* shorty = caller_method->GetDexFile()->GetShorty(proto_idx);
+ const uint32_t shorty_len = strlen(shorty);
+
+ // Construct the shadow frame placing arguments consecutively from |first_arg|.
+ const size_t first_arg = 0;
+ const size_t num_vregs = ArtMethod::NumArgRegisters(shorty);
+ ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
+ CREATE_SHADOW_FRAME(num_vregs, /* link */ nullptr, caller_method, dex_pc);
+ ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get();
+ ScopedStackedShadowFramePusher
+ frame_pusher(self, shadow_frame, StackedShadowFrameType::kShadowFrameUnderConstruction);
+ BuildQuickShadowFrameVisitor shadow_frame_builder(sp,
+ kMethodIsStatic,
+ shorty,
+ shorty_len,
+ shadow_frame,
+ first_arg);
+ shadow_frame_builder.VisitArguments();
+
+ // Push a transition back into managed code onto the linked list in thread.
+ ManagedStack fragment;
+ self->PushManagedStackFragment(&fragment);
+ self->EndAssertNoThreadSuspension(old_cause);
+
+ // Perform the invoke-custom operation.
+ RangeInstructionOperands operands(first_arg, num_vregs);
+ JValue result;
+ bool success =
+ interpreter::DoInvokeCustom(self, *shadow_frame, call_site_idx, &operands, &result);
+ DCHECK(success || self->IsExceptionPending());
+
+ // Pop transition record.
+ self->PopManagedStackFragment(fragment);
+
+ return result.GetJ();
}
} // namespace art
diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc
index 1337cd5fb2..dda3ddeb76 100644
--- a/runtime/entrypoints_order_test.cc
+++ b/runtime/entrypoints_order_test.cc
@@ -287,8 +287,8 @@ class EntrypointsOrderTest : public CommonRuntimeTest {
pInvokeVirtualTrampolineWithAccessCheck, sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInvokeVirtualTrampolineWithAccessCheck,
pInvokePolymorphic, sizeof(void*));
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInvokePolymorphic,
- pTestSuspend, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInvokePolymorphic, pInvokeCustom, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInvokeCustom, pTestSuspend, sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pTestSuspend, pDeliverException, sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pDeliverException, pThrowArrayBounds, sizeof(void*));
diff --git a/runtime/exec_utils_test.cc b/runtime/exec_utils_test.cc
index 68edfa8b72..c138ce3f9e 100644
--- a/runtime/exec_utils_test.cc
+++ b/runtime/exec_utils_test.cc
@@ -36,10 +36,9 @@ TEST_F(ExecUtilsTest, ExecSuccess) {
command.push_back("/usr/bin/id");
}
std::string error_msg;
- if (!(RUNNING_ON_MEMORY_TOOL && kMemoryToolDetectsLeaks)) {
- // Running on valgrind fails due to some memory that leaks in thread alternate signal stacks.
- EXPECT_TRUE(Exec(command, &error_msg));
- }
+ // Historical note: Running on Valgrind failed due to some memory
+ // that leaks in thread alternate signal stacks.
+ EXPECT_TRUE(Exec(command, &error_msg));
EXPECT_EQ(0U, error_msg.size()) << error_msg;
}
@@ -50,11 +49,10 @@ TEST_F(ExecUtilsTest, ExecError) {
std::vector<std::string> command;
command.push_back("bogus");
std::string error_msg;
- if (!(RUNNING_ON_MEMORY_TOOL && kMemoryToolDetectsLeaks)) {
- // Running on valgrind fails due to some memory that leaks in thread alternate signal stacks.
- EXPECT_FALSE(Exec(command, &error_msg));
- EXPECT_FALSE(error_msg.empty());
- }
+ // Historical note: Running on Valgrind failed due to some memory
+ // that leaks in thread alternate signal stacks.
+ EXPECT_FALSE(Exec(command, &error_msg));
+ EXPECT_FALSE(error_msg.empty());
}
TEST_F(ExecUtilsTest, EnvSnapshotAdditionsAreNotVisible) {
@@ -72,11 +70,10 @@ TEST_F(ExecUtilsTest, EnvSnapshotAdditionsAreNotVisible) {
}
command.push_back(kModifiedVariable);
std::string error_msg;
- if (!(RUNNING_ON_MEMORY_TOOL && kMemoryToolDetectsLeaks)) {
- // Running on valgrind fails due to some memory that leaks in thread alternate signal stacks.
- EXPECT_FALSE(Exec(command, &error_msg));
- EXPECT_NE(0U, error_msg.size()) << error_msg;
- }
+ // Historical note: Running on Valgrind failed due to some memory
+ // that leaks in thread alternate signal stacks.
+ EXPECT_FALSE(Exec(command, &error_msg));
+ EXPECT_NE(0U, error_msg.size()) << error_msg;
}
TEST_F(ExecUtilsTest, EnvSnapshotDeletionsAreNotVisible) {
@@ -97,11 +94,10 @@ TEST_F(ExecUtilsTest, EnvSnapshotDeletionsAreNotVisible) {
}
command.push_back(kDeletedVariable);
std::string error_msg;
- if (!(RUNNING_ON_MEMORY_TOOL && kMemoryToolDetectsLeaks)) {
- // Running on valgrind fails due to some memory that leaks in thread alternate signal stacks.
- EXPECT_TRUE(Exec(command, &error_msg));
- EXPECT_EQ(0U, error_msg.size()) << error_msg;
- }
+ // Historical note: Running on Valgrind failed due to some memory
+ // that leaks in thread alternate signal stacks.
+ EXPECT_TRUE(Exec(command, &error_msg));
+ EXPECT_EQ(0U, error_msg.size()) << error_msg;
// Restore the variable's value.
EXPECT_EQ(setenv(kDeletedVariable, save_value, kOverwrite), 0);
}
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index 150fe956ae..30213d55c5 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -625,7 +625,7 @@ class RosAlloc {
// If true, check that the returned memory is actually zero.
static constexpr bool kCheckZeroMemory = kIsDebugBuild;
- // Valgrind protects memory, so do not check memory when running under valgrind. In a normal
+ // Do not check memory when running under a memory tool. In a normal
// build with kCheckZeroMemory the whole test should be optimized away.
// TODO: Unprotect before checks.
ALWAYS_INLINE bool ShouldCheckZeroMemory();
@@ -768,7 +768,7 @@ class RosAlloc {
// greater than or equal to this value, release pages.
const size_t page_release_size_threshold_;
- // Whether this allocator is running under Valgrind.
+ // Whether this allocator is running on a memory tool.
bool is_running_on_memory_tool_;
// The base address of the memory region that's managed by this allocator.
diff --git a/runtime/gc/collector/concurrent_copying-inl.h b/runtime/gc/collector/concurrent_copying-inl.h
index b331e975fd..36fefbdbc3 100644
--- a/runtime/gc/collector/concurrent_copying-inl.h
+++ b/runtime/gc/collector/concurrent_copying-inl.h
@@ -32,7 +32,9 @@ namespace gc {
namespace collector {
inline mirror::Object* ConcurrentCopying::MarkUnevacFromSpaceRegion(
- mirror::Object* ref, accounting::ContinuousSpaceBitmap* bitmap) {
+ Thread* const self,
+ mirror::Object* ref,
+ accounting::ContinuousSpaceBitmap* bitmap) {
// For the Baker-style RB, in a rare case, we could incorrectly change the object from white
// to gray even though the object has already been marked through. This happens if a mutator
// thread gets preempted before the AtomicSetReadBarrierState below, GC marks through the
@@ -63,20 +65,21 @@ inline mirror::Object* ConcurrentCopying::MarkUnevacFromSpaceRegion(
if (kUseBakerReadBarrier) {
DCHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::GrayState());
}
- PushOntoMarkStack(ref);
+ PushOntoMarkStack(self, ref);
}
return ref;
}
template<bool kGrayImmuneObject>
-inline mirror::Object* ConcurrentCopying::MarkImmuneSpace(mirror::Object* ref) {
+inline mirror::Object* ConcurrentCopying::MarkImmuneSpace(Thread* const self,
+ mirror::Object* ref) {
if (kUseBakerReadBarrier) {
// The GC-running thread doesn't (need to) gray immune objects except when updating thread roots
// in the thread flip on behalf of suspended threads (when gc_grays_immune_objects_ is
// true). Also, a mutator doesn't (need to) gray an immune object after GC has updated all
// immune space objects (when updated_all_immune_objects_ is true).
if (kIsDebugBuild) {
- if (Thread::Current() == thread_running_gc_) {
+ if (self == thread_running_gc_) {
DCHECK(!kGrayImmuneObject ||
updated_all_immune_objects_.load(std::memory_order_relaxed) ||
gc_grays_immune_objects_);
@@ -91,7 +94,7 @@ inline mirror::Object* ConcurrentCopying::MarkImmuneSpace(mirror::Object* ref) {
bool success = ref->AtomicSetReadBarrierState(/* expected_rb_state */ ReadBarrier::WhiteState(),
/* rb_state */ ReadBarrier::GrayState());
if (success) {
- MutexLock mu(Thread::Current(), immune_gray_stack_lock_);
+ MutexLock mu(self, immune_gray_stack_lock_);
immune_gray_stack_.push_back(ref);
}
}
@@ -99,7 +102,8 @@ inline mirror::Object* ConcurrentCopying::MarkImmuneSpace(mirror::Object* ref) {
}
template<bool kGrayImmuneObject, bool kFromGCThread>
-inline mirror::Object* ConcurrentCopying::Mark(mirror::Object* from_ref,
+inline mirror::Object* ConcurrentCopying::Mark(Thread* const self,
+ mirror::Object* from_ref,
mirror::Object* holder,
MemberOffset offset) {
if (from_ref == nullptr) {
@@ -108,7 +112,7 @@ inline mirror::Object* ConcurrentCopying::Mark(mirror::Object* from_ref,
DCHECK(heap_->collector_type_ == kCollectorTypeCC);
if (kFromGCThread) {
DCHECK(is_active_);
- DCHECK_EQ(Thread::Current(), thread_running_gc_);
+ DCHECK_EQ(self, thread_running_gc_);
} else if (UNLIKELY(kUseBakerReadBarrier && !is_active_)) {
// In the lock word forward address state, the read barrier bits
// in the lock word are part of the stored forwarding address and
@@ -134,7 +138,7 @@ inline mirror::Object* ConcurrentCopying::Mark(mirror::Object* from_ref,
mirror::Object* to_ref = GetFwdPtr(from_ref);
if (to_ref == nullptr) {
// It isn't marked yet. Mark it by copying it to the to-space.
- to_ref = Copy(from_ref, holder, offset);
+ to_ref = Copy(self, from_ref, holder, offset);
}
// The copy should either be in a to-space region, or in the
// non-moving space, if it could not fit in a to-space region.
@@ -143,7 +147,7 @@ inline mirror::Object* ConcurrentCopying::Mark(mirror::Object* from_ref,
return to_ref;
}
case space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace:
- return MarkUnevacFromSpaceRegion(from_ref, region_space_bitmap_);
+ return MarkUnevacFromSpaceRegion(self, from_ref, region_space_bitmap_);
default:
// The reference is in an unused region.
LOG(FATAL_WITHOUT_ABORT) << DumpHeapReference(holder, offset, from_ref);
@@ -153,24 +157,25 @@ inline mirror::Object* ConcurrentCopying::Mark(mirror::Object* from_ref,
}
} else {
if (immune_spaces_.ContainsObject(from_ref)) {
- return MarkImmuneSpace<kGrayImmuneObject>(from_ref);
+ return MarkImmuneSpace<kGrayImmuneObject>(self, from_ref);
} else {
- return MarkNonMoving(from_ref, holder, offset);
+ return MarkNonMoving(self, from_ref, holder, offset);
}
}
}
inline mirror::Object* ConcurrentCopying::MarkFromReadBarrier(mirror::Object* from_ref) {
mirror::Object* ret;
+ Thread* const self = Thread::Current();
// We can get here before marking starts since we gray immune objects before the marking phase.
- if (from_ref == nullptr || !Thread::Current()->GetIsGcMarking()) {
+ if (from_ref == nullptr || !self->GetIsGcMarking()) {
return from_ref;
}
// TODO: Consider removing this check when we are done investigating slow paths. b/30162165
if (UNLIKELY(mark_from_read_barrier_measurements_)) {
- ret = MarkFromReadBarrierWithMeasurements(from_ref);
+ ret = MarkFromReadBarrierWithMeasurements(self, from_ref);
} else {
- ret = Mark(from_ref);
+ ret = Mark(self, from_ref);
}
// Only set the mark bit for baker barrier.
if (kUseBakerReadBarrier && LIKELY(!rb_mark_bit_stack_full_ && ret->AtomicSetMarkBit(0, 1))) {
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 2c2c437365..edaa043ce6 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -129,13 +129,14 @@ ConcurrentCopying::ConcurrentCopying(Heap* heap,
void ConcurrentCopying::MarkHeapReference(mirror::HeapReference<mirror::Object>* field,
bool do_atomic_update) {
+ Thread* const self = Thread::Current();
if (UNLIKELY(do_atomic_update)) {
// Used to mark the referent in DelayReferenceReferent in transaction mode.
mirror::Object* from_ref = field->AsMirrorPtr();
if (from_ref == nullptr) {
return;
}
- mirror::Object* to_ref = Mark(from_ref);
+ mirror::Object* to_ref = Mark(self, from_ref);
if (from_ref != to_ref) {
do {
if (field->AsMirrorPtr() != from_ref) {
@@ -148,7 +149,7 @@ void ConcurrentCopying::MarkHeapReference(mirror::HeapReference<mirror::Object>*
// Used for preserving soft references, should be OK to not have a CAS here since there should be
// no other threads which can trigger read barriers on the same referent during reference
// processing.
- field->Assign(Mark(field->AsMirrorPtr()));
+ field->Assign(Mark(self, field->AsMirrorPtr()));
}
}
@@ -300,6 +301,8 @@ void ConcurrentCopying::InitializePhase() {
immune_spaces_.Reset();
bytes_moved_.store(0, std::memory_order_relaxed);
objects_moved_.store(0, std::memory_order_relaxed);
+ bytes_moved_gc_thread_ = 0;
+ objects_moved_gc_thread_ = 0;
GcCause gc_cause = GetCurrentIteration()->GetGcCause();
if (gc_cause == kGcCauseExplicit ||
gc_cause == kGcCauseCollectorTransition ||
@@ -370,11 +373,12 @@ class ConcurrentCopying::ThreadFlipVisitor : public Closure, public RootVisitor
size_t count,
const RootInfo& info ATTRIBUTE_UNUSED)
REQUIRES_SHARED(Locks::mutator_lock_) {
+ Thread* self = Thread::Current();
for (size_t i = 0; i < count; ++i) {
mirror::Object** root = roots[i];
mirror::Object* ref = *root;
if (ref != nullptr) {
- mirror::Object* to_ref = concurrent_copying_->Mark(ref);
+ mirror::Object* to_ref = concurrent_copying_->Mark(self, ref);
if (to_ref != ref) {
*root = to_ref;
}
@@ -386,11 +390,12 @@ class ConcurrentCopying::ThreadFlipVisitor : public Closure, public RootVisitor
size_t count,
const RootInfo& info ATTRIBUTE_UNUSED)
REQUIRES_SHARED(Locks::mutator_lock_) {
+ Thread* self = Thread::Current();
for (size_t i = 0; i < count; ++i) {
mirror::CompressedReference<mirror::Object>* const root = roots[i];
if (!root->IsNull()) {
mirror::Object* ref = root->AsMirrorPtr();
- mirror::Object* to_ref = concurrent_copying_->Mark(ref);
+ mirror::Object* to_ref = concurrent_copying_->Mark(self, ref);
if (to_ref != ref) {
root->Assign(to_ref);
}
@@ -452,7 +457,7 @@ class ConcurrentCopying::FlipCallback : public Closure {
// This is safe since single threaded behavior should mean FillDummyObject does not
// happen when java_lang_Object_ is null.
if (WellKnownClasses::java_lang_Object != nullptr) {
- cc->java_lang_Object_ = down_cast<mirror::Class*>(cc->Mark(
+ cc->java_lang_Object_ = down_cast<mirror::Class*>(cc->Mark(thread,
WellKnownClasses::ToClass(WellKnownClasses::java_lang_Object).Ptr()));
} else {
cc->java_lang_Object_ = nullptr;
@@ -1024,10 +1029,10 @@ void ConcurrentCopying::DisableMarking() {
mark_stack_mode_.store(kMarkStackModeOff, std::memory_order_seq_cst);
}
-void ConcurrentCopying::PushOntoFalseGrayStack(mirror::Object* ref) {
+void ConcurrentCopying::PushOntoFalseGrayStack(Thread* const self, mirror::Object* ref) {
CHECK(kUseBakerReadBarrier);
DCHECK(ref != nullptr);
- MutexLock mu(Thread::Current(), mark_stack_lock_);
+ MutexLock mu(self, mark_stack_lock_);
false_gray_stack_.push_back(ref);
}
@@ -1070,10 +1075,9 @@ void ConcurrentCopying::ExpandGcMarkStack() {
DCHECK(!gc_mark_stack_->IsFull());
}
-void ConcurrentCopying::PushOntoMarkStack(mirror::Object* to_ref) {
+void ConcurrentCopying::PushOntoMarkStack(Thread* const self, mirror::Object* to_ref) {
CHECK_EQ(is_mark_stack_push_disallowed_.load(std::memory_order_relaxed), 0)
<< " " << to_ref << " " << mirror::Object::PrettyTypeOf(to_ref);
- Thread* self = Thread::Current(); // TODO: pass self as an argument from call sites?
CHECK(thread_running_gc_ != nullptr);
MarkStackMode mark_stack_mode = mark_stack_mode_.load(std::memory_order_relaxed);
if (LIKELY(mark_stack_mode == kMarkStackModeThreadLocal)) {
@@ -1409,10 +1413,10 @@ void ConcurrentCopying::ProcessMarkStack() {
}
bool ConcurrentCopying::ProcessMarkStackOnce() {
- Thread* self = Thread::Current();
- CHECK(thread_running_gc_ != nullptr);
- CHECK(self == thread_running_gc_);
- CHECK(self->GetThreadLocalMarkStack() == nullptr);
+ DCHECK(thread_running_gc_ != nullptr);
+ Thread* const self = Thread::Current();
+ DCHECK(self == thread_running_gc_);
+ DCHECK(thread_running_gc_->GetThreadLocalMarkStack() == nullptr);
size_t count = 0;
MarkStackMode mark_stack_mode = mark_stack_mode_.load(std::memory_order_relaxed);
if (mark_stack_mode == kMarkStackModeThreadLocal) {
@@ -1432,14 +1436,14 @@ bool ConcurrentCopying::ProcessMarkStackOnce() {
IssueEmptyCheckpoint();
// Process the shared GC mark stack with a lock.
{
- MutexLock mu(self, mark_stack_lock_);
+ MutexLock mu(thread_running_gc_, mark_stack_lock_);
CHECK(revoked_mark_stacks_.empty());
}
while (true) {
std::vector<mirror::Object*> refs;
{
// Copy refs with lock. Note the number of refs should be small.
- MutexLock mu(self, mark_stack_lock_);
+ MutexLock mu(thread_running_gc_, mark_stack_lock_);
if (gc_mark_stack_->IsEmpty()) {
break;
}
@@ -1458,7 +1462,7 @@ bool ConcurrentCopying::ProcessMarkStackOnce() {
CHECK_EQ(static_cast<uint32_t>(mark_stack_mode),
static_cast<uint32_t>(kMarkStackModeGcExclusive));
{
- MutexLock mu(self, mark_stack_lock_);
+ MutexLock mu(thread_running_gc_, mark_stack_lock_);
CHECK(revoked_mark_stacks_.empty());
}
// Process the GC mark stack in the exclusive mode. No need to take the lock.
@@ -1481,7 +1485,7 @@ size_t ConcurrentCopying::ProcessThreadLocalMarkStacks(bool disable_weak_ref_acc
size_t count = 0;
std::vector<accounting::AtomicStack<mirror::Object>*> mark_stacks;
{
- MutexLock mu(Thread::Current(), mark_stack_lock_);
+ MutexLock mu(thread_running_gc_, mark_stack_lock_);
// Make a copy of the mark stack vector.
mark_stacks = revoked_mark_stacks_;
revoked_mark_stacks_.clear();
@@ -1493,7 +1497,7 @@ size_t ConcurrentCopying::ProcessThreadLocalMarkStacks(bool disable_weak_ref_acc
++count;
}
{
- MutexLock mu(Thread::Current(), mark_stack_lock_);
+ MutexLock mu(thread_running_gc_, mark_stack_lock_);
if (pooled_mark_stacks_.size() >= kMarkStackPoolSize) {
// The pool has enough. Delete it.
delete mark_stack;
@@ -1547,7 +1551,7 @@ inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
// above IsInToSpace() evaluates to true and we change the color from gray to white here in this
// else block.
if (kUseBakerReadBarrier) {
- bool success = to_ref->AtomicSetReadBarrierState</*kCasRelease*/true>(
+ bool success = to_ref->AtomicSetReadBarrierState<std::memory_order_release>(
ReadBarrier::GrayState(),
ReadBarrier::WhiteState());
DCHECK(success) << "Must succeed as we won the race.";
@@ -1596,9 +1600,9 @@ class ConcurrentCopying::DisableWeakRefAccessCallback : public Closure {
void ConcurrentCopying::SwitchToSharedMarkStackMode() {
Thread* self = Thread::Current();
- CHECK(thread_running_gc_ != nullptr);
- CHECK_EQ(self, thread_running_gc_);
- CHECK(self->GetThreadLocalMarkStack() == nullptr);
+ DCHECK(thread_running_gc_ != nullptr);
+ DCHECK(self == thread_running_gc_);
+ DCHECK(thread_running_gc_->GetThreadLocalMarkStack() == nullptr);
MarkStackMode before_mark_stack_mode = mark_stack_mode_.load(std::memory_order_relaxed);
CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode),
static_cast<uint32_t>(kMarkStackModeThreadLocal));
@@ -1614,9 +1618,9 @@ void ConcurrentCopying::SwitchToSharedMarkStackMode() {
void ConcurrentCopying::SwitchToGcExclusiveMarkStackMode() {
Thread* self = Thread::Current();
- CHECK(thread_running_gc_ != nullptr);
- CHECK_EQ(self, thread_running_gc_);
- CHECK(self->GetThreadLocalMarkStack() == nullptr);
+ DCHECK(thread_running_gc_ != nullptr);
+ DCHECK(self == thread_running_gc_);
+ DCHECK(thread_running_gc_->GetThreadLocalMarkStack() == nullptr);
MarkStackMode before_mark_stack_mode = mark_stack_mode_.load(std::memory_order_relaxed);
CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode),
static_cast<uint32_t>(kMarkStackModeShared));
@@ -1629,14 +1633,14 @@ void ConcurrentCopying::SwitchToGcExclusiveMarkStackMode() {
void ConcurrentCopying::CheckEmptyMarkStack() {
Thread* self = Thread::Current();
- CHECK(thread_running_gc_ != nullptr);
- CHECK_EQ(self, thread_running_gc_);
- CHECK(self->GetThreadLocalMarkStack() == nullptr);
+ DCHECK(thread_running_gc_ != nullptr);
+ DCHECK(self == thread_running_gc_);
+ DCHECK(thread_running_gc_->GetThreadLocalMarkStack() == nullptr);
MarkStackMode mark_stack_mode = mark_stack_mode_.load(std::memory_order_relaxed);
if (mark_stack_mode == kMarkStackModeThreadLocal) {
// Thread-local mark stack mode.
RevokeThreadLocalMarkStacks(false, nullptr);
- MutexLock mu(Thread::Current(), mark_stack_lock_);
+ MutexLock mu(thread_running_gc_, mark_stack_lock_);
if (!revoked_mark_stacks_.empty()) {
for (accounting::AtomicStack<mirror::Object>* mark_stack : revoked_mark_stacks_) {
while (!mark_stack->IsEmpty()) {
@@ -1655,7 +1659,7 @@ void ConcurrentCopying::CheckEmptyMarkStack() {
}
} else {
// Shared, GC-exclusive, or off.
- MutexLock mu(Thread::Current(), mark_stack_lock_);
+ MutexLock mu(thread_running_gc_, mark_stack_lock_);
CHECK(gc_mark_stack_->IsEmpty());
CHECK(revoked_mark_stacks_.empty());
}
@@ -1755,9 +1759,9 @@ void ConcurrentCopying::ReclaimPhase() {
const uint64_t from_objects = region_space_->GetObjectsAllocatedInFromSpace();
const uint64_t unevac_from_bytes = region_space_->GetBytesAllocatedInUnevacFromSpace();
const uint64_t unevac_from_objects = region_space_->GetObjectsAllocatedInUnevacFromSpace();
- uint64_t to_bytes = bytes_moved_.load(std::memory_order_seq_cst);
+ uint64_t to_bytes = bytes_moved_.load(std::memory_order_seq_cst) + bytes_moved_gc_thread_;
cumulative_bytes_moved_.fetch_add(to_bytes, std::memory_order_relaxed);
- uint64_t to_objects = objects_moved_.load(std::memory_order_seq_cst);
+ uint64_t to_objects = objects_moved_.load(std::memory_order_seq_cst) + objects_moved_gc_thread_;
cumulative_objects_moved_.fetch_add(to_objects, std::memory_order_relaxed);
if (kEnableFromSpaceAccountingCheck) {
CHECK_EQ(from_space_num_objects_at_first_pause_, from_objects + unevac_from_objects);
@@ -1818,7 +1822,7 @@ void ConcurrentCopying::ReclaimPhase() {
std::string ConcurrentCopying::DumpReferenceInfo(mirror::Object* ref,
const char* ref_name,
- std::string indent) {
+ const char* indent) {
std::ostringstream oss;
oss << indent << heap_->GetVerification()->DumpObjectInfo(ref, ref_name) << '\n';
if (ref != nullptr) {
@@ -1842,13 +1846,13 @@ std::string ConcurrentCopying::DumpHeapReference(mirror::Object* obj,
MemberOffset offset,
mirror::Object* ref) {
std::ostringstream oss;
- std::string indent = " ";
- oss << indent << "Invalid reference: ref=" << ref
+ constexpr const char* kIndent = " ";
+ oss << kIndent << "Invalid reference: ref=" << ref
<< " referenced from: object=" << obj << " offset= " << offset << '\n';
// Information about `obj`.
- oss << DumpReferenceInfo(obj, "obj", indent) << '\n';
+ oss << DumpReferenceInfo(obj, "obj", kIndent) << '\n';
// Information about `ref`.
- oss << DumpReferenceInfo(ref, "ref", indent);
+ oss << DumpReferenceInfo(ref, "ref", kIndent);
return oss.str();
}
@@ -1924,10 +1928,10 @@ class RootPrinter {
std::string ConcurrentCopying::DumpGcRoot(mirror::Object* ref) {
std::ostringstream oss;
- std::string indent = " ";
- oss << indent << "Invalid GC root: ref=" << ref << '\n';
+ constexpr const char* kIndent = " ";
+ oss << kIndent << "Invalid GC root: ref=" << ref << '\n';
// Information about `ref`.
- oss << DumpReferenceInfo(ref, "ref", indent);
+ oss << DumpReferenceInfo(ref, "ref", kIndent);
return oss.str();
}
@@ -2075,8 +2079,8 @@ void ConcurrentCopying::AssertToSpaceInvariantInNonMovingSpace(mirror::Object* o
// Used to scan ref fields of an object.
class ConcurrentCopying::RefFieldsVisitor {
public:
- explicit RefFieldsVisitor(ConcurrentCopying* collector)
- : collector_(collector) {}
+ explicit RefFieldsVisitor(ConcurrentCopying* collector, Thread* const thread)
+ : collector_(collector), thread_(thread) {}
void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */)
const ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_)
@@ -2101,11 +2105,12 @@ class ConcurrentCopying::RefFieldsVisitor {
void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
ALWAYS_INLINE
REQUIRES_SHARED(Locks::mutator_lock_) {
- collector_->MarkRoot</*kGrayImmuneObject*/false>(root);
+ collector_->MarkRoot</*kGrayImmuneObject*/false>(thread_, root);
}
private:
ConcurrentCopying* const collector_;
+ Thread* const thread_;
};
inline void ConcurrentCopying::Scan(mirror::Object* to_ref) {
@@ -2117,12 +2122,12 @@ inline void ConcurrentCopying::Scan(mirror::Object* to_ref) {
}
DCHECK(!region_space_->IsInFromSpace(to_ref));
DCHECK_EQ(Thread::Current(), thread_running_gc_);
- RefFieldsVisitor visitor(this);
+ RefFieldsVisitor visitor(this, thread_running_gc_);
// Disable the read barrier for a performance reason.
to_ref->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
visitor, visitor);
if (kDisallowReadBarrierDuringScan && !Runtime::Current()->IsActiveTransaction()) {
- Thread::Current()->ModifyDebugDisallowReadBarrier(-1);
+ thread_running_gc_->ModifyDebugDisallowReadBarrier(-1);
}
}
@@ -2131,6 +2136,7 @@ inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset)
mirror::Object* ref = obj->GetFieldObject<
mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset);
mirror::Object* to_ref = Mark</*kGrayImmuneObject*/false, /*kFromGCThread*/true>(
+ thread_running_gc_,
ref,
/*holder*/ obj,
offset);
@@ -2147,19 +2153,22 @@ inline void ConcurrentCopying::Process(mirror::Object* obj, MemberOffset offset)
break;
}
// Use release CAS to make sure threads reading the reference see contents of copied objects.
- } while (!obj->CasFieldWeakReleaseObjectWithoutWriteBarrier<false, false, kVerifyNone>(
+ } while (!obj->CasFieldObjectWithoutWriteBarrier<false, false, kVerifyNone>(
offset,
expected_ref,
- new_ref));
+ new_ref,
+ CASMode::kWeak,
+ std::memory_order_release));
}
// Process some roots.
inline void ConcurrentCopying::VisitRoots(
mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) {
+ Thread* const self = Thread::Current();
for (size_t i = 0; i < count; ++i) {
mirror::Object** root = roots[i];
mirror::Object* ref = *root;
- mirror::Object* to_ref = Mark(ref);
+ mirror::Object* to_ref = Mark(self, ref);
if (to_ref == ref) {
continue;
}
@@ -2176,10 +2185,11 @@ inline void ConcurrentCopying::VisitRoots(
}
template<bool kGrayImmuneObject>
-inline void ConcurrentCopying::MarkRoot(mirror::CompressedReference<mirror::Object>* root) {
+inline void ConcurrentCopying::MarkRoot(Thread* const self,
+ mirror::CompressedReference<mirror::Object>* root) {
DCHECK(!root->IsNull());
mirror::Object* const ref = root->AsMirrorPtr();
- mirror::Object* to_ref = Mark<kGrayImmuneObject>(ref);
+ mirror::Object* to_ref = Mark<kGrayImmuneObject>(self, ref);
if (to_ref != ref) {
auto* addr = reinterpret_cast<Atomic<mirror::CompressedReference<mirror::Object>>*>(root);
auto expected_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(ref);
@@ -2197,11 +2207,12 @@ inline void ConcurrentCopying::MarkRoot(mirror::CompressedReference<mirror::Obje
inline void ConcurrentCopying::VisitRoots(
mirror::CompressedReference<mirror::Object>** roots, size_t count,
const RootInfo& info ATTRIBUTE_UNUSED) {
+ Thread* const self = Thread::Current();
for (size_t i = 0; i < count; ++i) {
mirror::CompressedReference<mirror::Object>* const root = roots[i];
if (!root->IsNull()) {
// kGrayImmuneObject is true because this is used for the thread flip.
- MarkRoot</*kGrayImmuneObject*/true>(root);
+ MarkRoot</*kGrayImmuneObject*/true>(self, root);
}
}
}
@@ -2235,7 +2246,9 @@ class ConcurrentCopying::ScopedGcGraysImmuneObjects {
// Fill the given memory block with a dummy object. Used to fill in a
// copy of objects that was lost in race.
-void ConcurrentCopying::FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size) {
+void ConcurrentCopying::FillWithDummyObject(Thread* const self,
+ mirror::Object* dummy_obj,
+ size_t byte_size) {
// GC doesn't gray immune objects while scanning immune objects. But we need to trigger the read
// barriers here because we need the updated reference to the int array class, etc. Temporary set
// gc_grays_immune_objects_ to true so that we won't cause a DCHECK failure in MarkImmuneSpace().
@@ -2245,7 +2258,7 @@ void ConcurrentCopying::FillWithDummyObject(mirror::Object* dummy_obj, size_t by
// Avoid going through read barrier for since kDisallowReadBarrierDuringScan may be enabled.
// Explicitly mark to make sure to get an object in the to-space.
mirror::Class* int_array_class = down_cast<mirror::Class*>(
- Mark(GetClassRoot<mirror::IntArray, kWithoutReadBarrier>().Ptr()));
+ Mark(self, GetClassRoot<mirror::IntArray, kWithoutReadBarrier>().Ptr()));
CHECK(int_array_class != nullptr);
if (ReadBarrier::kEnableToSpaceInvariantChecks) {
AssertToSpaceInvariant(nullptr, MemberOffset(0), int_array_class);
@@ -2279,10 +2292,9 @@ void ConcurrentCopying::FillWithDummyObject(mirror::Object* dummy_obj, size_t by
}
// Reuse the memory blocks that were copy of objects that were lost in race.
-mirror::Object* ConcurrentCopying::AllocateInSkippedBlock(size_t alloc_size) {
+mirror::Object* ConcurrentCopying::AllocateInSkippedBlock(Thread* const self, size_t alloc_size) {
// Try to reuse the blocks that were unused due to CAS failures.
CHECK_ALIGNED(alloc_size, space::RegionSpace::kAlignment);
- Thread* self = Thread::Current();
size_t min_object_size = RoundUp(sizeof(mirror::Object), space::RegionSpace::kAlignment);
size_t byte_size;
uint8_t* addr;
@@ -2326,7 +2338,8 @@ mirror::Object* ConcurrentCopying::AllocateInSkippedBlock(size_t alloc_size) {
// FillWithDummyObject may mark an object, avoid holding skipped_blocks_lock_ to prevent lock
// violation and possible deadlock. The deadlock case is a recursive case:
// FillWithDummyObject -> Mark(IntArray.class) -> Copy -> AllocateInSkippedBlock.
- FillWithDummyObject(reinterpret_cast<mirror::Object*>(addr + alloc_size),
+ FillWithDummyObject(self,
+ reinterpret_cast<mirror::Object*>(addr + alloc_size),
byte_size - alloc_size);
CHECK(region_space_->IsInToSpace(reinterpret_cast<mirror::Object*>(addr + alloc_size)));
{
@@ -2337,7 +2350,8 @@ mirror::Object* ConcurrentCopying::AllocateInSkippedBlock(size_t alloc_size) {
return reinterpret_cast<mirror::Object*>(addr);
}
-mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref,
+mirror::Object* ConcurrentCopying::Copy(Thread* const self,
+ mirror::Object* from_ref,
mirror::Object* holder,
MemberOffset offset) {
DCHECK(region_space_->IsInFromSpace(from_ref));
@@ -2366,7 +2380,7 @@ mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref,
DCHECK_EQ(region_space_alloc_size, region_space_bytes_allocated);
} else {
// Failed to allocate in the region space. Try the skipped blocks.
- to_ref = AllocateInSkippedBlock(region_space_alloc_size);
+ to_ref = AllocateInSkippedBlock(self, region_space_alloc_size);
if (to_ref != nullptr) {
// Succeeded to allocate in a skipped block.
if (heap_->use_tlab_) {
@@ -2386,7 +2400,7 @@ mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref,
<< " skipped_objects="
<< to_space_objects_skipped_.load(std::memory_order_seq_cst);
}
- to_ref = heap_->non_moving_space_->Alloc(Thread::Current(), obj_size,
+ to_ref = heap_->non_moving_space_->Alloc(self, obj_size,
&non_moving_space_bytes_allocated, nullptr, &dummy);
if (UNLIKELY(to_ref == nullptr)) {
LOG(FATAL_WITHOUT_ABORT) << "Fall-back non-moving space allocation failed for a "
@@ -2427,7 +2441,7 @@ mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref,
// the forwarding pointer first. Make the lost copy (to_ref)
// look like a valid but dead (dummy) object and keep it for
// future reuse.
- FillWithDummyObject(to_ref, bytes_allocated);
+ FillWithDummyObject(self, to_ref, bytes_allocated);
if (!fall_back_to_non_moving) {
DCHECK(region_space_->IsInToSpace(to_ref));
if (bytes_allocated > space::RegionSpace::kRegionSize) {
@@ -2438,7 +2452,7 @@ mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref,
heap_->num_bytes_allocated_.fetch_add(bytes_allocated, std::memory_order_seq_cst);
to_space_bytes_skipped_.fetch_add(bytes_allocated, std::memory_order_seq_cst);
to_space_objects_skipped_.fetch_add(1, std::memory_order_seq_cst);
- MutexLock mu(Thread::Current(), skipped_blocks_lock_);
+ MutexLock mu(self, skipped_blocks_lock_);
skipped_blocks_map_.insert(std::make_pair(bytes_allocated,
reinterpret_cast<uint8_t*>(to_ref)));
}
@@ -2450,7 +2464,7 @@ mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref,
heap_mark_bitmap_->GetContinuousSpaceBitmap(to_ref);
CHECK(mark_bitmap != nullptr);
CHECK(mark_bitmap->Clear(to_ref));
- heap_->non_moving_space_->Free(Thread::Current(), to_ref);
+ heap_->non_moving_space_->Free(self, to_ref);
}
// Get the winner's forward ptr.
@@ -2478,11 +2492,21 @@ mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref,
LockWord new_lock_word = LockWord::FromForwardingAddress(reinterpret_cast<size_t>(to_ref));
// Try to atomically write the fwd ptr.
- bool success = from_ref->CasLockWordWeakRelaxed(old_lock_word, new_lock_word);
+ bool success = from_ref->CasLockWord(old_lock_word,
+ new_lock_word,
+ CASMode::kWeak,
+ std::memory_order_relaxed);
if (LIKELY(success)) {
// The CAS succeeded.
- objects_moved_.fetch_add(1, std::memory_order_relaxed);
- bytes_moved_.fetch_add(region_space_alloc_size, std::memory_order_relaxed);
+ DCHECK(thread_running_gc_ != nullptr);
+ if (LIKELY(self == thread_running_gc_)) {
+ objects_moved_gc_thread_ += 1;
+ bytes_moved_gc_thread_ += region_space_alloc_size;
+ } else {
+ objects_moved_.fetch_add(1, std::memory_order_relaxed);
+ bytes_moved_.fetch_add(region_space_alloc_size, std::memory_order_relaxed);
+ }
+
if (LIKELY(!fall_back_to_non_moving)) {
DCHECK(region_space_->IsInToSpace(to_ref));
} else {
@@ -2494,7 +2518,7 @@ mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref,
}
DCHECK(GetFwdPtr(from_ref) == to_ref);
CHECK_NE(to_ref->GetLockWord(false).GetState(), LockWord::kForwardingAddress);
- PushOntoMarkStack(to_ref);
+ PushOntoMarkStack(self, to_ref);
return to_ref;
} else {
// The CAS failed. It may have lost the race or may have failed
@@ -2573,7 +2597,8 @@ bool ConcurrentCopying::IsOnAllocStack(mirror::Object* ref) {
return alloc_stack->Contains(ref);
}
-mirror::Object* ConcurrentCopying::MarkNonMoving(mirror::Object* ref,
+mirror::Object* ConcurrentCopying::MarkNonMoving(Thread* const self,
+ mirror::Object* ref,
mirror::Object* holder,
MemberOffset offset) {
// ref is in a non-moving space (from_ref == to_ref).
@@ -2587,16 +2612,8 @@ mirror::Object* ConcurrentCopying::MarkNonMoving(mirror::Object* ref,
bool is_los = mark_bitmap == nullptr;
if (!is_los && mark_bitmap->Test(ref)) {
// Already marked.
- if (kUseBakerReadBarrier) {
- DCHECK(ref->GetReadBarrierState() == ReadBarrier::GrayState() ||
- ref->GetReadBarrierState() == ReadBarrier::WhiteState());
- }
} else if (is_los && los_bitmap->Test(ref)) {
// Already marked in LOS.
- if (kUseBakerReadBarrier) {
- DCHECK(ref->GetReadBarrierState() == ReadBarrier::GrayState() ||
- ref->GetReadBarrierState() == ReadBarrier::WhiteState());
- }
} else {
// Not marked.
if (IsOnAllocStack(ref)) {
@@ -2636,20 +2653,20 @@ mirror::Object* ConcurrentCopying::MarkNonMoving(mirror::Object* ref,
// Already marked.
if (kUseBakerReadBarrier && cas_success &&
ref->GetReadBarrierState() == ReadBarrier::GrayState()) {
- PushOntoFalseGrayStack(ref);
+ PushOntoFalseGrayStack(self, ref);
}
} else if (is_los && los_bitmap->AtomicTestAndSet(ref)) {
// Already marked in LOS.
if (kUseBakerReadBarrier && cas_success &&
ref->GetReadBarrierState() == ReadBarrier::GrayState()) {
- PushOntoFalseGrayStack(ref);
+ PushOntoFalseGrayStack(self, ref);
}
} else {
// Newly marked.
if (kUseBakerReadBarrier) {
DCHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::GrayState());
}
- PushOntoMarkStack(ref);
+ PushOntoMarkStack(self, ref);
}
}
}
@@ -2742,7 +2759,7 @@ bool ConcurrentCopying::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror
}
mirror::Object* ConcurrentCopying::MarkObject(mirror::Object* from_ref) {
- return Mark(from_ref);
+ return Mark(Thread::Current(), from_ref);
}
void ConcurrentCopying::DelayReferenceReferent(ObjPtr<mirror::Class> klass,
@@ -2763,15 +2780,16 @@ void ConcurrentCopying::RevokeAllThreadLocalBuffers() {
region_space_->RevokeAllThreadLocalBuffers();
}
-mirror::Object* ConcurrentCopying::MarkFromReadBarrierWithMeasurements(mirror::Object* from_ref) {
- if (Thread::Current() != thread_running_gc_) {
+mirror::Object* ConcurrentCopying::MarkFromReadBarrierWithMeasurements(Thread* const self,
+ mirror::Object* from_ref) {
+ if (self != thread_running_gc_) {
rb_slow_path_count_.fetch_add(1u, std::memory_order_relaxed);
} else {
rb_slow_path_count_gc_.fetch_add(1u, std::memory_order_relaxed);
}
ScopedTrace tr(__FUNCTION__);
const uint64_t start_time = measure_read_barrier_slow_path_ ? NanoTime() : 0u;
- mirror::Object* ret = Mark(from_ref);
+ mirror::Object* ret = Mark(self, from_ref);
if (measure_read_barrier_slow_path_) {
rb_slow_path_ns_.fetch_add(NanoTime() - start_time, std::memory_order_relaxed);
}
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index a00dbb58d0..448525d013 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -112,7 +112,8 @@ class ConcurrentCopying : public GarbageCollector {
}
template<bool kGrayImmuneObject = true, bool kFromGCThread = false>
// Mark object `from_ref`, copying it to the to-space if needed.
- ALWAYS_INLINE mirror::Object* Mark(mirror::Object* from_ref,
+ ALWAYS_INLINE mirror::Object* Mark(Thread* const self,
+ mirror::Object* from_ref,
mirror::Object* holder = nullptr,
MemberOffset offset = MemberOffset(0))
REQUIRES_SHARED(Locks::mutator_lock_)
@@ -144,9 +145,11 @@ class ConcurrentCopying : public GarbageCollector {
REQUIRES_SHARED(Locks::mutator_lock_);
private:
- void PushOntoMarkStack(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_)
+ void PushOntoMarkStack(Thread* const self, mirror::Object* obj)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
- mirror::Object* Copy(mirror::Object* from_ref,
+ mirror::Object* Copy(Thread* const self,
+ mirror::Object* from_ref,
mirror::Object* holder,
MemberOffset offset)
REQUIRES_SHARED(Locks::mutator_lock_)
@@ -162,7 +165,7 @@ class ConcurrentCopying : public GarbageCollector {
OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
template<bool kGrayImmuneObject>
- void MarkRoot(mirror::CompressedReference<mirror::Object>* root)
+ void MarkRoot(Thread* const self, mirror::CompressedReference<mirror::Object>* root)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
@@ -220,10 +223,10 @@ class ConcurrentCopying : public GarbageCollector {
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
void MarkZygoteLargeObjects()
REQUIRES_SHARED(Locks::mutator_lock_);
- void FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size)
+ void FillWithDummyObject(Thread* const self, mirror::Object* dummy_obj, size_t byte_size)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
- mirror::Object* AllocateInSkippedBlock(size_t alloc_size)
+ mirror::Object* AllocateInSkippedBlock(Thread* const self, size_t alloc_size)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
void CheckEmptyMarkStack() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
@@ -239,7 +242,7 @@ class ConcurrentCopying : public GarbageCollector {
REQUIRES_SHARED(Locks::mutator_lock_);
// Dump information about reference `ref` and return it as a string.
// Use `ref_name` to name the reference in messages. Each message is prefixed with `indent`.
- std::string DumpReferenceInfo(mirror::Object* ref, const char* ref_name, std::string indent = "")
+ std::string DumpReferenceInfo(mirror::Object* ref, const char* ref_name, const char* indent = "")
REQUIRES_SHARED(Locks::mutator_lock_);
// Dump information about heap reference `ref`, referenced from object `obj` at offset `offset`,
// and return it as a string.
@@ -253,25 +256,30 @@ class ConcurrentCopying : public GarbageCollector {
void DisableMarking() REQUIRES_SHARED(Locks::mutator_lock_);
void IssueDisableMarkingCheckpoint() REQUIRES_SHARED(Locks::mutator_lock_);
void ExpandGcMarkStack() REQUIRES_SHARED(Locks::mutator_lock_);
- mirror::Object* MarkNonMoving(mirror::Object* from_ref,
+ mirror::Object* MarkNonMoving(Thread* const self,
+ mirror::Object* from_ref,
mirror::Object* holder = nullptr,
MemberOffset offset = MemberOffset(0))
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
- ALWAYS_INLINE mirror::Object* MarkUnevacFromSpaceRegion(mirror::Object* from_ref,
+ ALWAYS_INLINE mirror::Object* MarkUnevacFromSpaceRegion(Thread* const self,
+ mirror::Object* from_ref,
accounting::SpaceBitmap<kObjectAlignment>* bitmap)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
template<bool kGrayImmuneObject>
- ALWAYS_INLINE mirror::Object* MarkImmuneSpace(mirror::Object* from_ref)
+ ALWAYS_INLINE mirror::Object* MarkImmuneSpace(Thread* const self,
+ mirror::Object* from_ref)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!immune_gray_stack_lock_);
- void PushOntoFalseGrayStack(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_)
+ void PushOntoFalseGrayStack(Thread* const self, mirror::Object* obj)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
void ProcessFalseGrayStack() REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
void ScanImmuneObject(mirror::Object* obj)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
- mirror::Object* MarkFromReadBarrierWithMeasurements(mirror::Object* from_ref)
+ mirror::Object* MarkFromReadBarrierWithMeasurements(Thread* const self,
+ mirror::Object* from_ref)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
void DumpPerformanceInfo(std::ostream& os) OVERRIDE REQUIRES(!rb_slow_path_histogram_lock_);
@@ -330,8 +338,12 @@ class ConcurrentCopying : public GarbageCollector {
bool weak_ref_access_enabled_ GUARDED_BY(Locks::thread_list_lock_);
// How many objects and bytes we moved. Used for accounting.
- Atomic<size_t> bytes_moved_;
- Atomic<size_t> objects_moved_;
+ // GC thread moves many more objects than mutators.
+ // Therefore, we separate the two to avoid CAS.
+ Atomic<size_t> bytes_moved_; // Used by mutators
+ Atomic<size_t> objects_moved_; // Used by mutators
+ size_t bytes_moved_gc_thread_; // Used by GC
+ size_t objects_moved_gc_thread_; // Used by GC
Atomic<uint64_t> cumulative_bytes_moved_;
Atomic<uint64_t> cumulative_objects_moved_;
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index 681ac2ef28..8cd484fc48 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -48,6 +48,7 @@
#include "runtime.h"
#include "thread-inl.h"
#include "thread_list.h"
+#include "write_barrier-inl.h"
using ::art::mirror::Object;
@@ -531,7 +532,7 @@ mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) {
// Dirty the card at the destionation as it may contain
// references (including the class pointer) to the bump pointer
// space.
- GetHeap()->WriteBarrierEveryFieldOf(forward_address);
+ WriteBarrier::ForEveryFieldWrite(forward_address);
// Handle the bitmaps marking.
accounting::ContinuousSpaceBitmap* live_bitmap = promo_dest_space_->GetLiveBitmap();
DCHECK(live_bitmap != nullptr);
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 948d23303c..791d0378d1 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -37,6 +37,7 @@
#include "runtime.h"
#include "thread-inl.h"
#include "verify_object.h"
+#include "write_barrier-inl.h"
namespace art {
namespace gc {
@@ -151,7 +152,7 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self,
// enabled. We don't need this for kAllocatorTypeRosAlloc/DlMalloc
// cases because we don't directly allocate into the main alloc
// space (besides promotions) under the SS/GSS collector.
- WriteBarrierField(obj, mirror::Object::ClassOffset(), klass);
+ WriteBarrier::ForFieldWrite(obj, mirror::Object::ClassOffset(), klass);
}
pre_fence_visitor(obj, usable_size);
QuasiAtomic::ThreadFenceForConstructor();
@@ -272,7 +273,7 @@ inline mirror::Object* Heap::TryToAllocate(Thread* self,
}
case kAllocatorTypeRosAlloc: {
if (kInstrumented && UNLIKELY(is_running_on_memory_tool_)) {
- // If running on valgrind or asan, we should be using the instrumented path.
+ // If running on ASan, we should be using the instrumented path.
size_t max_bytes_tl_bulk_allocated = rosalloc_space_->MaxBytesBulkAllocatedFor(alloc_size);
if (UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type,
max_bytes_tl_bulk_allocated,
@@ -303,7 +304,7 @@ inline mirror::Object* Heap::TryToAllocate(Thread* self,
}
case kAllocatorTypeDlMalloc: {
if (kInstrumented && UNLIKELY(is_running_on_memory_tool_)) {
- // If running on valgrind, we should be using the instrumented path.
+ // If running on ASan, we should be using the instrumented path.
ret = dlmalloc_space_->Alloc(self,
alloc_size,
bytes_allocated,
@@ -418,22 +419,6 @@ inline void Heap::CheckConcurrentGC(Thread* self,
}
}
-inline void Heap::WriteBarrierField(ObjPtr<mirror::Object> dst,
- MemberOffset offset ATTRIBUTE_UNUSED,
- ObjPtr<mirror::Object> new_value ATTRIBUTE_UNUSED) {
- card_table_->MarkCard(dst.Ptr());
-}
-
-inline void Heap::WriteBarrierArray(ObjPtr<mirror::Object> dst,
- int start_offset ATTRIBUTE_UNUSED,
- size_t length ATTRIBUTE_UNUSED) {
- card_table_->MarkCard(dst.Ptr());
-}
-
-inline void Heap::WriteBarrierEveryFieldOf(ObjPtr<mirror::Object> obj) {
- card_table_->MarkCard(obj.Ptr());
-}
-
} // namespace gc
} // namespace art
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 25ed652b41..58becb1d09 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -37,7 +37,6 @@
#include "base/systrace.h"
#include "base/time_utils.h"
#include "common_throws.h"
-#include "cutils/sched_policy.h"
#include "debugger.h"
#include "dex/dex_file-inl.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
@@ -2248,7 +2247,8 @@ class ZygoteCompactingCollector FINAL : public collector::SemiSpace {
// Add a new bin with the remaining space.
AddBin(size - alloc_size, pos + alloc_size);
}
- // Copy the object over to its new location. Don't use alloc_size to avoid valgrind error.
+ // Copy the object over to its new location.
+ // Historical note: We did not use `alloc_size` to avoid a Valgrind error.
memcpy(reinterpret_cast<void*>(forward_address), obj, obj_size);
if (kUseBakerReadBarrier) {
obj->AssertReadBarrierState();
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 609d2ab30e..d01437299f 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -466,23 +466,6 @@ class Heap {
// Record the bytes freed by thread-local buffer revoke.
void RecordFreeRevoke();
- // Must be called if a field of an Object in the heap changes, and before any GC safe-point.
- // The call is not needed if null is stored in the field.
- ALWAYS_INLINE void WriteBarrierField(ObjPtr<mirror::Object> dst,
- MemberOffset offset,
- ObjPtr<mirror::Object> new_value)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
- // Write barrier for array operations that update many field positions
- ALWAYS_INLINE void WriteBarrierArray(ObjPtr<mirror::Object> dst,
- int start_offset,
- // TODO: element_count or byte_count?
- size_t length)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
- ALWAYS_INLINE void WriteBarrierEveryFieldOf(ObjPtr<mirror::Object> obj)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
accounting::CardTable* GetCardTable() const {
return card_table_.get();
}
diff --git a/runtime/gc/heap_verification_test.cc b/runtime/gc/heap_verification_test.cc
index 38695332bb..6caca84854 100644
--- a/runtime/gc/heap_verification_test.cc
+++ b/runtime/gc/heap_verification_test.cc
@@ -83,7 +83,7 @@ TEST_F(VerificationTest, IsValidClassOrNotInHeap) {
}
TEST_F(VerificationTest, IsValidClassInHeap) {
- TEST_DISABLED_FOR_MEMORY_TOOL();
+ TEST_DISABLED_FOR_MEMORY_TOOL_WITH_HEAP_POISONING();
ScopedObjectAccess soa(Thread::Current());
VariableSizedHandleScope hs(soa.Self());
Handle<mirror::String> string(
@@ -106,7 +106,7 @@ TEST_F(VerificationTest, DumpInvalidObjectInfo) {
}
TEST_F(VerificationTest, DumpValidObjectInfo) {
- TEST_DISABLED_FOR_MEMORY_TOOL();
+ TEST_DISABLED_FOR_MEMORY_TOOL_WITH_HEAP_POISONING();
ScopedLogSeverity sls(LogSeverity::INFO);
ScopedObjectAccess soa(Thread::Current());
Runtime* const runtime = Runtime::Current();
@@ -126,7 +126,7 @@ TEST_F(VerificationTest, DumpValidObjectInfo) {
}
TEST_F(VerificationTest, LogHeapCorruption) {
- TEST_DISABLED_FOR_MEMORY_TOOL();
+ TEST_DISABLED_FOR_MEMORY_TOOL_WITH_HEAP_POISONING();
ScopedLogSeverity sls(LogSeverity::INFO);
ScopedObjectAccess soa(Thread::Current());
Runtime* const runtime = Runtime::Current();
@@ -147,7 +147,6 @@ TEST_F(VerificationTest, LogHeapCorruption) {
}
TEST_F(VerificationTest, FindPathFromRootSet) {
- TEST_DISABLED_FOR_MEMORY_TOOL();
ScopedLogSeverity sls(LogSeverity::INFO);
ScopedObjectAccess soa(Thread::Current());
Runtime* const runtime = Runtime::Current();
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index e754fbcbae..cbce940337 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -1875,7 +1875,7 @@ std::string ImageSpace::GetMultiImageBootClassPath(
bool ImageSpace::ValidateOatFile(const OatFile& oat_file, std::string* error_msg) {
const ArtDexFileLoader dex_file_loader;
- for (const OatFile::OatDexFile* oat_dex_file : oat_file.GetOatDexFiles()) {
+ for (const OatDexFile* oat_dex_file : oat_file.GetOatDexFiles()) {
const std::string& dex_file_location = oat_dex_file->GetDexFileLocation();
// Skip multidex locations - These will be checked when we visit their
@@ -1909,9 +1909,9 @@ bool ImageSpace::ValidateOatFile(const OatFile& oat_file, std::string* error_msg
std::string multi_dex_location = DexFileLoader::GetMultiDexLocation(
i,
dex_file_location.c_str());
- const OatFile::OatDexFile* multi_dex = oat_file.GetOatDexFile(multi_dex_location.c_str(),
- nullptr,
- error_msg);
+ const OatDexFile* multi_dex = oat_file.GetOatDexFile(multi_dex_location.c_str(),
+ nullptr,
+ error_msg);
if (multi_dex == nullptr) {
*error_msg = StringPrintf("ValidateOatFile oat file '%s' is missing entry '%s'",
oat_file.GetLocation().c_str(),
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 512cde484d..a24ca32314 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -45,8 +45,9 @@ class MemoryToolLargeObjectMapSpace FINAL : public LargeObjectMapSpace {
}
~MemoryToolLargeObjectMapSpace() OVERRIDE {
- // Keep valgrind happy if there is any large objects such as dex cache arrays which aren't
- // freed since they are held live by the class linker.
+ // Historical note: We were deleting large objects to keep Valgrind happy if there were
+ // any large objects such as Dex cache arrays which aren't freed since they are held live
+ // by the class linker.
MutexLock mu(Thread::Current(), lock_);
for (auto& m : large_objects_) {
delete m.second.mem_map;
diff --git a/runtime/gc/space/memory_tool_malloc_space-inl.h b/runtime/gc/space/memory_tool_malloc_space-inl.h
index 8282f3dda7..c022171082 100644
--- a/runtime/gc/space/memory_tool_malloc_space-inl.h
+++ b/runtime/gc/space/memory_tool_malloc_space-inl.h
@@ -30,11 +30,14 @@ namespace space {
namespace memory_tool_details {
template <size_t kMemoryToolRedZoneBytes, bool kUseObjSizeForUsable>
-inline mirror::Object* AdjustForValgrind(void* obj_with_rdz, size_t num_bytes,
- size_t bytes_allocated, size_t usable_size,
- size_t bytes_tl_bulk_allocated,
- size_t* bytes_allocated_out, size_t* usable_size_out,
- size_t* bytes_tl_bulk_allocated_out) {
+inline mirror::Object* AdjustForMemoryTool(void* obj_with_rdz,
+ size_t num_bytes,
+ size_t bytes_allocated,
+ size_t usable_size,
+ size_t bytes_tl_bulk_allocated,
+ size_t* bytes_allocated_out,
+ size_t* usable_size_out,
+ size_t* bytes_tl_bulk_allocated_out) {
if (bytes_allocated_out != nullptr) {
*bytes_allocated_out = bytes_allocated;
}
@@ -84,24 +87,31 @@ template <typename S,
bool kUseObjSizeForUsable>
mirror::Object*
MemoryToolMallocSpace<S,
- kMemoryToolRedZoneBytes,
- kAdjustForRedzoneInAllocSize,
- kUseObjSizeForUsable>::AllocWithGrowth(
- Thread* self, size_t num_bytes, size_t* bytes_allocated_out, size_t* usable_size_out,
+ kMemoryToolRedZoneBytes,
+ kAdjustForRedzoneInAllocSize,
+ kUseObjSizeForUsable>::AllocWithGrowth(
+ Thread* self,
+ size_t num_bytes,
+ size_t* bytes_allocated_out,
+ size_t* usable_size_out,
size_t* bytes_tl_bulk_allocated_out) {
size_t bytes_allocated;
size_t usable_size;
size_t bytes_tl_bulk_allocated;
- void* obj_with_rdz = S::AllocWithGrowth(self, num_bytes + 2 * kMemoryToolRedZoneBytes,
- &bytes_allocated, &usable_size,
+ void* obj_with_rdz = S::AllocWithGrowth(self,
+ num_bytes + 2 * kMemoryToolRedZoneBytes,
+ &bytes_allocated,
+ &usable_size,
&bytes_tl_bulk_allocated);
if (obj_with_rdz == nullptr) {
return nullptr;
}
- return memory_tool_details::AdjustForValgrind<kMemoryToolRedZoneBytes, kUseObjSizeForUsable>(
- obj_with_rdz, num_bytes,
- bytes_allocated, usable_size,
+ return memory_tool_details::AdjustForMemoryTool<kMemoryToolRedZoneBytes, kUseObjSizeForUsable>(
+ obj_with_rdz,
+ num_bytes,
+ bytes_allocated,
+ usable_size,
bytes_tl_bulk_allocated,
bytes_allocated_out,
usable_size_out,
@@ -113,27 +123,35 @@ template <typename S,
bool kAdjustForRedzoneInAllocSize,
bool kUseObjSizeForUsable>
mirror::Object* MemoryToolMallocSpace<S,
- kMemoryToolRedZoneBytes,
- kAdjustForRedzoneInAllocSize,
- kUseObjSizeForUsable>::Alloc(
- Thread* self, size_t num_bytes, size_t* bytes_allocated_out, size_t* usable_size_out,
+ kMemoryToolRedZoneBytes,
+ kAdjustForRedzoneInAllocSize,
+ kUseObjSizeForUsable>::Alloc(
+ Thread* self,
+ size_t num_bytes,
+ size_t* bytes_allocated_out,
+ size_t* usable_size_out,
size_t* bytes_tl_bulk_allocated_out) {
size_t bytes_allocated;
size_t usable_size;
size_t bytes_tl_bulk_allocated;
- void* obj_with_rdz = S::Alloc(self, num_bytes + 2 * kMemoryToolRedZoneBytes,
- &bytes_allocated, &usable_size, &bytes_tl_bulk_allocated);
+ void* obj_with_rdz = S::Alloc(self,
+ num_bytes + 2 * kMemoryToolRedZoneBytes,
+ &bytes_allocated,
+ &usable_size,
+ &bytes_tl_bulk_allocated);
if (obj_with_rdz == nullptr) {
return nullptr;
}
- return memory_tool_details::AdjustForValgrind<kMemoryToolRedZoneBytes,
- kUseObjSizeForUsable>(obj_with_rdz, num_bytes,
- bytes_allocated, usable_size,
- bytes_tl_bulk_allocated,
- bytes_allocated_out,
- usable_size_out,
- bytes_tl_bulk_allocated_out);
+ return memory_tool_details::AdjustForMemoryTool<kMemoryToolRedZoneBytes, kUseObjSizeForUsable>(
+ obj_with_rdz,
+ num_bytes,
+ bytes_allocated,
+ usable_size,
+ bytes_tl_bulk_allocated,
+ bytes_allocated_out,
+ usable_size_out,
+ bytes_tl_bulk_allocated_out);
}
template <typename S,
@@ -141,24 +159,31 @@ template <typename S,
bool kAdjustForRedzoneInAllocSize,
bool kUseObjSizeForUsable>
mirror::Object* MemoryToolMallocSpace<S,
- kMemoryToolRedZoneBytes,
- kAdjustForRedzoneInAllocSize,
- kUseObjSizeForUsable>::AllocThreadUnsafe(
- Thread* self, size_t num_bytes, size_t* bytes_allocated_out, size_t* usable_size_out,
+ kMemoryToolRedZoneBytes,
+ kAdjustForRedzoneInAllocSize,
+ kUseObjSizeForUsable>::AllocThreadUnsafe(
+ Thread* self,
+ size_t num_bytes,
+ size_t* bytes_allocated_out,
+ size_t* usable_size_out,
size_t* bytes_tl_bulk_allocated_out) {
size_t bytes_allocated;
size_t usable_size;
size_t bytes_tl_bulk_allocated;
- void* obj_with_rdz = S::AllocThreadUnsafe(self, num_bytes + 2 * kMemoryToolRedZoneBytes,
- &bytes_allocated, &usable_size,
+ void* obj_with_rdz = S::AllocThreadUnsafe(self,
+ num_bytes + 2 * kMemoryToolRedZoneBytes,
+ &bytes_allocated,
+ &usable_size,
&bytes_tl_bulk_allocated);
if (obj_with_rdz == nullptr) {
return nullptr;
}
- return memory_tool_details::AdjustForValgrind<kMemoryToolRedZoneBytes, kUseObjSizeForUsable>(
- obj_with_rdz, num_bytes,
- bytes_allocated, usable_size,
+ return memory_tool_details::AdjustForMemoryTool<kMemoryToolRedZoneBytes, kUseObjSizeForUsable>(
+ obj_with_rdz,
+ num_bytes,
+ bytes_allocated,
+ usable_size,
bytes_tl_bulk_allocated,
bytes_allocated_out,
usable_size_out,
@@ -170,12 +195,14 @@ template <typename S,
bool kAdjustForRedzoneInAllocSize,
bool kUseObjSizeForUsable>
size_t MemoryToolMallocSpace<S,
- kMemoryToolRedZoneBytes,
- kAdjustForRedzoneInAllocSize,
- kUseObjSizeForUsable>::AllocationSize(
+ kMemoryToolRedZoneBytes,
+ kAdjustForRedzoneInAllocSize,
+ kUseObjSizeForUsable>::AllocationSize(
mirror::Object* obj, size_t* usable_size) {
- size_t result = S::AllocationSize(reinterpret_cast<mirror::Object*>(
- reinterpret_cast<uint8_t*>(obj) - (kAdjustForRedzoneInAllocSize ? kMemoryToolRedZoneBytes : 0)),
+ size_t result = S::AllocationSize(
+ reinterpret_cast<mirror::Object*>(
+ reinterpret_cast<uint8_t*>(obj)
+ - (kAdjustForRedzoneInAllocSize ? kMemoryToolRedZoneBytes : 0)),
usable_size);
if (usable_size != nullptr) {
if (kUseObjSizeForUsable) {
@@ -192,10 +219,9 @@ template <typename S,
bool kAdjustForRedzoneInAllocSize,
bool kUseObjSizeForUsable>
size_t MemoryToolMallocSpace<S,
- kMemoryToolRedZoneBytes,
- kAdjustForRedzoneInAllocSize,
- kUseObjSizeForUsable>::Free(
- Thread* self, mirror::Object* ptr) {
+ kMemoryToolRedZoneBytes,
+ kAdjustForRedzoneInAllocSize,
+ kUseObjSizeForUsable>::Free(Thread* self, mirror::Object* ptr) {
void* obj_after_rdz = reinterpret_cast<void*>(ptr);
uint8_t* obj_with_rdz = reinterpret_cast<uint8_t*>(obj_after_rdz) - kMemoryToolRedZoneBytes;
@@ -220,10 +246,10 @@ template <typename S,
bool kAdjustForRedzoneInAllocSize,
bool kUseObjSizeForUsable>
size_t MemoryToolMallocSpace<S,
- kMemoryToolRedZoneBytes,
- kAdjustForRedzoneInAllocSize,
- kUseObjSizeForUsable>::FreeList(
- Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
+ kMemoryToolRedZoneBytes,
+ kAdjustForRedzoneInAllocSize,
+ kUseObjSizeForUsable>::FreeList(
+ Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
size_t freed = 0;
for (size_t i = 0; i < num_ptrs; i++) {
freed += Free(self, ptrs[i]);
@@ -238,11 +264,12 @@ template <typename S,
bool kUseObjSizeForUsable>
template <typename... Params>
MemoryToolMallocSpace<S,
- kMemoryToolRedZoneBytes,
- kAdjustForRedzoneInAllocSize,
- kUseObjSizeForUsable>::MemoryToolMallocSpace(
- MemMap* mem_map, size_t initial_size, Params... params) : S(mem_map, initial_size, params...) {
- // Don't want to change the valgrind states of the mem map here as the allocator is already
+ kMemoryToolRedZoneBytes,
+ kAdjustForRedzoneInAllocSize,
+ kUseObjSizeForUsable>::MemoryToolMallocSpace(
+ MemMap* mem_map, size_t initial_size, Params... params)
+ : S(mem_map, initial_size, params...) {
+ // Don't want to change the memory tool states of the mem map here as the allocator is already
// initialized at this point and that may interfere with what the allocator does internally. Note
// that the tail beyond the initial size is mprotected.
}
@@ -252,9 +279,9 @@ template <typename S,
bool kAdjustForRedzoneInAllocSize,
bool kUseObjSizeForUsable>
size_t MemoryToolMallocSpace<S,
- kMemoryToolRedZoneBytes,
- kAdjustForRedzoneInAllocSize,
- kUseObjSizeForUsable>::MaxBytesBulkAllocatedFor(size_t num_bytes) {
+ kMemoryToolRedZoneBytes,
+ kAdjustForRedzoneInAllocSize,
+ kUseObjSizeForUsable>::MaxBytesBulkAllocatedFor(size_t num_bytes) {
return S::MaxBytesBulkAllocatedFor(num_bytes + 2 * kMemoryToolRedZoneBytes);
}
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index e7865363a1..b0402e4b83 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -77,7 +77,7 @@ RosAllocSpace* RosAllocSpace::CreateFromMemMap(MemMap* mem_map, const std::strin
// Everything is set so record in immutable structure and leave
uint8_t* begin = mem_map->Begin();
- // TODO: Fix RosAllocSpace to support Valgrind/ASan. There is currently some issues with
+ // TODO: Fix RosAllocSpace to support ASan. There is currently some issues with
// AllocationSize caused by redzones. b/12944686
if (running_on_memory_tool) {
return new MemoryToolMallocSpace<RosAllocSpace, kDefaultMemoryToolRedZoneBytes, false, true>(
@@ -382,12 +382,12 @@ size_t RosAllocSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usab
size_t size = obj->SizeOf<kVerifyNone>();
bool add_redzones = false;
if (kMaybeIsRunningOnMemoryTool) {
- add_redzones = RUNNING_ON_MEMORY_TOOL ? kMemoryToolAddsRedzones : 0;
+ add_redzones = kRunningOnMemoryTool && kMemoryToolAddsRedzones;
if (add_redzones) {
size += 2 * kDefaultMemoryToolRedZoneBytes;
}
} else {
- DCHECK_EQ(RUNNING_ON_MEMORY_TOOL, 0U);
+ DCHECK(!kRunningOnMemoryTool);
}
size_t size_by_size = rosalloc_->UsableSize(size);
if (kIsDebugBuild) {
diff --git a/runtime/gc/space/rosalloc_space.h b/runtime/gc/space/rosalloc_space.h
index 9d16b87b7d..4c17233360 100644
--- a/runtime/gc/space/rosalloc_space.h
+++ b/runtime/gc/space/rosalloc_space.h
@@ -159,8 +159,8 @@ class RosAllocSpace : public MallocSpace {
void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size,
size_t maximum_size, bool low_memory_mode) OVERRIDE {
- return CreateRosAlloc(base, morecore_start, initial_size, maximum_size, low_memory_mode,
- RUNNING_ON_MEMORY_TOOL != 0);
+ return CreateRosAlloc(
+ base, morecore_start, initial_size, maximum_size, low_memory_mode, kRunningOnMemoryTool);
}
static allocator::RosAlloc* CreateRosAlloc(void* base, size_t morecore_start, size_t initial_size,
size_t maximum_size, bool low_memory_mode,
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index 7bd5a6a68a..c9e8426340 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -24,7 +24,6 @@
#include "hprof.h"
-#include <cutils/open_memstream.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
diff --git a/runtime/image.cc b/runtime/image.cc
index 17fc664bd7..7819c0bc00 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -26,7 +26,7 @@
namespace art {
const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '6', '1', '\0' }; // Pre-allocated Throwables.
+const uint8_t ImageHeader::kImageVersion[] = { '0', '6', '2', '\0' }; // Boot image live objects.
ImageHeader::ImageHeader(uint32_t image_begin,
uint32_t image_size,
diff --git a/runtime/image.h b/runtime/image.h
index c6fc052a60..c1cde0a74a 100644
--- a/runtime/image.h
+++ b/runtime/image.h
@@ -211,8 +211,12 @@ class PACKED(4) ImageHeader {
kOomeWhenThrowingOome, // Pre-allocated OOME when throwing OOME.
kOomeWhenHandlingStackOverflow, // Pre-allocated OOME when handling StackOverflowError.
kNoClassDefFoundError, // Pre-allocated NoClassDefFoundError.
- kClassLoader, // App image only.
+ kSpecialRoots, // Different for boot image and app image, see aliases below.
kImageRootsMax,
+
+ // Aliases.
+ kAppImageClassLoader = kSpecialRoots, // The class loader used to build the app image.
+ kBootImageLiveObjects = kSpecialRoots, // Array of boot image objects that must be kept live.
};
enum ImageSections {
@@ -229,8 +233,10 @@ class PACKED(4) ImageHeader {
kSectionCount, // Number of elements in enum.
};
- static size_t NumberOfImageRoots(bool app_image) {
- return app_image ? kImageRootsMax : kImageRootsMax - 1u;
+ static size_t NumberOfImageRoots(bool app_image ATTRIBUTE_UNUSED) {
+ // At the moment, boot image and app image have the same number of roots,
+ // though the meaning of the kSpecialRoots is different.
+ return kImageRootsMax;
}
ArtMethod* GetImageMethod(ImageMethod index) const;
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index d7f33d5e43..e5cdef749e 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -154,8 +154,16 @@ void Instrumentation::InstallStubsForMethod(ArtMethod* method) {
return;
}
// Don't stub Proxy.<init>. Note that the Proxy class itself is not a proxy class.
- if (method->IsConstructor() &&
- method->GetDeclaringClass()->DescriptorEquals("Ljava/lang/reflect/Proxy;")) {
+ // TODO We should remove the need for this since it means we cannot always correctly detect calls
+ // to Proxy.<init>
+ // Annoyingly this can be called before we have actually initialized WellKnownClasses so therefore
+ // we also need to check this based on the declaring-class descriptor. The check is valid because
+ // Proxy only has a single constructor.
+ ArtMethod* well_known_proxy_init = jni::DecodeArtMethod(
+ WellKnownClasses::java_lang_reflect_Proxy_init);
+ if ((LIKELY(well_known_proxy_init != nullptr) && UNLIKELY(method == well_known_proxy_init)) ||
+ UNLIKELY(method->IsConstructor() &&
+ method->GetDeclaringClass()->DescriptorEquals("Ljava/lang/reflect/Proxy;"))) {
return;
}
const void* new_quick_code;
@@ -244,7 +252,7 @@ static void InstrumentationInstallStack(Thread* thread, void* arg)
if (m->IsRuntimeMethod()) {
const InstrumentationStackFrame& frame =
- instrumentation_stack_->at(instrumentation_stack_depth_);
+ (*instrumentation_stack_)[instrumentation_stack_depth_];
if (frame.interpreter_entry_) {
// This instrumentation frame is for an interpreter bridge and is
// pushed when executing the instrumented interpreter bridge. So method
@@ -263,7 +271,7 @@ static void InstrumentationInstallStack(Thread* thread, void* arg)
reached_existing_instrumentation_frames_ = true;
const InstrumentationStackFrame& frame =
- instrumentation_stack_->at(instrumentation_stack_depth_);
+ (*instrumentation_stack_)[instrumentation_stack_depth_];
CHECK_EQ(m, frame.method_) << "Expected " << ArtMethod::PrettyMethod(m)
<< ", Found " << ArtMethod::PrettyMethod(frame.method_);
return_pc = frame.return_pc_;
@@ -354,7 +362,7 @@ static void InstrumentationInstallStack(Thread* thread, void* arg)
}
uint32_t dex_pc = visitor.dex_pcs_.back();
visitor.dex_pcs_.pop_back();
- if (!isi->interpreter_entry_) {
+ if (!isi->interpreter_entry_ && !isi->method_->IsRuntimeMethod()) {
instrumentation->MethodEnterEvent(thread, (*isi).this_object_, (*isi).method_, dex_pc);
}
}
@@ -785,7 +793,13 @@ void Instrumentation::UpdateMethodsCodeImpl(ArtMethod* method, const void* quick
if (class_linker->IsQuickResolutionStub(quick_code) ||
class_linker->IsQuickToInterpreterBridge(quick_code)) {
new_quick_code = quick_code;
- } else if (entry_exit_stubs_installed_) {
+ } else if (entry_exit_stubs_installed_ &&
+ // We need to make sure not to replace anything that InstallStubsForMethod
+ // wouldn't. Specifically we cannot stub out Proxy.<init> since subtypes copy the
+ // implementation directly and this will confuse the instrumentation trampolines.
+ // TODO We should remove the need for this since it makes it impossible to profile
+ // Proxy.<init> correctly in all cases.
+ method != jni::DecodeArtMethod(WellKnownClasses::java_lang_reflect_Proxy_init)) {
new_quick_code = GetQuickInstrumentationEntryPoint();
} else {
new_quick_code = quick_code;
@@ -912,7 +926,7 @@ void Instrumentation::Undeoptimize(ArtMethod* method) {
}
// If there is no deoptimized method left, we can restore the stack of each thread.
- if (empty) {
+ if (empty && !entry_exit_stubs_installed_) {
MutexLock mu(self, *Locks::thread_list_lock_);
Runtime::Current()->GetThreadList()->ForEach(InstrumentationRestoreStack, this);
instrumentation_stubs_installed_ = false;
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index 2db8815fdd..c8aaa21589 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -366,7 +366,7 @@ bool InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& a,
size_t InternTable::Table::AddTableFromMemory(const uint8_t* ptr) {
size_t read_count = 0;
UnorderedSet set(ptr, /*make copy*/false, &read_count);
- if (set.Empty()) {
+ if (set.empty()) {
// Avoid inserting empty sets.
return read_count;
}
@@ -392,7 +392,7 @@ size_t InternTable::Table::WriteToMemory(uint8_t* ptr) {
table_to_write = &combined;
for (UnorderedSet& table : tables_) {
for (GcRoot<mirror::String>& string : table) {
- combined.Insert(string);
+ combined.insert(string);
}
}
} else {
@@ -403,9 +403,9 @@ size_t InternTable::Table::WriteToMemory(uint8_t* ptr) {
void InternTable::Table::Remove(ObjPtr<mirror::String> s) {
for (UnorderedSet& table : tables_) {
- auto it = table.Find(GcRoot<mirror::String>(s));
+ auto it = table.find(GcRoot<mirror::String>(s));
if (it != table.end()) {
- table.Erase(it);
+ table.erase(it);
return;
}
}
@@ -415,7 +415,7 @@ void InternTable::Table::Remove(ObjPtr<mirror::String> s) {
ObjPtr<mirror::String> InternTable::Table::Find(ObjPtr<mirror::String> s) {
Locks::intern_table_lock_->AssertHeld(Thread::Current());
for (UnorderedSet& table : tables_) {
- auto it = table.Find(GcRoot<mirror::String>(s));
+ auto it = table.find(GcRoot<mirror::String>(s));
if (it != table.end()) {
return it->Read();
}
@@ -426,7 +426,7 @@ ObjPtr<mirror::String> InternTable::Table::Find(ObjPtr<mirror::String> s) {
ObjPtr<mirror::String> InternTable::Table::Find(const Utf8String& string) {
Locks::intern_table_lock_->AssertHeld(Thread::Current());
for (UnorderedSet& table : tables_) {
- auto it = table.Find(string);
+ auto it = table.find(string);
if (it != table.end()) {
return it->Read();
}
@@ -442,7 +442,7 @@ void InternTable::Table::Insert(ObjPtr<mirror::String> s) {
// Always insert the last table, the image tables are before and we avoid inserting into these
// to prevent dirty pages.
DCHECK(!tables_.empty());
- tables_.back().Insert(GcRoot<mirror::String>(s));
+ tables_.back().insert(GcRoot<mirror::String>(s));
}
void InternTable::Table::VisitRoots(RootVisitor* visitor) {
@@ -467,7 +467,7 @@ void InternTable::Table::SweepWeaks(UnorderedSet* set, IsMarkedVisitor* visitor)
mirror::Object* object = it->Read<kWithoutReadBarrier>();
mirror::Object* new_object = visitor->IsMarked(object);
if (new_object == nullptr) {
- it = set->Erase(it);
+ it = set->erase(it);
} else {
*it = GcRoot<mirror::String>(new_object->AsString());
++it;
@@ -480,7 +480,7 @@ size_t InternTable::Table::Size() const {
tables_.end(),
0U,
[](size_t sum, const UnorderedSet& set) {
- return sum + set.Size();
+ return sum + set.size();
});
}
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 27f761a144..92d4731480 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -1164,7 +1164,7 @@ static ObjPtr<mirror::CallSite> InvokeBootstrapMethod(Thread* self,
ShadowFrame& shadow_frame,
uint32_t call_site_idx)
REQUIRES_SHARED(Locks::mutator_lock_) {
- StackHandleScope<7> hs(self);
+ StackHandleScope<5> hs(self);
// There are three mandatory arguments expected from the call site
// value array in the DEX file: the bootstrap method handle, the
// method name to pass to the bootstrap method, and the method type
@@ -1358,75 +1358,80 @@ static ObjPtr<mirror::CallSite> InvokeBootstrapMethod(Thread* self,
}
// Check the call site target is not null as we're going to invoke it.
- Handle<mirror::CallSite> call_site =
- hs.NewHandle(ObjPtr<mirror::CallSite>::DownCast(ObjPtr<mirror::Object>(result.GetL())));
- Handle<mirror::MethodHandle> target = hs.NewHandle(call_site->GetTarget());
- if (UNLIKELY(target.IsNull())) {
+ ObjPtr<mirror::CallSite> call_site =
+ ObjPtr<mirror::CallSite>::DownCast(ObjPtr<mirror::Object>(result.GetL()));
+ ObjPtr<mirror::MethodHandle> target = call_site->GetTarget();
+ if (UNLIKELY(target == nullptr)) {
ThrowClassCastException("Bootstrap method returned a CallSite with a null target");
return nullptr;
}
- return call_site.Get();
+ return call_site;
}
-template<bool is_range>
+namespace {
+
+ObjPtr<mirror::CallSite> DoResolveCallSite(Thread* self,
+ ShadowFrame& shadow_frame,
+ uint32_t call_site_idx)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ StackHandleScope<1> hs(self);
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(shadow_frame.GetMethod()->GetDexCache()));
+
+ // Get the call site from the DexCache if present.
+ ObjPtr<mirror::CallSite> call_site = dex_cache->GetResolvedCallSite(call_site_idx);
+ if (LIKELY(call_site != nullptr)) {
+ return call_site;
+ }
+
+ // Invoke the bootstrap method to get a candidate call site.
+ call_site = InvokeBootstrapMethod(self, shadow_frame, call_site_idx);
+ if (UNLIKELY(call_site == nullptr)) {
+ if (!self->GetException()->IsError()) {
+ // Use a BootstrapMethodError if the exception is not an instance of java.lang.Error.
+ ThrowWrappedBootstrapMethodError("Exception from call site #%u bootstrap method",
+ call_site_idx);
+ }
+ return nullptr;
+ }
+
+ // Attempt to place the candidate call site into the DexCache, return the winning call site.
+ return dex_cache->SetResolvedCallSite(call_site_idx, call_site);
+}
+
+} // namespace
+
bool DoInvokeCustom(Thread* self,
ShadowFrame& shadow_frame,
- const Instruction* inst,
- uint16_t inst_data,
- JValue* result)
- REQUIRES_SHARED(Locks::mutator_lock_) {
+ uint32_t call_site_idx,
+ const InstructionOperands* operands,
+ JValue* result) {
// Make sure to check for async exceptions
if (UNLIKELY(self->ObserveAsyncException())) {
return false;
}
+
// invoke-custom is not supported in transactions. In transactions
// there is a limited set of types supported. invoke-custom allows
// running arbitrary code and instantiating arbitrary types.
CHECK(!Runtime::Current()->IsActiveTransaction());
- StackHandleScope<4> hs(self);
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(shadow_frame.GetMethod()->GetDexCache()));
- const uint32_t call_site_idx = is_range ? inst->VRegB_3rc() : inst->VRegB_35c();
- MutableHandle<mirror::CallSite>
- call_site(hs.NewHandle(dex_cache->GetResolvedCallSite(call_site_idx)));
+
+ ObjPtr<mirror::CallSite> call_site = DoResolveCallSite(self, shadow_frame, call_site_idx);
if (call_site.IsNull()) {
- call_site.Assign(InvokeBootstrapMethod(self, shadow_frame, call_site_idx));
- if (UNLIKELY(call_site.IsNull())) {
- CHECK(self->IsExceptionPending());
- if (!self->GetException()->IsError()) {
- // Use a BootstrapMethodError if the exception is not an instance of java.lang.Error.
- ThrowWrappedBootstrapMethodError("Exception from call site #%u bootstrap method",
- call_site_idx);
- }
- result->SetJ(0);
- return false;
- }
- mirror::CallSite* winning_call_site =
- dex_cache->SetResolvedCallSite(call_site_idx, call_site.Get());
- call_site.Assign(winning_call_site);
+ DCHECK(self->IsExceptionPending());
+ return false;
}
+ StackHandleScope<2> hs(self);
Handle<mirror::MethodHandle> target = hs.NewHandle(call_site->GetTarget());
Handle<mirror::MethodType> target_method_type = hs.NewHandle(target->GetMethodType());
- DCHECK_EQ(static_cast<size_t>(inst->VRegA()), target_method_type->NumberOfVRegs());
- if (is_range) {
- RangeInstructionOperands operands(inst->VRegC_3rc(), inst->VRegA_3rc());
- return MethodHandleInvokeExact(self,
- shadow_frame,
- target,
- target_method_type,
- &operands,
- result);
- } else {
- uint32_t args[Instruction::kMaxVarArgRegs];
- inst->GetVarArgs(args, inst_data);
- VarArgsInstructionOperands operands(args, inst->VRegA_35c());
- return MethodHandleInvokeExact(self,
- shadow_frame,
- target,
- target_method_type,
- &operands,
- result);
- }
+ DCHECK_EQ(operands->GetNumberOfOperands(), target_method_type->NumberOfVRegs())
+ << " call_site_idx" << call_site_idx;
+ return MethodHandleInvokeExact(self,
+ shadow_frame,
+ target,
+ target_method_type,
+ operands,
+ result);
}
// Assign register 'src_reg' from shadow_frame to register 'dest_reg' into new_shadow_frame.
@@ -1847,16 +1852,6 @@ EXPLICIT_DO_INVOKE_POLYMORPHIC_TEMPLATE_DECL(false);
EXPLICIT_DO_INVOKE_POLYMORPHIC_TEMPLATE_DECL(true);
#undef EXPLICIT_DO_INVOKE_POLYMORPHIC_TEMPLATE_DECL
-// Explicit DoInvokeCustom template function declarations.
-#define EXPLICIT_DO_INVOKE_CUSTOM_TEMPLATE_DECL(_is_range) \
- template REQUIRES_SHARED(Locks::mutator_lock_) \
- bool DoInvokeCustom<_is_range>( \
- Thread* self, ShadowFrame& shadow_frame, const Instruction* inst, \
- uint16_t inst_data, JValue* result)
-EXPLICIT_DO_INVOKE_CUSTOM_TEMPLATE_DECL(false);
-EXPLICIT_DO_INVOKE_CUSTOM_TEMPLATE_DECL(true);
-#undef EXPLICIT_DO_INVOKE_CUSTOM_TEMPLATE_DECL
-
// Explicit DoFilledNewArray template function declarations.
#define EXPLICIT_DO_FILLED_NEW_ARRAY_TEMPLATE_DECL(_is_range_, _check, _transaction_active) \
template REQUIRES_SHARED(Locks::mutator_lock_) \
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 60bf50546f..b324b4c99d 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -242,7 +242,15 @@ bool DoInvokePolymorphic(Thread* self,
ShadowFrame& shadow_frame,
const Instruction* inst,
uint16_t inst_data,
- JValue* result);
+ JValue* result)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+bool DoInvokeCustom(Thread* self,
+ ShadowFrame& shadow_frame,
+ uint32_t call_site_idx,
+ const InstructionOperands* operands,
+ JValue* result)
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Performs a custom invoke (invoke-custom/invoke-custom-range).
template<bool is_range>
@@ -250,7 +258,19 @@ bool DoInvokeCustom(Thread* self,
ShadowFrame& shadow_frame,
const Instruction* inst,
uint16_t inst_data,
- JValue* result);
+ JValue* result)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ const uint32_t call_site_idx = is_range ? inst->VRegB_3rc() : inst->VRegB_35c();
+ if (is_range) {
+ RangeInstructionOperands operands(inst->VRegC_3rc(), inst->VRegA_3rc());
+ return DoInvokeCustom(self, shadow_frame, call_site_idx, &operands, result);
+ } else {
+ uint32_t args[Instruction::kMaxVarArgRegs];
+ inst->GetVarArgs(args, inst_data);
+ VarArgsInstructionOperands operands(args, inst->VRegA_35c());
+ return DoInvokeCustom(self, shadow_frame, call_site_idx, &operands, result);
+ }
+}
// Handles invoke-virtual-quick and invoke-virtual-quick-range instructions.
// Returns true on success, otherwise throws an exception and returns false.
diff --git a/runtime/interpreter/mterp/arm/op_mul_long.S b/runtime/interpreter/mterp/arm/op_mul_long.S
index a13c803301..4f55280871 100644
--- a/runtime/interpreter/mterp/arm/op_mul_long.S
+++ b/runtime/interpreter/mterp/arm/op_mul_long.S
@@ -29,6 +29,7 @@
mla r2, r0, r3, ip @ r2<- YxX + (ZxW)
mov r0, rINST, lsr #8 @ r0<- AA
add r2, r2, lr @ r2<- lr + low(ZxW + (YxX))
+ CLEAR_SHADOW_PAIR r0, lr, ip @ Zero out the shadow regs
VREG_INDEX_TO_ADDR r0, r0 @ r0<- &fp[AA]
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
GET_INST_OPCODE ip @ extract opcode from rINST
diff --git a/runtime/interpreter/mterp/out/mterp_arm.S b/runtime/interpreter/mterp/out/mterp_arm.S
index b2702a9ffc..53ea3651cc 100644
--- a/runtime/interpreter/mterp/out/mterp_arm.S
+++ b/runtime/interpreter/mterp/out/mterp_arm.S
@@ -4475,6 +4475,7 @@ constvalop_long_to_double:
mla r2, r0, r3, ip @ r2<- YxX + (ZxW)
mov r0, rINST, lsr #8 @ r0<- AA
add r2, r2, lr @ r2<- lr + low(ZxW + (YxX))
+ CLEAR_SHADOW_PAIR r0, lr, ip @ Zero out the shadow regs
VREG_INDEX_TO_ADDR r0, r0 @ r0<- &fp[AA]
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
GET_INST_OPCODE ip @ extract opcode from rINST
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 667bd03c18..22a6e9d941 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -1485,13 +1485,17 @@ void UnstartedRuntime::UnstartedUnsafeCompareAndSwapObject(
bool success;
// Check whether we're in a transaction, call accordingly.
if (Runtime::Current()->IsActiveTransaction()) {
- success = obj->CasFieldStrongSequentiallyConsistentObject<true>(MemberOffset(offset),
- expected_value,
- newValue);
+ success = obj->CasFieldObject<true>(MemberOffset(offset),
+ expected_value,
+ newValue,
+ CASMode::kStrong,
+ std::memory_order_seq_cst);
} else {
- success = obj->CasFieldStrongSequentiallyConsistentObject<false>(MemberOffset(offset),
- expected_value,
- newValue);
+ success = obj->CasFieldObject<false>(MemberOffset(offset),
+ expected_value,
+ newValue,
+ CASMode::kStrong,
+ std::memory_order_seq_cst);
}
result->SetZ(success ? 1 : 0);
}
@@ -1855,11 +1859,17 @@ void UnstartedRuntime::UnstartedJNIUnsafeCompareAndSwapInt(
jint newValue = args[4];
bool success;
if (Runtime::Current()->IsActiveTransaction()) {
- success = obj->CasFieldStrongSequentiallyConsistent32<true>(MemberOffset(offset),
- expectedValue, newValue);
+ success = obj->CasField32<true>(MemberOffset(offset),
+ expectedValue,
+ newValue,
+ CASMode::kStrong,
+ std::memory_order_seq_cst);
} else {
- success = obj->CasFieldStrongSequentiallyConsistent32<false>(MemberOffset(offset),
- expectedValue, newValue);
+ success = obj->CasField32<false>(MemberOffset(offset),
+ expectedValue,
+ newValue,
+ CASMode::kStrong,
+ std::memory_order_seq_cst);
}
result->SetZ(success ? JNI_TRUE : JNI_FALSE);
}
diff --git a/runtime/interpreter/unstarted_runtime_test.cc b/runtime/interpreter/unstarted_runtime_test.cc
index 655713e8c6..200fc5b334 100644
--- a/runtime/interpreter/unstarted_runtime_test.cc
+++ b/runtime/interpreter/unstarted_runtime_test.cc
@@ -43,6 +43,18 @@
namespace art {
namespace interpreter {
+// Deleter to be used with ShadowFrame::CreateDeoptimizedFrame objects.
+struct DeoptShadowFrameDelete {
+ // NOTE: Deleting a const object is valid but free() takes a non-const pointer.
+ void operator()(ShadowFrame* ptr) const {
+ if (ptr != nullptr) {
+ ShadowFrame::DeleteDeoptimizedFrame(ptr);
+ }
+ }
+};
+// Alias for std::unique_ptr<> that uses the above deleter.
+using UniqueDeoptShadowFramePtr = std::unique_ptr<ShadowFrame, DeoptShadowFrameDelete>;
+
class UnstartedRuntimeTest : public CommonRuntimeTest {
protected:
// Re-expose all UnstartedRuntime implementations so we don't need to declare a million
@@ -79,6 +91,14 @@ class UnstartedRuntimeTest : public CommonRuntimeTest {
#undef UNSTARTED_RUNTIME_JNI_LIST
#undef UNSTARTED_JNI
+ UniqueDeoptShadowFramePtr CreateShadowFrame(uint32_t num_vregs,
+ ShadowFrame* link,
+ ArtMethod* method,
+ uint32_t dex_pc) {
+ return UniqueDeoptShadowFramePtr(
+ ShadowFrame::CreateDeoptimizedFrame(num_vregs, link, method, dex_pc));
+ }
+
// Helpers for ArrayCopy.
//
// Note: as we have to use handles, we use StackHandleScope to transfer data. Hardcode a size
@@ -213,17 +233,15 @@ TEST_F(UnstartedRuntimeTest, MemoryPeekByte) {
const uint8_t* base_ptr = base_array;
JValue result;
- ShadowFrame* tmp = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, nullptr, 0);
+ UniqueDeoptShadowFramePtr tmp = CreateShadowFrame(10, nullptr, nullptr, 0);
for (int32_t i = 0; i < kBaseLen; ++i) {
tmp->SetVRegLong(0, static_cast<int64_t>(reinterpret_cast<intptr_t>(base_ptr + i)));
- UnstartedMemoryPeekByte(self, tmp, &result, 0);
+ UnstartedMemoryPeekByte(self, tmp.get(), &result, 0);
EXPECT_EQ(result.GetB(), static_cast<int8_t>(base_array[i]));
}
-
- ShadowFrame::DeleteDeoptimizedFrame(tmp);
}
TEST_F(UnstartedRuntimeTest, MemoryPeekShort) {
@@ -235,20 +253,18 @@ TEST_F(UnstartedRuntimeTest, MemoryPeekShort) {
const uint8_t* base_ptr = base_array;
JValue result;
- ShadowFrame* tmp = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, nullptr, 0);
+ UniqueDeoptShadowFramePtr tmp = CreateShadowFrame(10, nullptr, nullptr, 0);
int32_t adjusted_length = kBaseLen - sizeof(int16_t);
for (int32_t i = 0; i < adjusted_length; ++i) {
tmp->SetVRegLong(0, static_cast<int64_t>(reinterpret_cast<intptr_t>(base_ptr + i)));
- UnstartedMemoryPeekShort(self, tmp, &result, 0);
+ UnstartedMemoryPeekShort(self, tmp.get(), &result, 0);
typedef int16_t unaligned_short __attribute__ ((aligned (1)));
const unaligned_short* short_ptr = reinterpret_cast<const unaligned_short*>(base_ptr + i);
EXPECT_EQ(result.GetS(), *short_ptr);
}
-
- ShadowFrame::DeleteDeoptimizedFrame(tmp);
}
TEST_F(UnstartedRuntimeTest, MemoryPeekInt) {
@@ -260,20 +276,18 @@ TEST_F(UnstartedRuntimeTest, MemoryPeekInt) {
const uint8_t* base_ptr = base_array;
JValue result;
- ShadowFrame* tmp = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, nullptr, 0);
+ UniqueDeoptShadowFramePtr tmp = CreateShadowFrame(10, nullptr, nullptr, 0);
int32_t adjusted_length = kBaseLen - sizeof(int32_t);
for (int32_t i = 0; i < adjusted_length; ++i) {
tmp->SetVRegLong(0, static_cast<int64_t>(reinterpret_cast<intptr_t>(base_ptr + i)));
- UnstartedMemoryPeekInt(self, tmp, &result, 0);
+ UnstartedMemoryPeekInt(self, tmp.get(), &result, 0);
typedef int32_t unaligned_int __attribute__ ((aligned (1)));
const unaligned_int* int_ptr = reinterpret_cast<const unaligned_int*>(base_ptr + i);
EXPECT_EQ(result.GetI(), *int_ptr);
}
-
- ShadowFrame::DeleteDeoptimizedFrame(tmp);
}
TEST_F(UnstartedRuntimeTest, MemoryPeekLong) {
@@ -285,20 +299,18 @@ TEST_F(UnstartedRuntimeTest, MemoryPeekLong) {
const uint8_t* base_ptr = base_array;
JValue result;
- ShadowFrame* tmp = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, nullptr, 0);
+ UniqueDeoptShadowFramePtr tmp = CreateShadowFrame(10, nullptr, nullptr, 0);
int32_t adjusted_length = kBaseLen - sizeof(int64_t);
for (int32_t i = 0; i < adjusted_length; ++i) {
tmp->SetVRegLong(0, static_cast<int64_t>(reinterpret_cast<intptr_t>(base_ptr + i)));
- UnstartedMemoryPeekLong(self, tmp, &result, 0);
+ UnstartedMemoryPeekLong(self, tmp.get(), &result, 0);
typedef int64_t unaligned_long __attribute__ ((aligned (1)));
const unaligned_long* long_ptr = reinterpret_cast<const unaligned_long*>(base_ptr + i);
EXPECT_EQ(result.GetJ(), *long_ptr);
}
-
- ShadowFrame::DeleteDeoptimizedFrame(tmp);
}
TEST_F(UnstartedRuntimeTest, StringGetCharsNoCheck) {
@@ -317,7 +329,7 @@ TEST_F(UnstartedRuntimeTest, StringGetCharsNoCheck) {
uint16_t buf[kBaseLen];
JValue result;
- ShadowFrame* tmp = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, nullptr, 0);
+ UniqueDeoptShadowFramePtr tmp = CreateShadowFrame(10, nullptr, nullptr, 0);
for (int32_t start_index = 0; start_index < kBaseLen; ++start_index) {
for (int32_t count = 0; count <= kBaseLen; ++count) {
@@ -333,7 +345,7 @@ TEST_F(UnstartedRuntimeTest, StringGetCharsNoCheck) {
// Copy the char_array into buf.
memcpy(buf, h_char_array->GetData(), kBaseLen * sizeof(uint16_t));
- UnstartedStringCharAt(self, tmp, &result, 0);
+ UnstartedStringCharAt(self, tmp.get(), &result, 0);
uint16_t* data = h_char_array->GetData();
@@ -357,8 +369,6 @@ TEST_F(UnstartedRuntimeTest, StringGetCharsNoCheck) {
}
}
}
-
- ShadowFrame::DeleteDeoptimizedFrame(tmp);
}
TEST_F(UnstartedRuntimeTest, StringCharAt) {
@@ -371,18 +381,16 @@ TEST_F(UnstartedRuntimeTest, StringCharAt) {
ObjPtr<mirror::String> test_string = mirror::String::AllocFromModifiedUtf8(self, base_string);
JValue result;
- ShadowFrame* tmp = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, nullptr, 0);
+ UniqueDeoptShadowFramePtr tmp = CreateShadowFrame(10, nullptr, nullptr, 0);
for (int32_t i = 0; i < base_len; ++i) {
tmp->SetVRegReference(0, test_string);
tmp->SetVReg(1, i);
- UnstartedStringCharAt(self, tmp, &result, 0);
+ UnstartedStringCharAt(self, tmp.get(), &result, 0);
EXPECT_EQ(result.GetI(), base_string[i]);
}
-
- ShadowFrame::DeleteDeoptimizedFrame(tmp);
}
TEST_F(UnstartedRuntimeTest, StringInit) {
@@ -398,7 +406,7 @@ TEST_F(UnstartedRuntimeTest, StringInit) {
uint16_t inst_data[3] = { 0x2070, 0x0000, 0x0010 };
JValue result;
- ShadowFrame* shadow_frame = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, method, 0);
+ UniqueDeoptShadowFramePtr shadow_frame = CreateShadowFrame(10, nullptr, method, 0);
const char* base_string = "hello_world";
StackHandleScope<2> hs(self);
Handle<mirror::String> string_arg =
@@ -433,8 +441,6 @@ TEST_F(UnstartedRuntimeTest, StringInit) {
}
EXPECT_EQ(equal, true);
}
-
- ShadowFrame::DeleteDeoptimizedFrame(shadow_frame);
}
// Tests the exceptions that should be checked before modifying the destination.
@@ -443,7 +449,7 @@ TEST_F(UnstartedRuntimeTest, SystemArrayCopyObjectArrayTestExceptions) {
Thread* self = Thread::Current();
ScopedObjectAccess soa(self);
JValue result;
- ShadowFrame* tmp = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, nullptr, 0);
+ UniqueDeoptShadowFramePtr tmp = CreateShadowFrame(10, nullptr, nullptr, 0);
// Note: all tests are not GC safe. Assume there's no GC running here with the few objects we
// allocate.
@@ -458,26 +464,24 @@ TEST_F(UnstartedRuntimeTest, SystemArrayCopyObjectArrayTestExceptions) {
Handle<mirror::ObjectArray<mirror::Object>> array(
hs_misc.NewHandle(CreateObjectArray(self, object_class.Get(), hs_data)));
- RunArrayCopy(self, tmp, true, array.Get(), -1, array.Get(), 0, 0);
- RunArrayCopy(self, tmp, true, array.Get(), 0, array.Get(), -1, 0);
- RunArrayCopy(self, tmp, true, array.Get(), 0, array.Get(), 0, -1);
- RunArrayCopy(self, tmp, true, array.Get(), 0, array.Get(), 0, 4);
- RunArrayCopy(self, tmp, true, array.Get(), 0, array.Get(), 1, 3);
- RunArrayCopy(self, tmp, true, array.Get(), 1, array.Get(), 0, 3);
+ RunArrayCopy(self, tmp.get(), true, array.Get(), -1, array.Get(), 0, 0);
+ RunArrayCopy(self, tmp.get(), true, array.Get(), 0, array.Get(), -1, 0);
+ RunArrayCopy(self, tmp.get(), true, array.Get(), 0, array.Get(), 0, -1);
+ RunArrayCopy(self, tmp.get(), true, array.Get(), 0, array.Get(), 0, 4);
+ RunArrayCopy(self, tmp.get(), true, array.Get(), 0, array.Get(), 1, 3);
+ RunArrayCopy(self, tmp.get(), true, array.Get(), 1, array.Get(), 0, 3);
Handle<mirror::ObjectArray<mirror::Object>> class_as_array =
hs_misc.NewHandle(reinterpret_cast<mirror::ObjectArray<mirror::Object>*>(object_class.Get()));
- RunArrayCopy(self, tmp, true, class_as_array.Get(), 0, array.Get(), 0, 0);
- RunArrayCopy(self, tmp, true, array.Get(), 0, class_as_array.Get(), 0, 0);
-
- ShadowFrame::DeleteDeoptimizedFrame(tmp);
+ RunArrayCopy(self, tmp.get(), true, class_as_array.Get(), 0, array.Get(), 0, 0);
+ RunArrayCopy(self, tmp.get(), true, array.Get(), 0, class_as_array.Get(), 0, 0);
}
TEST_F(UnstartedRuntimeTest, SystemArrayCopyObjectArrayTest) {
Thread* self = Thread::Current();
ScopedObjectAccess soa(self);
JValue result;
- ShadowFrame* tmp = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, nullptr, 0);
+ UniqueDeoptShadowFramePtr tmp = CreateShadowFrame(10, nullptr, nullptr, 0);
StackHandleScope<1> hs_object(self);
Handle<mirror::Class> object_class(hs_object.NewHandle(GetClassRoot<mirror::Object>()));
@@ -501,7 +505,7 @@ TEST_F(UnstartedRuntimeTest, SystemArrayCopyObjectArrayTest) {
hs_expected.NewHandle(hs_src.GetReference(1));
RunArrayCopy(self,
- tmp,
+ tmp.get(),
false,
object_class.Get(),
object_class.Get(),
@@ -532,7 +536,7 @@ TEST_F(UnstartedRuntimeTest, SystemArrayCopyObjectArrayTest) {
hs_expected.NewHandle(hs_dst.GetReference(2));
RunArrayCopy(self,
- tmp,
+ tmp.get(),
false,
object_class.Get(),
GetClassRoot<mirror::String>(),
@@ -563,7 +567,7 @@ TEST_F(UnstartedRuntimeTest, SystemArrayCopyObjectArrayTest) {
hs_expected.NewHandle(hs_dst.GetReference(2));
RunArrayCopy(self,
- tmp,
+ tmp.get(),
true,
object_class.Get(),
GetClassRoot<mirror::String>(),
@@ -574,15 +578,13 @@ TEST_F(UnstartedRuntimeTest, SystemArrayCopyObjectArrayTest) {
3,
hs_expected);
}
-
- ShadowFrame::DeleteDeoptimizedFrame(tmp);
}
TEST_F(UnstartedRuntimeTest, IntegerParseIntTest) {
Thread* self = Thread::Current();
ScopedObjectAccess soa(self);
- ShadowFrame* tmp = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, nullptr, 0);
+ UniqueDeoptShadowFramePtr tmp = CreateShadowFrame(10, nullptr, nullptr, 0);
// Test string. Should be valid, and between minimal values of LONG_MIN and LONG_MAX (for all
// suffixes).
@@ -616,13 +618,11 @@ TEST_F(UnstartedRuntimeTest, IntegerParseIntTest) {
tmp->SetVRegReference(0, h_str.Get());
JValue result;
- UnstartedIntegerParseInt(self, tmp, &result, 0);
+ UnstartedIntegerParseInt(self, tmp.get(), &result, 0);
ASSERT_FALSE(self->IsExceptionPending());
EXPECT_EQ(result.GetI(), test_values[i]);
}
-
- ShadowFrame::DeleteDeoptimizedFrame(tmp);
}
// Right now the same as Integer.Parse
@@ -630,7 +630,7 @@ TEST_F(UnstartedRuntimeTest, LongParseLongTest) {
Thread* self = Thread::Current();
ScopedObjectAccess soa(self);
- ShadowFrame* tmp = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, nullptr, 0);
+ UniqueDeoptShadowFramePtr tmp = CreateShadowFrame(10, nullptr, nullptr, 0);
// Test string. Should be valid, and between minimal values of LONG_MIN and LONG_MAX (for all
// suffixes).
@@ -664,20 +664,18 @@ TEST_F(UnstartedRuntimeTest, LongParseLongTest) {
tmp->SetVRegReference(0, h_str.Get());
JValue result;
- UnstartedLongParseLong(self, tmp, &result, 0);
+ UnstartedLongParseLong(self, tmp.get(), &result, 0);
ASSERT_FALSE(self->IsExceptionPending());
EXPECT_EQ(result.GetJ(), test_values[i]);
}
-
- ShadowFrame::DeleteDeoptimizedFrame(tmp);
}
TEST_F(UnstartedRuntimeTest, Ceil) {
Thread* self = Thread::Current();
ScopedObjectAccess soa(self);
- ShadowFrame* tmp = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, nullptr, 0);
+ UniqueDeoptShadowFramePtr tmp = CreateShadowFrame(10, nullptr, nullptr, 0);
constexpr double nan = std::numeric_limits<double>::quiet_NaN();
constexpr double inf = std::numeric_limits<double>::infinity();
@@ -697,16 +695,14 @@ TEST_F(UnstartedRuntimeTest, Ceil) {
{ ld2, ld2 }
};
- TestCeilFloor(true /* ceil */, self, tmp, test_pairs, arraysize(test_pairs));
-
- ShadowFrame::DeleteDeoptimizedFrame(tmp);
+ TestCeilFloor(true /* ceil */, self, tmp.get(), test_pairs, arraysize(test_pairs));
}
TEST_F(UnstartedRuntimeTest, Floor) {
Thread* self = Thread::Current();
ScopedObjectAccess soa(self);
- ShadowFrame* tmp = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, nullptr, 0);
+ UniqueDeoptShadowFramePtr tmp = CreateShadowFrame(10, nullptr, nullptr, 0);
constexpr double nan = std::numeric_limits<double>::quiet_NaN();
constexpr double inf = std::numeric_limits<double>::infinity();
@@ -726,16 +722,14 @@ TEST_F(UnstartedRuntimeTest, Floor) {
{ ld2, ld2 }
};
- TestCeilFloor(false /* floor */, self, tmp, test_pairs, arraysize(test_pairs));
-
- ShadowFrame::DeleteDeoptimizedFrame(tmp);
+ TestCeilFloor(false /* floor */, self, tmp.get(), test_pairs, arraysize(test_pairs));
}
TEST_F(UnstartedRuntimeTest, ToLowerUpper) {
Thread* self = Thread::Current();
ScopedObjectAccess soa(self);
- ShadowFrame* tmp = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, nullptr, 0);
+ UniqueDeoptShadowFramePtr tmp = CreateShadowFrame(10, nullptr, nullptr, 0);
std::locale c_locale("C");
@@ -749,7 +743,7 @@ TEST_F(UnstartedRuntimeTest, ToLowerUpper) {
{
JValue result;
tmp->SetVReg(0, static_cast<int32_t>(i));
- UnstartedCharacterToLowerCase(self, tmp, &result, 0);
+ UnstartedCharacterToLowerCase(self, tmp.get(), &result, 0);
ASSERT_FALSE(self->IsExceptionPending());
uint32_t lower_result = static_cast<uint32_t>(result.GetI());
if (c_lower) {
@@ -766,7 +760,7 @@ TEST_F(UnstartedRuntimeTest, ToLowerUpper) {
{
JValue result2;
tmp->SetVReg(0, static_cast<int32_t>(i));
- UnstartedCharacterToUpperCase(self, tmp, &result2, 0);
+ UnstartedCharacterToUpperCase(self, tmp.get(), &result2, 0);
ASSERT_FALSE(self->IsExceptionPending());
uint32_t upper_result = static_cast<uint32_t>(result2.GetI());
if (c_upper) {
@@ -789,7 +783,7 @@ TEST_F(UnstartedRuntimeTest, ToLowerUpper) {
JValue result;
tmp->SetVReg(0, static_cast<int32_t>(i));
Runtime::Current()->EnterTransactionMode();
- UnstartedCharacterToLowerCase(self, tmp, &result, 0);
+ UnstartedCharacterToLowerCase(self, tmp.get(), &result, 0);
ASSERT_TRUE(Runtime::Current()->IsTransactionAborted());
Runtime::Current()->ExitTransactionMode();
ASSERT_TRUE(self->IsExceptionPending());
@@ -798,7 +792,7 @@ TEST_F(UnstartedRuntimeTest, ToLowerUpper) {
JValue result;
tmp->SetVReg(0, static_cast<int32_t>(i));
Runtime::Current()->EnterTransactionMode();
- UnstartedCharacterToUpperCase(self, tmp, &result, 0);
+ UnstartedCharacterToUpperCase(self, tmp.get(), &result, 0);
ASSERT_TRUE(Runtime::Current()->IsTransactionAborted());
Runtime::Current()->ExitTransactionMode();
ASSERT_TRUE(self->IsExceptionPending());
@@ -809,7 +803,7 @@ TEST_F(UnstartedRuntimeTest, ToLowerUpper) {
JValue result;
tmp->SetVReg(0, static_cast<int32_t>(i));
Runtime::Current()->EnterTransactionMode();
- UnstartedCharacterToLowerCase(self, tmp, &result, 0);
+ UnstartedCharacterToLowerCase(self, tmp.get(), &result, 0);
ASSERT_TRUE(Runtime::Current()->IsTransactionAborted());
Runtime::Current()->ExitTransactionMode();
ASSERT_TRUE(self->IsExceptionPending());
@@ -818,64 +812,53 @@ TEST_F(UnstartedRuntimeTest, ToLowerUpper) {
JValue result;
tmp->SetVReg(0, static_cast<int32_t>(i));
Runtime::Current()->EnterTransactionMode();
- UnstartedCharacterToUpperCase(self, tmp, &result, 0);
+ UnstartedCharacterToUpperCase(self, tmp.get(), &result, 0);
ASSERT_TRUE(Runtime::Current()->IsTransactionAborted());
Runtime::Current()->ExitTransactionMode();
ASSERT_TRUE(self->IsExceptionPending());
}
}
-
- ShadowFrame::DeleteDeoptimizedFrame(tmp);
}
TEST_F(UnstartedRuntimeTest, Sin) {
Thread* self = Thread::Current();
ScopedObjectAccess soa(self);
- ShadowFrame* tmp = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, nullptr, 0);
+ UniqueDeoptShadowFramePtr tmp = CreateShadowFrame(10, nullptr, nullptr, 0);
// Test an important value, PI/6. That's the one we see in practice.
constexpr uint64_t lvalue = UINT64_C(0x3fe0c152382d7365);
tmp->SetVRegLong(0, static_cast<int64_t>(lvalue));
JValue result;
- UnstartedMathSin(self, tmp, &result, 0);
+ UnstartedMathSin(self, tmp.get(), &result, 0);
const uint64_t lresult = static_cast<uint64_t>(result.GetJ());
EXPECT_EQ(UINT64_C(0x3fdfffffffffffff), lresult);
-
- ShadowFrame::DeleteDeoptimizedFrame(tmp);
}
TEST_F(UnstartedRuntimeTest, Cos) {
Thread* self = Thread::Current();
ScopedObjectAccess soa(self);
- ShadowFrame* tmp = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, nullptr, 0);
+ UniqueDeoptShadowFramePtr tmp = CreateShadowFrame(10, nullptr, nullptr, 0);
// Test an important value, PI/6. That's the one we see in practice.
constexpr uint64_t lvalue = UINT64_C(0x3fe0c152382d7365);
tmp->SetVRegLong(0, static_cast<int64_t>(lvalue));
JValue result;
- UnstartedMathCos(self, tmp, &result, 0);
+ UnstartedMathCos(self, tmp.get(), &result, 0);
const uint64_t lresult = static_cast<uint64_t>(result.GetJ());
EXPECT_EQ(UINT64_C(0x3febb67ae8584cab), lresult);
-
- ShadowFrame::DeleteDeoptimizedFrame(tmp);
}
TEST_F(UnstartedRuntimeTest, Pow) {
- // Valgrind seems to get this wrong, actually. Disable for valgrind.
- if (RUNNING_ON_MEMORY_TOOL != 0 && kMemoryToolIsValgrind) {
- return;
- }
-
Thread* self = Thread::Current();
ScopedObjectAccess soa(self);
- ShadowFrame* tmp = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, nullptr, 0);
+ UniqueDeoptShadowFramePtr tmp = CreateShadowFrame(10, nullptr, nullptr, 0);
// Test an important pair.
constexpr uint64_t lvalue1 = UINT64_C(0x4079000000000000);
@@ -885,12 +868,10 @@ TEST_F(UnstartedRuntimeTest, Pow) {
tmp->SetVRegLong(2, static_cast<int64_t>(lvalue2));
JValue result;
- UnstartedMathPow(self, tmp, &result, 0);
+ UnstartedMathPow(self, tmp.get(), &result, 0);
const uint64_t lresult = static_cast<uint64_t>(result.GetJ());
EXPECT_EQ(UINT64_C(0x3f8c5c51326aa7ee), lresult);
-
- ShadowFrame::DeleteDeoptimizedFrame(tmp);
}
TEST_F(UnstartedRuntimeTest, IsAnonymousClass) {
@@ -898,11 +879,11 @@ TEST_F(UnstartedRuntimeTest, IsAnonymousClass) {
ScopedObjectAccess soa(self);
JValue result;
- ShadowFrame* shadow_frame = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, nullptr, 0);
+ UniqueDeoptShadowFramePtr shadow_frame = CreateShadowFrame(10, nullptr, nullptr, 0);
ObjPtr<mirror::Class> class_klass = GetClassRoot<mirror::Class>();
shadow_frame->SetVRegReference(0, class_klass);
- UnstartedClassIsAnonymousClass(self, shadow_frame, &result, 0);
+ UnstartedClassIsAnonymousClass(self, shadow_frame.get(), &result, 0);
EXPECT_EQ(result.GetZ(), 0);
jobject class_loader = LoadDex("Nested");
@@ -912,10 +893,8 @@ TEST_F(UnstartedRuntimeTest, IsAnonymousClass) {
ObjPtr<mirror::Class> c = class_linker_->FindClass(soa.Self(), "LNested$1;", loader);
ASSERT_TRUE(c != nullptr);
shadow_frame->SetVRegReference(0, c);
- UnstartedClassIsAnonymousClass(self, shadow_frame, &result, 0);
+ UnstartedClassIsAnonymousClass(self, shadow_frame.get(), &result, 0);
EXPECT_EQ(result.GetZ(), 1);
-
- ShadowFrame::DeleteDeoptimizedFrame(shadow_frame);
}
TEST_F(UnstartedRuntimeTest, GetDeclaringClass) {
@@ -923,7 +902,7 @@ TEST_F(UnstartedRuntimeTest, GetDeclaringClass) {
ScopedObjectAccess soa(self);
JValue result;
- ShadowFrame* shadow_frame = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, nullptr, 0);
+ UniqueDeoptShadowFramePtr shadow_frame = CreateShadowFrame(10, nullptr, nullptr, 0);
jobject class_loader = LoadDex("Nested");
StackHandleScope<4> hs(self);
@@ -938,18 +917,16 @@ TEST_F(UnstartedRuntimeTest, GetDeclaringClass) {
class_linker_->FindClass(soa.Self(), "LNested$1;", loader)));
shadow_frame->SetVRegReference(0, nested_klass.Get());
- UnstartedClassGetDeclaringClass(self, shadow_frame, &result, 0);
+ UnstartedClassGetDeclaringClass(self, shadow_frame.get(), &result, 0);
EXPECT_EQ(result.GetL(), nullptr);
shadow_frame->SetVRegReference(0, inner_klass.Get());
- UnstartedClassGetDeclaringClass(self, shadow_frame, &result, 0);
+ UnstartedClassGetDeclaringClass(self, shadow_frame.get(), &result, 0);
EXPECT_EQ(result.GetL(), nested_klass.Get());
shadow_frame->SetVRegReference(0, anon_klass.Get());
- UnstartedClassGetDeclaringClass(self, shadow_frame, &result, 0);
+ UnstartedClassGetDeclaringClass(self, shadow_frame.get(), &result, 0);
EXPECT_EQ(result.GetL(), nullptr);
-
- ShadowFrame::DeleteDeoptimizedFrame(shadow_frame);
}
TEST_F(UnstartedRuntimeTest, ThreadLocalGet) {
@@ -957,7 +934,7 @@ TEST_F(UnstartedRuntimeTest, ThreadLocalGet) {
ScopedObjectAccess soa(self);
JValue result;
- ShadowFrame* shadow_frame = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, nullptr, 0);
+ UniqueDeoptShadowFramePtr shadow_frame = CreateShadowFrame(10, nullptr, nullptr, 0);
StackHandleScope<1> hs(self);
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
@@ -979,14 +956,14 @@ TEST_F(UnstartedRuntimeTest, ThreadLocalGet) {
ASSERT_TRUE(caller_method != nullptr);
ASSERT_TRUE(caller_method->IsDirect());
ASSERT_TRUE(caller_method->GetDeclaringClass() == floating_decimal.Get());
- ShadowFrame* caller_frame = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, caller_method, 0);
- shadow_frame->SetLink(caller_frame);
+ UniqueDeoptShadowFramePtr caller_frame = CreateShadowFrame(10, nullptr, caller_method, 0);
+ shadow_frame->SetLink(caller_frame.get());
- UnstartedThreadLocalGet(self, shadow_frame, &result, 0);
+ UnstartedThreadLocalGet(self, shadow_frame.get(), &result, 0);
EXPECT_TRUE(result.GetL() != nullptr);
EXPECT_FALSE(self->IsExceptionPending());
- ShadowFrame::DeleteDeoptimizedFrame(caller_frame);
+ shadow_frame->SetLink(nullptr);
}
// Negative test.
@@ -997,20 +974,18 @@ TEST_F(UnstartedRuntimeTest, ThreadLocalGet) {
ObjPtr<mirror::Class> class_class = GetClassRoot<mirror::Class>();
ArtMethod* caller_method =
&*class_class->GetDeclaredMethods(class_linker->GetImagePointerSize()).begin();
- ShadowFrame* caller_frame = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, caller_method, 0);
- shadow_frame->SetLink(caller_frame);
+ UniqueDeoptShadowFramePtr caller_frame = CreateShadowFrame(10, nullptr, caller_method, 0);
+ shadow_frame->SetLink(caller_frame.get());
Runtime::Current()->EnterTransactionMode();
- UnstartedThreadLocalGet(self, shadow_frame, &result, 0);
+ UnstartedThreadLocalGet(self, shadow_frame.get(), &result, 0);
ASSERT_TRUE(Runtime::Current()->IsTransactionAborted());
Runtime::Current()->ExitTransactionMode();
ASSERT_TRUE(self->IsExceptionPending());
self->ClearException();
- ShadowFrame::DeleteDeoptimizedFrame(caller_frame);
+ shadow_frame->SetLink(nullptr);
}
-
- ShadowFrame::DeleteDeoptimizedFrame(shadow_frame);
}
TEST_F(UnstartedRuntimeTest, FloatConversion) {
@@ -1037,7 +1012,8 @@ TEST_F(UnstartedRuntimeTest, FloatConversion) {
uint16_t inst_data[3] = { 0x2070, 0x0000, 0x0010 };
JValue result;
- ShadowFrame* shadow_frame = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, method, 0);
+ UniqueDeoptShadowFramePtr shadow_frame = CreateShadowFrame(10, nullptr, method, 0);
+
shadow_frame->SetVRegDouble(0, 1.23);
interpreter::DoCall<false, false>(method,
self,
@@ -1050,8 +1026,6 @@ TEST_F(UnstartedRuntimeTest, FloatConversion) {
std::string mod_utf = string_result->ToModifiedUtf8();
EXPECT_EQ("1.23", mod_utf);
-
- ShadowFrame::DeleteDeoptimizedFrame(shadow_frame);
}
TEST_F(UnstartedRuntimeTest, ThreadCurrentThread) {
@@ -1059,7 +1033,7 @@ TEST_F(UnstartedRuntimeTest, ThreadCurrentThread) {
ScopedObjectAccess soa(self);
JValue result;
- ShadowFrame* shadow_frame = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, nullptr, 0);
+ UniqueDeoptShadowFramePtr shadow_frame = CreateShadowFrame(10, nullptr, nullptr, 0);
StackHandleScope<1> hs(self);
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
@@ -1074,14 +1048,12 @@ TEST_F(UnstartedRuntimeTest, ThreadCurrentThread) {
{
Runtime::Current()->EnterTransactionMode();
- UnstartedThreadCurrentThread(self, shadow_frame, &result, 0);
+ UnstartedThreadCurrentThread(self, shadow_frame.get(), &result, 0);
ASSERT_TRUE(Runtime::Current()->IsTransactionAborted());
Runtime::Current()->ExitTransactionMode();
ASSERT_TRUE(self->IsExceptionPending());
self->ClearException();
}
-
- ShadowFrame::DeleteDeoptimizedFrame(shadow_frame);
}
TEST_F(UnstartedRuntimeTest, LogManager) {
@@ -1138,7 +1110,7 @@ class UnstartedClassForNameTest : public UnstartedRuntimeTest {
}
JValue result;
- ShadowFrame* shadow_frame = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, nullptr, 0);
+ UniqueDeoptShadowFramePtr shadow_frame = CreateShadowFrame(10, nullptr, nullptr, 0);
for (const char* name : kTestCases) {
ObjPtr<mirror::String> name_string = mirror::String::AllocFromModifiedUtf8(self, name);
@@ -1149,7 +1121,7 @@ class UnstartedClassForNameTest : public UnstartedRuntimeTest {
}
CHECK(!self->IsExceptionPending());
- runner(self, shadow_frame, name_string, &result);
+ runner(self, shadow_frame.get(), name_string, &result);
if (should_succeed) {
CHECK(!self->IsExceptionPending()) << name << " " << self->GetException()->Dump();
@@ -1166,8 +1138,6 @@ class UnstartedClassForNameTest : public UnstartedRuntimeTest {
Runtime::Current()->ExitTransactionMode();
}
}
-
- ShadowFrame::DeleteDeoptimizedFrame(shadow_frame);
}
mirror::ClassLoader* GetBootClassLoader() REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -1194,7 +1164,7 @@ class UnstartedClassForNameTest : public UnstartedRuntimeTest {
CHECK(boot_cp_init != nullptr);
JValue result;
- ShadowFrame* shadow_frame = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, boot_cp_init, 0);
+ UniqueDeoptShadowFramePtr shadow_frame = CreateShadowFrame(10, nullptr, boot_cp_init, 0);
shadow_frame->SetVRegReference(0, boot_cp.Get());
// create instruction data for invoke-direct {v0} of method with fake index
@@ -1207,8 +1177,6 @@ class UnstartedClassForNameTest : public UnstartedRuntimeTest {
inst_data[0],
&result);
CHECK(!self->IsExceptionPending());
-
- ShadowFrame::DeleteDeoptimizedFrame(shadow_frame);
}
return boot_cp.Get();
@@ -1315,15 +1283,13 @@ TEST_F(UnstartedRuntimeTest, ClassGetSignatureAnnotation) {
ASSERT_TRUE(class_linker->EnsureInitialized(self, list_class, true, true));
JValue result;
- ShadowFrame* shadow_frame = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, nullptr, 0);
+ UniqueDeoptShadowFramePtr shadow_frame = CreateShadowFrame(10, nullptr, nullptr, 0);
shadow_frame->SetVRegReference(0, list_class.Get());
- UnstartedClassGetSignatureAnnotation(self, shadow_frame, &result, 0);
+ UnstartedClassGetSignatureAnnotation(self, shadow_frame.get(), &result, 0);
ASSERT_TRUE(result.GetL() != nullptr);
ASSERT_FALSE(self->IsExceptionPending());
- ShadowFrame::DeleteDeoptimizedFrame(shadow_frame);
-
ASSERT_TRUE(result.GetL()->IsObjectArray());
ObjPtr<mirror::ObjectArray<mirror::Object>> array =
result.GetL()->AsObjectArray<mirror::Object>();
@@ -1375,10 +1341,10 @@ TEST_F(UnstartedRuntimeTest, ConstructorNewInstance0) {
// OK, we're ready now.
JValue result;
- ShadowFrame* shadow_frame = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, nullptr, 0);
+ UniqueDeoptShadowFramePtr shadow_frame = CreateShadowFrame(10, nullptr, nullptr, 0);
shadow_frame->SetVRegReference(0, cons.Get());
shadow_frame->SetVRegReference(1, args.Get());
- UnstartedConstructorNewInstance0(self, shadow_frame, &result, 0);
+ UnstartedConstructorNewInstance0(self, shadow_frame.get(), &result, 0);
ASSERT_TRUE(result.GetL() != nullptr);
ASSERT_FALSE(self->IsExceptionPending());
@@ -1391,29 +1357,25 @@ TEST_F(UnstartedRuntimeTest, ConstructorNewInstance0) {
ObjPtr<mirror::String> result_msg =
reinterpret_cast<mirror::Throwable*>(result.GetL())->GetDetailMessage();
EXPECT_OBJ_PTR_EQ(input.Get(), result_msg);
-
- ShadowFrame::DeleteDeoptimizedFrame(shadow_frame);
}
TEST_F(UnstartedRuntimeTest, IdentityHashCode) {
Thread* self = Thread::Current();
ScopedObjectAccess soa(self);
- ShadowFrame* tmp = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, nullptr, 0);
+ UniqueDeoptShadowFramePtr tmp = CreateShadowFrame(10, nullptr, nullptr, 0);
JValue result;
- UnstartedSystemIdentityHashCode(self, tmp, &result, 0);
+ UnstartedSystemIdentityHashCode(self, tmp.get(), &result, 0);
EXPECT_EQ(0, result.GetI());
ASSERT_FALSE(self->IsExceptionPending());
ObjPtr<mirror::String> str = mirror::String::AllocFromModifiedUtf8(self, "abd");
tmp->SetVRegReference(0, str);
- UnstartedSystemIdentityHashCode(self, tmp, &result, 0);
+ UnstartedSystemIdentityHashCode(self, tmp.get(), &result, 0);
EXPECT_NE(0, result.GetI());
EXPECT_EQ(str->IdentityHashCode(), result.GetI());
ASSERT_FALSE(self->IsExceptionPending());
-
- ShadowFrame::DeleteDeoptimizedFrame(tmp);
}
} // namespace interpreter
diff --git a/runtime/jdwp_provider.h b/runtime/jdwp_provider.h
index 698fdc086d..c4f19899c9 100644
--- a/runtime/jdwp_provider.h
+++ b/runtime/jdwp_provider.h
@@ -19,6 +19,7 @@
#include <ios>
+#include "base/globals.h"
#include "base/macros.h"
#include "base/logging.h"
@@ -26,13 +27,33 @@ namespace art {
enum class JdwpProvider {
kNone,
+ // Special value only used to denote that no explicit choice has been made by the user. This
+ // should not be used and one should always call CanonicalizeJdwpProvider which will remove this
+ // value before using a JdwpProvider value.
+ kUnset,
kInternal,
kAdbConnection,
- // The current default provider
+ // The current default provider. Used if you run -XjdwpProvider:default
kDefaultJdwpProvider = kAdbConnection,
+
+ // What we should use as provider with no options and debuggable. On host we always want to be
+ // none since there is no adbd on host.
+ kUnsetDebuggable = kIsTargetBuild ? kDefaultJdwpProvider : kNone,
+ // What we should use as provider with no options and non-debuggable
+ kUnsetNonDebuggable = kNone,
};
+inline JdwpProvider CanonicalizeJdwpProvider(JdwpProvider p, bool debuggable) {
+ if (p != JdwpProvider::kUnset) {
+ return p;
+ }
+ if (debuggable) {
+ return JdwpProvider::kUnsetDebuggable;
+ }
+ return JdwpProvider::kUnsetNonDebuggable;
+}
+
std::ostream& operator<<(std::ostream& os, const JdwpProvider& rhs);
} // namespace art
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index b7b779ce31..5e736035f8 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -333,7 +333,7 @@ void Jit::DeleteThreadPool() {
}
// When running sanitized, let all tasks finish to not leak. Otherwise just clear the queue.
- if (!RUNNING_ON_MEMORY_TOOL) {
+ if (!kRunningOnMemoryTool) {
pool->StopWorkers(self);
pool->RemoveAllTasks(self);
}
@@ -493,8 +493,7 @@ bool Jit::MaybeDoOnStackReplacement(Thread* thread,
// We found a stack map, now fill the frame with dex register values from the interpreter's
// shadow frame.
- DexRegisterMap vreg_map =
- code_info.GetDexRegisterMapOf(stack_map, number_of_vregs);
+ DexRegisterMap vreg_map = code_info.GetDexRegisterMapOf(stack_map);
frame_size = osr_method->GetFrameSizeInBytes();
@@ -510,12 +509,13 @@ bool Jit::MaybeDoOnStackReplacement(Thread* thread,
memory[0] = method;
shadow_frame = thread->PopShadowFrame();
- if (!vreg_map.IsValid()) {
+ if (vreg_map.empty()) {
// If we don't have a dex register map, then there are no live dex registers at
// this dex pc.
} else {
+ DCHECK_EQ(vreg_map.size(), number_of_vregs);
for (uint16_t vreg = 0; vreg < number_of_vregs; ++vreg) {
- DexRegisterLocation::Kind location = vreg_map.GetLocationKind(vreg);
+ DexRegisterLocation::Kind location = vreg_map[vreg].GetKind();
if (location == DexRegisterLocation::Kind::kNone) {
// Dex register is dead or uninitialized.
continue;
@@ -529,7 +529,7 @@ bool Jit::MaybeDoOnStackReplacement(Thread* thread,
DCHECK_EQ(location, DexRegisterLocation::Kind::kInStack);
int32_t vreg_value = shadow_frame->GetVReg(vreg);
- int32_t slot_offset = vreg_map.GetStackOffsetInBytes(vreg);
+ int32_t slot_offset = vreg_map[vreg].GetStackOffsetInBytes();
DCHECK_LT(slot_offset, static_cast<int32_t>(frame_size));
DCHECK_GT(slot_offset, 0);
(reinterpret_cast<int32_t*>(memory))[slot_offset / sizeof(int32_t)] = vreg_value;
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index b010650345..3d7fe89cd0 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -21,6 +21,7 @@
#include "arch/context.h"
#include "art_method-inl.h"
#include "base/enums.h"
+#include "base/histogram-inl.h"
#include "base/logging.h" // For VLOG.
#include "base/mem_map.h"
#include "base/quasi_atomic.h"
@@ -30,10 +31,12 @@
#include "cha.h"
#include "debugger_interface.h"
#include "dex/dex_file_loader.h"
+#include "dex/method_reference.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/bitmap-inl.h"
#include "gc/scoped_gc_critical_section.h"
#include "handle.h"
+#include "instrumentation.h"
#include "intern_table.h"
#include "jit/jit.h"
#include "jit/profiling_info.h"
@@ -167,8 +170,8 @@ JitCodeCache* JitCodeCache::Create(size_t initial_capacity,
// Generating debug information is for using the Linux perf tool on
// host which does not work with ashmem.
- // Also, target linux does not support ashmem.
- bool use_ashmem = !generate_debug_info && !kIsTargetLinux;
+ // Also, targets linux and fuchsia do not support ashmem.
+ bool use_ashmem = !generate_debug_info && !kIsTargetLinux && !kIsTargetFuchsia;
// With 'perf', we want a 1-1 mapping between an address and a method.
bool garbage_collect_code = !generate_debug_info;
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 958e8e8aa2..b056bc3b4c 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -17,16 +17,19 @@
#ifndef ART_RUNTIME_JIT_JIT_CODE_CACHE_H_
#define ART_RUNTIME_JIT_JIT_CODE_CACHE_H_
-#include "instrumentation.h"
+#include <iosfwd>
+#include <memory>
+#include <set>
+#include <string>
+#include <unordered_set>
+#include <vector>
#include "base/arena_containers.h"
#include "base/atomic.h"
-#include "base/histogram-inl.h"
+#include "base/histogram.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "base/safe_map.h"
-#include "dex/method_reference.h"
-#include "gc_root.h"
namespace art {
@@ -36,6 +39,7 @@ class LinearAlloc;
class InlineCache;
class IsMarkedVisitor;
class JitJniStubTestHelper;
+class MemMap;
class OatQuickMethodHeader;
struct ProfileMethodInfo;
class ProfilingInfo;
diff --git a/runtime/jit/profiling_info_test.cc b/runtime/jit/profiling_info_test.cc
index 106a80a568..8424610cf8 100644
--- a/runtime/jit/profiling_info_test.cc
+++ b/runtime/jit/profiling_info_test.cc
@@ -31,7 +31,6 @@
#include "mirror/class_loader.h"
#include "profile/profile_compilation_info.h"
#include "scoped_thread_state_change-inl.h"
-#include "ziparchive/zip_writer.h"
namespace art {
diff --git a/runtime/check_jni.cc b/runtime/jni/check_jni.cc
index 0ff55ae25b..7919c32737 100644
--- a/runtime/check_jni.cc
+++ b/runtime/jni/check_jni.cc
@@ -35,8 +35,8 @@
#include "dex/descriptors_names.h"
#include "dex/dex_file-inl.h"
#include "gc/space/space.h"
-#include "jni/java_vm_ext.h"
-#include "jni/jni_internal.h"
+#include "java_vm_ext.h"
+#include "jni_internal.h"
#include "mirror/class-inl.h"
#include "mirror/field.h"
#include "mirror/method.h"
@@ -2173,7 +2173,7 @@ class CheckJNI {
return result;
}
- static jobject NewObjectA(JNIEnv* env, jclass c, jmethodID mid, jvalue* vargs) {
+ static jobject NewObjectA(JNIEnv* env, jclass c, jmethodID mid, const jvalue* vargs) {
CHECK_ATTACHED_THREAD(__FUNCTION__, nullptr);
ScopedObjectAccess soa(env);
ScopedCheck sc(kFlag_Default, __FUNCTION__);
@@ -2268,16 +2268,16 @@ class CheckJNI {
FIELD_ACCESSORS(jdouble, Double, Primitive::kPrimDouble, D, D)
#undef FIELD_ACCESSORS
- static void CallVoidMethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* vargs) {
+ static void CallVoidMethodA(JNIEnv* env, jobject obj, jmethodID mid, const jvalue* vargs) {
CallMethodA(__FUNCTION__, env, obj, nullptr, mid, vargs, Primitive::kPrimVoid, kVirtual);
}
static void CallNonvirtualVoidMethodA(JNIEnv* env, jobject obj, jclass c, jmethodID mid,
- jvalue* vargs) {
+ const jvalue* vargs) {
CallMethodA(__FUNCTION__, env, obj, c, mid, vargs, Primitive::kPrimVoid, kDirect);
}
- static void CallStaticVoidMethodA(JNIEnv* env, jclass c, jmethodID mid, jvalue* vargs) {
+ static void CallStaticVoidMethodA(JNIEnv* env, jclass c, jmethodID mid, const jvalue* vargs) {
CallMethodA(__FUNCTION__, env, nullptr, c, mid, vargs, Primitive::kPrimVoid, kStatic);
}
@@ -2316,16 +2316,16 @@ class CheckJNI {
}
#define CALL(rtype, name, ptype, shorty) \
- static rtype Call##name##MethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* vargs) { \
+ static rtype Call##name##MethodA(JNIEnv* env, jobject obj, jmethodID mid, const jvalue* vargs) { \
return CallMethodA(__FUNCTION__, env, obj, nullptr, mid, vargs, ptype, kVirtual).shorty; \
} \
\
static rtype CallNonvirtual##name##MethodA(JNIEnv* env, jobject obj, jclass c, jmethodID mid, \
- jvalue* vargs) { \
+ const jvalue* vargs) { \
return CallMethodA(__FUNCTION__, env, obj, c, mid, vargs, ptype, kDirect).shorty; \
} \
\
- static rtype CallStatic##name##MethodA(JNIEnv* env, jclass c, jmethodID mid, jvalue* vargs) { \
+ static rtype CallStatic##name##MethodA(JNIEnv* env, jclass c, jmethodID mid, const jvalue* vargs) { \
return CallMethodA(__FUNCTION__, env, nullptr, c, mid, vargs, ptype, kStatic).shorty; \
} \
\
@@ -3070,7 +3070,7 @@ class CheckJNI {
}
static JniValueType CallMethodA(const char* function_name, JNIEnv* env, jobject obj, jclass c,
- jmethodID mid, jvalue* vargs, Primitive::Type type,
+ jmethodID mid, const jvalue* vargs, Primitive::Type type,
InvokeType invoke) {
CHECK_ATTACHED_THREAD(function_name, JniValueType());
ScopedObjectAccess soa(env);
diff --git a/runtime/check_jni.h b/runtime/jni/check_jni.h
index f41abf81ce..10fdfe859d 100644
--- a/runtime/check_jni.h
+++ b/runtime/jni/check_jni.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_CHECK_JNI_H_
-#define ART_RUNTIME_CHECK_JNI_H_
+#ifndef ART_RUNTIME_JNI_CHECK_JNI_H_
+#define ART_RUNTIME_JNI_CHECK_JNI_H_
#include <jni.h>
@@ -26,4 +26,4 @@ const JNIInvokeInterface* GetCheckJniInvokeInterface();
} // namespace art
-#endif // ART_RUNTIME_CHECK_JNI_H_
+#endif // ART_RUNTIME_JNI_CHECK_JNI_H_
diff --git a/runtime/jni/java_vm_ext.cc b/runtime/jni/java_vm_ext.cc
index 8fe68bd318..44679a5afa 100644
--- a/runtime/jni/java_vm_ext.cc
+++ b/runtime/jni/java_vm_ext.cc
@@ -912,7 +912,11 @@ bool JavaVMExt::LoadNativeLibrary(JNIEnv* env,
return utf.c_str();
}
}
- env->ExceptionClear();
+ if (env->ExceptionCheck()) {
+ // We can't do much better logging, really. So leave it with a Describe.
+ env->ExceptionDescribe();
+ env->ExceptionClear();
+ }
return "(Error calling toString)";
}
return "null";
diff --git a/runtime/jni/jni_internal.cc b/runtime/jni/jni_internal.cc
index 7290d638f3..a02e76ae54 100644
--- a/runtime/jni/jni_internal.cc
+++ b/runtime/jni/jni_internal.cc
@@ -761,7 +761,7 @@ class JNI {
return local_result;
}
- static jobject NewObjectA(JNIEnv* env, jclass java_class, jmethodID mid, jvalue* args) {
+ static jobject NewObjectA(JNIEnv* env, jclass java_class, jmethodID mid, const jvalue* args) {
CHECK_NON_NULL_ARGUMENT(java_class);
CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
@@ -824,7 +824,7 @@ class JNI {
return soa.AddLocalReference<jobject>(result.GetL());
}
- static jobject CallObjectMethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* args) {
+ static jobject CallObjectMethodA(JNIEnv* env, jobject obj, jmethodID mid, const jvalue* args) {
CHECK_NON_NULL_ARGUMENT(obj);
CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
@@ -850,7 +850,7 @@ class JNI {
return InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, args).GetZ();
}
- static jboolean CallBooleanMethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* args) {
+ static jboolean CallBooleanMethodA(JNIEnv* env, jobject obj, jmethodID mid, const jvalue* args) {
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
@@ -875,7 +875,7 @@ class JNI {
return InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, args).GetB();
}
- static jbyte CallByteMethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* args) {
+ static jbyte CallByteMethodA(JNIEnv* env, jobject obj, jmethodID mid, const jvalue* args) {
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
@@ -900,7 +900,7 @@ class JNI {
return InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, args).GetC();
}
- static jchar CallCharMethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* args) {
+ static jchar CallCharMethodA(JNIEnv* env, jobject obj, jmethodID mid, const jvalue* args) {
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
@@ -925,7 +925,7 @@ class JNI {
return InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, args).GetD();
}
- static jdouble CallDoubleMethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* args) {
+ static jdouble CallDoubleMethodA(JNIEnv* env, jobject obj, jmethodID mid, const jvalue* args) {
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
@@ -950,7 +950,7 @@ class JNI {
return InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, args).GetF();
}
- static jfloat CallFloatMethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* args) {
+ static jfloat CallFloatMethodA(JNIEnv* env, jobject obj, jmethodID mid, const jvalue* args) {
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
@@ -975,7 +975,7 @@ class JNI {
return InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, args).GetI();
}
- static jint CallIntMethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* args) {
+ static jint CallIntMethodA(JNIEnv* env, jobject obj, jmethodID mid, const jvalue* args) {
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
@@ -1000,7 +1000,7 @@ class JNI {
return InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, args).GetJ();
}
- static jlong CallLongMethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* args) {
+ static jlong CallLongMethodA(JNIEnv* env, jobject obj, jmethodID mid, const jvalue* args) {
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
@@ -1025,7 +1025,7 @@ class JNI {
return InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, args).GetS();
}
- static jshort CallShortMethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* args) {
+ static jshort CallShortMethodA(JNIEnv* env, jobject obj, jmethodID mid, const jvalue* args) {
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
@@ -1049,7 +1049,7 @@ class JNI {
InvokeVirtualOrInterfaceWithVarArgs(soa, obj, mid, args);
}
- static void CallVoidMethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* args) {
+ static void CallVoidMethodA(JNIEnv* env, jobject obj, jmethodID mid, const jvalue* args) {
CHECK_NON_NULL_ARGUMENT_RETURN_VOID(obj);
CHECK_NON_NULL_ARGUMENT_RETURN_VOID(mid);
ScopedObjectAccess soa(env);
@@ -1078,7 +1078,7 @@ class JNI {
}
static jobject CallNonvirtualObjectMethodA(JNIEnv* env, jobject obj, jclass, jmethodID mid,
- jvalue* args) {
+ const jvalue* args) {
CHECK_NON_NULL_ARGUMENT(obj);
CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
@@ -1107,7 +1107,7 @@ class JNI {
}
static jboolean CallNonvirtualBooleanMethodA(JNIEnv* env, jobject obj, jclass, jmethodID mid,
- jvalue* args) {
+ const jvalue* args) {
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
@@ -1134,7 +1134,7 @@ class JNI {
}
static jbyte CallNonvirtualByteMethodA(JNIEnv* env, jobject obj, jclass, jmethodID mid,
- jvalue* args) {
+ const jvalue* args) {
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
@@ -1161,7 +1161,7 @@ class JNI {
}
static jchar CallNonvirtualCharMethodA(JNIEnv* env, jobject obj, jclass, jmethodID mid,
- jvalue* args) {
+ const jvalue* args) {
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
@@ -1188,7 +1188,7 @@ class JNI {
}
static jshort CallNonvirtualShortMethodA(JNIEnv* env, jobject obj, jclass, jmethodID mid,
- jvalue* args) {
+ const jvalue* args) {
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
@@ -1215,7 +1215,7 @@ class JNI {
}
static jint CallNonvirtualIntMethodA(JNIEnv* env, jobject obj, jclass, jmethodID mid,
- jvalue* args) {
+ const jvalue* args) {
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
@@ -1242,7 +1242,7 @@ class JNI {
}
static jlong CallNonvirtualLongMethodA(JNIEnv* env, jobject obj, jclass, jmethodID mid,
- jvalue* args) {
+ const jvalue* args) {
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
@@ -1269,7 +1269,7 @@ class JNI {
}
static jfloat CallNonvirtualFloatMethodA(JNIEnv* env, jobject obj, jclass, jmethodID mid,
- jvalue* args) {
+ const jvalue* args) {
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
@@ -1296,7 +1296,7 @@ class JNI {
}
static jdouble CallNonvirtualDoubleMethodA(JNIEnv* env, jobject obj, jclass, jmethodID mid,
- jvalue* args) {
+ const jvalue* args) {
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(obj);
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
@@ -1322,7 +1322,7 @@ class JNI {
}
static void CallNonvirtualVoidMethodA(JNIEnv* env, jobject obj, jclass, jmethodID mid,
- jvalue* args) {
+ const jvalue* args) {
CHECK_NON_NULL_ARGUMENT_RETURN_VOID(obj);
CHECK_NON_NULL_ARGUMENT_RETURN_VOID(mid);
ScopedObjectAccess soa(env);
@@ -1562,7 +1562,7 @@ class JNI {
return soa.AddLocalReference<jobject>(result.GetL());
}
- static jobject CallStaticObjectMethodA(JNIEnv* env, jclass, jmethodID mid, jvalue* args) {
+ static jobject CallStaticObjectMethodA(JNIEnv* env, jclass, jmethodID mid, const jvalue* args) {
CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
JValue result(InvokeWithJValues(soa, nullptr, mid, args));
@@ -1585,7 +1585,7 @@ class JNI {
return InvokeWithVarArgs(soa, nullptr, mid, args).GetZ();
}
- static jboolean CallStaticBooleanMethodA(JNIEnv* env, jclass, jmethodID mid, jvalue* args) {
+ static jboolean CallStaticBooleanMethodA(JNIEnv* env, jclass, jmethodID mid, const jvalue* args) {
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeWithJValues(soa, nullptr, mid, args).GetZ();
@@ -1607,7 +1607,7 @@ class JNI {
return InvokeWithVarArgs(soa, nullptr, mid, args).GetB();
}
- static jbyte CallStaticByteMethodA(JNIEnv* env, jclass, jmethodID mid, jvalue* args) {
+ static jbyte CallStaticByteMethodA(JNIEnv* env, jclass, jmethodID mid, const jvalue* args) {
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeWithJValues(soa, nullptr, mid, args).GetB();
@@ -1629,7 +1629,7 @@ class JNI {
return InvokeWithVarArgs(soa, nullptr, mid, args).GetC();
}
- static jchar CallStaticCharMethodA(JNIEnv* env, jclass, jmethodID mid, jvalue* args) {
+ static jchar CallStaticCharMethodA(JNIEnv* env, jclass, jmethodID mid, const jvalue* args) {
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeWithJValues(soa, nullptr, mid, args).GetC();
@@ -1651,7 +1651,7 @@ class JNI {
return InvokeWithVarArgs(soa, nullptr, mid, args).GetS();
}
- static jshort CallStaticShortMethodA(JNIEnv* env, jclass, jmethodID mid, jvalue* args) {
+ static jshort CallStaticShortMethodA(JNIEnv* env, jclass, jmethodID mid, const jvalue* args) {
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeWithJValues(soa, nullptr, mid, args).GetS();
@@ -1673,7 +1673,7 @@ class JNI {
return InvokeWithVarArgs(soa, nullptr, mid, args).GetI();
}
- static jint CallStaticIntMethodA(JNIEnv* env, jclass, jmethodID mid, jvalue* args) {
+ static jint CallStaticIntMethodA(JNIEnv* env, jclass, jmethodID mid, const jvalue* args) {
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeWithJValues(soa, nullptr, mid, args).GetI();
@@ -1695,7 +1695,7 @@ class JNI {
return InvokeWithVarArgs(soa, nullptr, mid, args).GetJ();
}
- static jlong CallStaticLongMethodA(JNIEnv* env, jclass, jmethodID mid, jvalue* args) {
+ static jlong CallStaticLongMethodA(JNIEnv* env, jclass, jmethodID mid, const jvalue* args) {
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeWithJValues(soa, nullptr, mid, args).GetJ();
@@ -1717,7 +1717,7 @@ class JNI {
return InvokeWithVarArgs(soa, nullptr, mid, args).GetF();
}
- static jfloat CallStaticFloatMethodA(JNIEnv* env, jclass, jmethodID mid, jvalue* args) {
+ static jfloat CallStaticFloatMethodA(JNIEnv* env, jclass, jmethodID mid, const jvalue* args) {
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeWithJValues(soa, nullptr, mid, args).GetF();
@@ -1739,7 +1739,7 @@ class JNI {
return InvokeWithVarArgs(soa, nullptr, mid, args).GetD();
}
- static jdouble CallStaticDoubleMethodA(JNIEnv* env, jclass, jmethodID mid, jvalue* args) {
+ static jdouble CallStaticDoubleMethodA(JNIEnv* env, jclass, jmethodID mid, const jvalue* args) {
CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(mid);
ScopedObjectAccess soa(env);
return InvokeWithJValues(soa, nullptr, mid, args).GetD();
@@ -1760,7 +1760,7 @@ class JNI {
InvokeWithVarArgs(soa, nullptr, mid, args);
}
- static void CallStaticVoidMethodA(JNIEnv* env, jclass, jmethodID mid, jvalue* args) {
+ static void CallStaticVoidMethodA(JNIEnv* env, jclass, jmethodID mid, const jvalue* args) {
CHECK_NON_NULL_ARGUMENT_RETURN_VOID(mid);
ScopedObjectAccess soa(env);
InvokeWithJValues(soa, nullptr, mid, args);
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 44b0c2b007..26dba024c6 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -114,13 +114,17 @@ ClassExt* Class::EnsureExtDataPresent(Thread* self) {
bool set;
// Set the ext_data_ field using CAS semantics.
if (Runtime::Current()->IsActiveTransaction()) {
- set = h_this->CasFieldStrongSequentiallyConsistentObject<true>(ext_offset,
- ObjPtr<ClassExt>(nullptr),
- new_ext.Get());
+ set = h_this->CasFieldObject<true>(ext_offset,
+ nullptr,
+ new_ext.Get(),
+ CASMode::kStrong,
+ std::memory_order_seq_cst);
} else {
- set = h_this->CasFieldStrongSequentiallyConsistentObject<false>(ext_offset,
- ObjPtr<ClassExt>(nullptr),
- new_ext.Get());
+ set = h_this->CasFieldObject<false>(ext_offset,
+ nullptr,
+ new_ext.Get(),
+ CASMode::kStrong,
+ std::memory_order_seq_cst);
}
ObjPtr<ClassExt> ret(set ? new_ext.Get() : h_this->GetExtData());
DCHECK(!set || h_this->GetExtData() == new_ext.Get());
diff --git a/runtime/mirror/dex_cache-inl.h b/runtime/mirror/dex_cache-inl.h
index 96778aa98d..faec6e6bf8 100644
--- a/runtime/mirror/dex_cache-inl.h
+++ b/runtime/mirror/dex_cache-inl.h
@@ -34,6 +34,7 @@
#include "mirror/method_type.h"
#include "obj_ptr.h"
#include "runtime.h"
+#include "write_barrier-inl.h"
#include <atomic>
@@ -76,7 +77,7 @@ inline void DexCache::SetResolvedString(dex::StringIndex string_idx, ObjPtr<Stri
runtime->RecordResolveString(this, string_idx);
}
// TODO: Fine-grained marking, so that we don't need to go through all arrays in full.
- runtime->GetHeap()->WriteBarrierEveryFieldOf(this);
+ WriteBarrier::ForEveryFieldWrite(this);
}
inline void DexCache::ClearString(dex::StringIndex string_idx) {
@@ -113,7 +114,7 @@ inline void DexCache::SetResolvedType(dex::TypeIndex type_idx, ObjPtr<Class> res
GetResolvedTypes()[TypeSlotIndex(type_idx)].store(
TypeDexCachePair(resolved, type_idx.index_), std::memory_order_release);
// TODO: Fine-grained marking, so that we don't need to go through all arrays in full.
- Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(this);
+ WriteBarrier::ForEveryFieldWrite(this);
}
inline void DexCache::ClearResolvedType(dex::TypeIndex type_idx) {
@@ -145,7 +146,7 @@ inline void DexCache::SetResolvedMethodType(dex::ProtoIndex proto_idx, MethodTyp
GetResolvedMethodTypes()[MethodTypeSlotIndex(proto_idx)].store(
MethodTypeDexCachePair(resolved, proto_idx.index_), std::memory_order_relaxed);
// TODO: Fine-grained marking, so that we don't need to go through all arrays in full.
- Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(this);
+ WriteBarrier::ForEveryFieldWrite(this);
}
inline CallSite* DexCache::GetResolvedCallSite(uint32_t call_site_idx) {
@@ -157,7 +158,8 @@ inline CallSite* DexCache::GetResolvedCallSite(uint32_t call_site_idx) {
return ref.load(std::memory_order_seq_cst).Read();
}
-inline CallSite* DexCache::SetResolvedCallSite(uint32_t call_site_idx, CallSite* call_site) {
+inline ObjPtr<CallSite> DexCache::SetResolvedCallSite(uint32_t call_site_idx,
+ ObjPtr<CallSite> call_site) {
DCHECK(Runtime::Current()->IsMethodHandlesEnabled());
DCHECK_LT(call_site_idx, GetDexFile()->NumCallSiteIds());
@@ -170,7 +172,7 @@ inline CallSite* DexCache::SetResolvedCallSite(uint32_t call_site_idx, CallSite*
reinterpret_cast<Atomic<GcRoot<mirror::CallSite>>&>(target);
if (ref.CompareAndSetStrongSequentiallyConsistent(null_call_site, candidate)) {
// TODO: Fine-grained marking, so that we don't need to go through all arrays in full.
- Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(this);
+ WriteBarrier::ForEveryFieldWrite(this);
return call_site;
} else {
return target.Read();
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index bb86004a90..941248edf7 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -320,8 +320,8 @@ class MANAGED DexCache FINAL : public Object {
// because multiple threads can invoke the bootstrap method each
// producing a call site, but the method handle invocation on the
// call site must be on a common agreed value.
- CallSite* SetResolvedCallSite(uint32_t call_site_idx, CallSite* resolved) WARN_UNUSED
- REQUIRES_SHARED(Locks::mutator_lock_);
+ ObjPtr<CallSite> SetResolvedCallSite(uint32_t call_site_idx, ObjPtr<CallSite> resolved)
+ REQUIRES_SHARED(Locks::mutator_lock_) WARN_UNUSED;
StringDexCacheType* GetStrings() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldPtr64<StringDexCacheType*>(StringsOffset());
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index ee4f53b695..47f0a298de 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -39,6 +39,7 @@
#include "runtime.h"
#include "string.h"
#include "throwable.h"
+#include "write_barrier-inl.h"
namespace art {
namespace mirror {
@@ -50,8 +51,7 @@ inline uint32_t Object::ClassSize(PointerSize pointer_size) {
template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline Class* Object::GetClass() {
- return GetFieldObject<Class, kVerifyFlags, kReadBarrierOption>(
- OFFSET_OF_OBJECT_MEMBER(Object, klass_));
+ return GetFieldObject<Class, kVerifyFlags, kReadBarrierOption>(ClassOffset());
}
template<VerifyObjectFlags kVerifyFlags>
@@ -61,35 +61,20 @@ inline void Object::SetClass(ObjPtr<Class> new_klass) {
// backing cards, such as large objects.
// We use non transactional version since we can't undo this write. We also disable checking as
// we may run in transaction mode here.
- SetFieldObjectWithoutWriteBarrier<false, false,
- static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>(
- OFFSET_OF_OBJECT_MEMBER(Object, klass_), new_klass);
+ SetFieldObjectWithoutWriteBarrier<false, false, RemoveThisFlags(kVerifyFlags)>(ClassOffset(),
+ new_klass);
}
template<VerifyObjectFlags kVerifyFlags>
inline void Object::SetLockWord(LockWord new_val, bool as_volatile) {
// Force use of non-transactional mode and do not check.
if (as_volatile) {
- SetField32Volatile<false, false, kVerifyFlags>(
- OFFSET_OF_OBJECT_MEMBER(Object, monitor_), new_val.GetValue());
+ SetField32Volatile<false, false, kVerifyFlags>(MonitorOffset(), new_val.GetValue());
} else {
- SetField32<false, false, kVerifyFlags>(
- OFFSET_OF_OBJECT_MEMBER(Object, monitor_), new_val.GetValue());
+ SetField32<false, false, kVerifyFlags>(MonitorOffset(), new_val.GetValue());
}
}
-inline bool Object::CasLockWordWeakSequentiallyConsistent(LockWord old_val, LockWord new_val) {
- // Force use of non-transactional mode and do not check.
- return CasFieldWeakSequentiallyConsistent32<false, false>(
- OFFSET_OF_OBJECT_MEMBER(Object, monitor_), old_val.GetValue(), new_val.GetValue());
-}
-
-inline bool Object::CasLockWordWeakAcquire(LockWord old_val, LockWord new_val) {
- // Force use of non-transactional mode and do not check.
- return CasFieldWeakAcquire32<false, false>(
- OFFSET_OF_OBJECT_MEMBER(Object, monitor_), old_val.GetValue(), new_val.GetValue());
-}
-
inline uint32_t Object::GetLockOwnerThreadId() {
return Monitor::GetLockOwnerThreadId(this);
}
@@ -119,19 +104,12 @@ inline void Object::Wait(Thread* self, int64_t ms, int32_t ns) {
}
inline uint32_t Object::GetMarkBit() {
-#ifdef USE_READ_BARRIER
+ CHECK(kUseReadBarrier);
return GetLockWord(false).MarkBitState();
-#else
- LOG(FATAL) << "Unreachable";
- UNREACHABLE();
-#endif
}
inline void Object::SetReadBarrierState(uint32_t rb_state) {
- if (!kUseBakerReadBarrier) {
- LOG(FATAL) << "Unreachable";
- UNREACHABLE();
- }
+ CHECK(kUseBakerReadBarrier);
DCHECK(ReadBarrier::IsValidReadBarrierState(rb_state)) << rb_state;
LockWord lw = GetLockWord(false);
lw.SetReadBarrierState(rb_state);
@@ -141,9 +119,8 @@ inline void Object::SetReadBarrierState(uint32_t rb_state) {
inline void Object::AssertReadBarrierState() const {
CHECK(kUseBakerReadBarrier);
Object* obj = const_cast<Object*>(this);
- DCHECK(obj->GetReadBarrierState() == ReadBarrier::WhiteState())
- << "Bad Baker pointer: obj=" << reinterpret_cast<void*>(obj)
- << " rb_state" << reinterpret_cast<void*>(obj->GetReadBarrierState());
+ DCHECK_EQ(obj->GetReadBarrierState(), ReadBarrier::WhiteState())
+ << "Bad Baker pointer: obj=" << obj << " rb_state" << obj->GetReadBarrierState();
}
template<VerifyObjectFlags kVerifyFlags>
@@ -156,17 +133,16 @@ inline bool Object::VerifierInstanceOf(ObjPtr<Class> klass) {
template<VerifyObjectFlags kVerifyFlags>
inline bool Object::InstanceOf(ObjPtr<Class> klass) {
DCHECK(klass != nullptr);
- DCHECK(GetClass<kVerifyNone>() != nullptr)
- << "this=" << std::hex << reinterpret_cast<uintptr_t>(this) << std::dec;
+ DCHECK(GetClass<kVerifyNone>() != nullptr) << "this=" << this;
return klass->IsAssignableFrom(GetClass<kVerifyFlags>());
}
template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline bool Object::IsClass() {
+ constexpr auto kNewFlags = RemoveThisFlags(kVerifyFlags);
Class* java_lang_Class = GetClass<kVerifyFlags, kReadBarrierOption>()->
template GetClass<kVerifyFlags, kReadBarrierOption>();
- return GetClass<static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis),
- kReadBarrierOption>() == java_lang_Class;
+ return GetClass<kNewFlags, kReadBarrierOption>() == java_lang_Class;
}
template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
@@ -177,7 +153,7 @@ inline Class* Object::AsClass() {
template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline bool Object::IsObjectArray() {
- constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
+ constexpr auto kNewFlags = RemoveThisFlags(kVerifyFlags);
return IsArrayInstance<kVerifyFlags, kReadBarrierOption>() &&
!GetClass<kNewFlags, kReadBarrierOption>()->
template GetComponentType<kNewFlags, kReadBarrierOption>()->IsPrimitive();
@@ -214,7 +190,7 @@ inline Array* Object::AsArray() {
template<VerifyObjectFlags kVerifyFlags>
inline BooleanArray* Object::AsBooleanArray() {
- constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
+ constexpr auto kNewFlags = RemoveThisFlags(kVerifyFlags);
DCHECK(GetClass<kVerifyFlags>()->IsArrayClass());
DCHECK(GetClass<kNewFlags>()->GetComponentType()->IsPrimitiveBoolean());
return down_cast<BooleanArray*>(this);
@@ -222,7 +198,7 @@ inline BooleanArray* Object::AsBooleanArray() {
template<VerifyObjectFlags kVerifyFlags>
inline ByteArray* Object::AsByteArray() {
- constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
+ constexpr auto kNewFlags = RemoveThisFlags(kVerifyFlags);
DCHECK(GetClass<kVerifyFlags>()->IsArrayClass());
DCHECK(GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveByte());
return down_cast<ByteArray*>(this);
@@ -230,7 +206,7 @@ inline ByteArray* Object::AsByteArray() {
template<VerifyObjectFlags kVerifyFlags>
inline ByteArray* Object::AsByteSizedArray() {
- constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
+ constexpr auto kNewFlags = RemoveThisFlags(kVerifyFlags);
DCHECK(GetClass<kVerifyFlags>()->IsArrayClass());
DCHECK(GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveByte() ||
GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveBoolean());
@@ -239,7 +215,7 @@ inline ByteArray* Object::AsByteSizedArray() {
template<VerifyObjectFlags kVerifyFlags>
inline CharArray* Object::AsCharArray() {
- constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
+ constexpr auto kNewFlags = RemoveThisFlags(kVerifyFlags);
DCHECK(GetClass<kVerifyFlags>()->IsArrayClass());
DCHECK(GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveChar());
return down_cast<CharArray*>(this);
@@ -247,7 +223,7 @@ inline CharArray* Object::AsCharArray() {
template<VerifyObjectFlags kVerifyFlags>
inline ShortArray* Object::AsShortArray() {
- constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
+ constexpr auto kNewFlags = RemoveThisFlags(kVerifyFlags);
DCHECK(GetClass<kVerifyFlags>()->IsArrayClass());
DCHECK(GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveShort());
return down_cast<ShortArray*>(this);
@@ -255,7 +231,7 @@ inline ShortArray* Object::AsShortArray() {
template<VerifyObjectFlags kVerifyFlags>
inline ShortArray* Object::AsShortSizedArray() {
- constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
+ constexpr auto kNewFlags = RemoveThisFlags(kVerifyFlags);
DCHECK(GetClass<kVerifyFlags>()->IsArrayClass());
DCHECK(GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveShort() ||
GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveChar());
@@ -264,7 +240,7 @@ inline ShortArray* Object::AsShortSizedArray() {
template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline bool Object::IsIntArray() {
- constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
+ constexpr auto kNewFlags = RemoveThisFlags(kVerifyFlags);
ObjPtr<Class> klass = GetClass<kVerifyFlags, kReadBarrierOption>();
ObjPtr<Class> component_type = klass->GetComponentType<kVerifyFlags, kReadBarrierOption>();
return component_type != nullptr && component_type->template IsPrimitiveInt<kNewFlags>();
@@ -278,7 +254,7 @@ inline IntArray* Object::AsIntArray() {
template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline bool Object::IsLongArray() {
- constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
+ constexpr auto kNewFlags = RemoveThisFlags(kVerifyFlags);
ObjPtr<Class> klass = GetClass<kVerifyFlags, kReadBarrierOption>();
ObjPtr<Class> component_type = klass->GetComponentType<kVerifyFlags, kReadBarrierOption>();
return component_type != nullptr && component_type->template IsPrimitiveLong<kNewFlags>();
@@ -292,7 +268,7 @@ inline LongArray* Object::AsLongArray() {
template<VerifyObjectFlags kVerifyFlags>
inline bool Object::IsFloatArray() {
- constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
+ constexpr auto kNewFlags = RemoveThisFlags(kVerifyFlags);
auto* component_type = GetClass<kVerifyFlags>()->GetComponentType();
return component_type != nullptr && component_type->template IsPrimitiveFloat<kNewFlags>();
}
@@ -300,7 +276,7 @@ inline bool Object::IsFloatArray() {
template<VerifyObjectFlags kVerifyFlags>
inline FloatArray* Object::AsFloatArray() {
DCHECK(IsFloatArray<kVerifyFlags>());
- constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
+ constexpr auto kNewFlags = RemoveThisFlags(kVerifyFlags);
DCHECK(GetClass<kVerifyFlags>()->IsArrayClass());
DCHECK(GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveFloat());
return down_cast<FloatArray*>(this);
@@ -308,7 +284,7 @@ inline FloatArray* Object::AsFloatArray() {
template<VerifyObjectFlags kVerifyFlags>
inline bool Object::IsDoubleArray() {
- constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
+ constexpr auto kNewFlags = RemoveThisFlags(kVerifyFlags);
auto* component_type = GetClass<kVerifyFlags>()->GetComponentType();
return component_type != nullptr && component_type->template IsPrimitiveDouble<kNewFlags>();
}
@@ -316,7 +292,7 @@ inline bool Object::IsDoubleArray() {
template<VerifyObjectFlags kVerifyFlags>
inline DoubleArray* Object::AsDoubleArray() {
DCHECK(IsDoubleArray<kVerifyFlags>());
- constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
+ constexpr auto kNewFlags = RemoveThisFlags(kVerifyFlags);
DCHECK(GetClass<kVerifyFlags>()->IsArrayClass());
DCHECK(GetClass<kNewFlags>()->template GetComponentType<kNewFlags>()->IsPrimitiveDouble());
return down_cast<DoubleArray*>(this);
@@ -369,32 +345,25 @@ template<VerifyObjectFlags kVerifyFlags>
inline size_t Object::SizeOf() {
// Read barrier is never required for SizeOf since objects sizes are constant. Reading from-space
// values is OK because of that.
- static constexpr ReadBarrierOption kReadBarrierOption = kWithoutReadBarrier;
+ static constexpr ReadBarrierOption kRBO = kWithoutReadBarrier;
size_t result;
- constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
- if (IsArrayInstance<kVerifyFlags, kReadBarrierOption>()) {
- result = AsArray<kNewFlags, kReadBarrierOption>()->
- template SizeOf<kNewFlags, kReadBarrierOption>();
- } else if (IsClass<kNewFlags, kReadBarrierOption>()) {
- result = AsClass<kNewFlags, kReadBarrierOption>()->
- template SizeOf<kNewFlags, kReadBarrierOption>();
- } else if (GetClass<kNewFlags, kReadBarrierOption>()->IsStringClass()) {
- result = AsString<kNewFlags, kReadBarrierOption>()->
- template SizeOf<kNewFlags>();
+ constexpr auto kNewFlags = RemoveThisFlags(kVerifyFlags);
+ if (IsArrayInstance<kVerifyFlags, kRBO>()) {
+ result = AsArray<kNewFlags, kRBO>()->template SizeOf<kNewFlags, kRBO>();
+ } else if (IsClass<kNewFlags, kRBO>()) {
+ result = AsClass<kNewFlags, kRBO>()->template SizeOf<kNewFlags, kRBO>();
+ } else if (GetClass<kNewFlags, kRBO>()->IsStringClass()) {
+ result = AsString<kNewFlags, kRBO>()->template SizeOf<kNewFlags>();
} else {
- result = GetClass<kNewFlags, kReadBarrierOption>()->
- template GetObjectSize<kNewFlags, kReadBarrierOption>();
+ result = GetClass<kNewFlags, kRBO>()->template GetObjectSize<kNewFlags, kRBO>();
}
- DCHECK_GE(result, sizeof(Object))
- << " class=" << Class::PrettyClass(GetClass<kNewFlags, kReadBarrierOption>());
+ DCHECK_GE(result, sizeof(Object)) << " class=" << Class::PrettyClass(GetClass<kNewFlags, kRBO>());
return result;
}
template<VerifyObjectFlags kVerifyFlags, bool kIsVolatile>
inline int8_t Object::GetFieldByte(MemberOffset field_offset) {
- if (kVerifyFlags & kVerifyThis) {
- VerifyObject(this);
- }
+ Verify<kVerifyFlags>();
return GetField<int8_t, kIsVolatile>(field_offset);
}
@@ -412,11 +381,8 @@ template<bool kTransactionActive,
bool kCheckTransaction,
VerifyObjectFlags kVerifyFlags,
bool kIsVolatile>
-inline void Object::SetFieldBoolean(MemberOffset field_offset, uint8_t new_value)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- if (kCheckTransaction) {
- DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
- }
+inline void Object::SetFieldBoolean(MemberOffset field_offset, uint8_t new_value) {
+ VerifyTransaction<kTransactionActive, kCheckTransaction>();
if (kTransactionActive) {
Runtime::Current()->RecordWriteFieldBoolean(
this,
@@ -424,9 +390,7 @@ inline void Object::SetFieldBoolean(MemberOffset field_offset, uint8_t new_value
GetFieldBoolean<kVerifyFlags, kIsVolatile>(field_offset),
kIsVolatile);
}
- if (kVerifyFlags & kVerifyThis) {
- VerifyObject(this);
- }
+ Verify<kVerifyFlags>();
SetField<uint8_t, kIsVolatile>(field_offset, new_value);
}
@@ -434,20 +398,15 @@ template<bool kTransactionActive,
bool kCheckTransaction,
VerifyObjectFlags kVerifyFlags,
bool kIsVolatile>
-inline void Object::SetFieldByte(MemberOffset field_offset, int8_t new_value)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- if (kCheckTransaction) {
- DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
- }
+inline void Object::SetFieldByte(MemberOffset field_offset, int8_t new_value) {
+ VerifyTransaction<kTransactionActive, kCheckTransaction>();
if (kTransactionActive) {
Runtime::Current()->RecordWriteFieldByte(this,
field_offset,
GetFieldByte<kVerifyFlags, kIsVolatile>(field_offset),
kIsVolatile);
}
- if (kVerifyFlags & kVerifyThis) {
- VerifyObject(this);
- }
+ Verify<kVerifyFlags>();
SetField<int8_t, kIsVolatile>(field_offset, new_value);
}
@@ -465,17 +424,13 @@ inline void Object::SetFieldByteVolatile(MemberOffset field_offset, int8_t new_v
template<VerifyObjectFlags kVerifyFlags, bool kIsVolatile>
inline uint16_t Object::GetFieldChar(MemberOffset field_offset) {
- if (kVerifyFlags & kVerifyThis) {
- VerifyObject(this);
- }
+ Verify<kVerifyFlags>();
return GetField<uint16_t, kIsVolatile>(field_offset);
}
template<VerifyObjectFlags kVerifyFlags, bool kIsVolatile>
inline int16_t Object::GetFieldShort(MemberOffset field_offset) {
- if (kVerifyFlags & kVerifyThis) {
- VerifyObject(this);
- }
+ Verify<kVerifyFlags>();
return GetField<int16_t, kIsVolatile>(field_offset);
}
@@ -494,18 +449,14 @@ template<bool kTransactionActive,
VerifyObjectFlags kVerifyFlags,
bool kIsVolatile>
inline void Object::SetFieldChar(MemberOffset field_offset, uint16_t new_value) {
- if (kCheckTransaction) {
- DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
- }
+ VerifyTransaction<kTransactionActive, kCheckTransaction>();
if (kTransactionActive) {
Runtime::Current()->RecordWriteFieldChar(this,
field_offset,
GetFieldChar<kVerifyFlags, kIsVolatile>(field_offset),
kIsVolatile);
}
- if (kVerifyFlags & kVerifyThis) {
- VerifyObject(this);
- }
+ Verify<kVerifyFlags>();
SetField<uint16_t, kIsVolatile>(field_offset, new_value);
}
@@ -514,18 +465,14 @@ template<bool kTransactionActive,
VerifyObjectFlags kVerifyFlags,
bool kIsVolatile>
inline void Object::SetFieldShort(MemberOffset field_offset, int16_t new_value) {
- if (kCheckTransaction) {
- DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
- }
+ VerifyTransaction<kTransactionActive, kCheckTransaction>();
if (kTransactionActive) {
Runtime::Current()->RecordWriteFieldChar(this,
field_offset,
GetFieldShort<kVerifyFlags, kIsVolatile>(field_offset),
kIsVolatile);
}
- if (kVerifyFlags & kVerifyThis) {
- VerifyObject(this);
- }
+ Verify<kVerifyFlags>();
SetField<int16_t, kIsVolatile>(field_offset, new_value);
}
@@ -546,18 +493,14 @@ template<bool kTransactionActive,
VerifyObjectFlags kVerifyFlags,
bool kIsVolatile>
inline void Object::SetField32(MemberOffset field_offset, int32_t new_value) {
- if (kCheckTransaction) {
- DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
- }
+ VerifyTransaction<kTransactionActive, kCheckTransaction>();
if (kTransactionActive) {
Runtime::Current()->RecordWriteField32(this,
field_offset,
GetField32<kVerifyFlags, kIsVolatile>(field_offset),
kIsVolatile);
}
- if (kVerifyFlags & kVerifyThis) {
- VerifyObject(this);
- }
+ Verify<kVerifyFlags>();
SetField<int32_t, kIsVolatile>(field_offset, new_value);
}
@@ -575,101 +518,19 @@ inline void Object::SetField32Transaction(MemberOffset field_offset, int32_t new
}
}
-// TODO: Pass memory_order_ and strong/weak as arguments to avoid code duplication?
-
-template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
-inline bool Object::CasFieldWeakSequentiallyConsistent32(MemberOffset field_offset,
- int32_t old_value,
- int32_t new_value) {
- if (kCheckTransaction) {
- DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
- }
- if (kTransactionActive) {
- Runtime::Current()->RecordWriteField32(this, field_offset, old_value, true);
- }
- if (kVerifyFlags & kVerifyThis) {
- VerifyObject(this);
- }
- uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
- AtomicInteger* atomic_addr = reinterpret_cast<AtomicInteger*>(raw_addr);
-
- return atomic_addr->CompareAndSetWeakSequentiallyConsistent(old_value, new_value);
-}
-
-template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
-inline bool Object::CasFieldWeakAcquire32(MemberOffset field_offset,
- int32_t old_value,
- int32_t new_value) {
- if (kCheckTransaction) {
- DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
- }
- if (kTransactionActive) {
- Runtime::Current()->RecordWriteField32(this, field_offset, old_value, true);
- }
- if (kVerifyFlags & kVerifyThis) {
- VerifyObject(this);
- }
- uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
- AtomicInteger* atomic_addr = reinterpret_cast<AtomicInteger*>(raw_addr);
-
- return atomic_addr->CompareAndSetWeakAcquire(old_value, new_value);
-}
-
-template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
-inline bool Object::CasFieldWeakRelease32(MemberOffset field_offset,
- int32_t old_value,
- int32_t new_value) {
- if (kCheckTransaction) {
- DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
- }
- if (kTransactionActive) {
- Runtime::Current()->RecordWriteField32(this, field_offset, old_value, true);
- }
- if (kVerifyFlags & kVerifyThis) {
- VerifyObject(this);
- }
- uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
- AtomicInteger* atomic_addr = reinterpret_cast<AtomicInteger*>(raw_addr);
-
- return atomic_addr->CompareAndSetWeakRelease(old_value, new_value);
-}
-
-template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
-inline bool Object::CasFieldStrongSequentiallyConsistent32(MemberOffset field_offset,
- int32_t old_value,
- int32_t new_value) {
- if (kCheckTransaction) {
- DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
- }
- if (kTransactionActive) {
- Runtime::Current()->RecordWriteField32(this, field_offset, old_value, true);
- }
- if (kVerifyFlags & kVerifyThis) {
- VerifyObject(this);
- }
- uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
- AtomicInteger* atomic_addr = reinterpret_cast<AtomicInteger*>(raw_addr);
-
- return atomic_addr->CompareAndSetStrongSequentiallyConsistent(old_value, new_value);
-}
-
template<bool kTransactionActive,
bool kCheckTransaction,
VerifyObjectFlags kVerifyFlags,
bool kIsVolatile>
inline void Object::SetField64(MemberOffset field_offset, int64_t new_value) {
- if (kCheckTransaction) {
- DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
- }
+ VerifyTransaction<kTransactionActive, kCheckTransaction>();
if (kTransactionActive) {
Runtime::Current()->RecordWriteField64(this,
field_offset,
GetField64<kVerifyFlags, kIsVolatile>(field_offset),
kIsVolatile);
}
- if (kVerifyFlags & kVerifyThis) {
- VerifyObject(this);
- }
+ Verify<kVerifyFlags>();
SetField<int64_t, kIsVolatile>(field_offset, new_value);
}
@@ -699,15 +560,11 @@ template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVer
inline bool Object::CasFieldWeakSequentiallyConsistent64(MemberOffset field_offset,
int64_t old_value,
int64_t new_value) {
- if (kCheckTransaction) {
- DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
- }
+ VerifyTransaction<kTransactionActive, kCheckTransaction>();
if (kTransactionActive) {
Runtime::Current()->RecordWriteField64(this, field_offset, old_value, true);
}
- if (kVerifyFlags & kVerifyThis) {
- VerifyObject(this);
- }
+ Verify<kVerifyFlags>();
uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
Atomic<int64_t>* atomic_addr = reinterpret_cast<Atomic<int64_t>*>(raw_addr);
return atomic_addr->CompareAndSetWeakSequentiallyConsistent(old_value, new_value);
@@ -717,15 +574,11 @@ template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVer
inline bool Object::CasFieldStrongSequentiallyConsistent64(MemberOffset field_offset,
int64_t old_value,
int64_t new_value) {
- if (kCheckTransaction) {
- DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
- }
+ VerifyTransaction<kTransactionActive, kCheckTransaction>();
if (kTransactionActive) {
Runtime::Current()->RecordWriteField64(this, field_offset, old_value, true);
}
- if (kVerifyFlags & kVerifyThis) {
- VerifyObject(this);
- }
+ Verify<kVerifyFlags>();
uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
Atomic<int64_t>* atomic_addr = reinterpret_cast<Atomic<int64_t>*>(raw_addr);
return atomic_addr->CompareAndSetStrongSequentiallyConsistent(old_value, new_value);
@@ -736,18 +589,14 @@ template<class T,
ReadBarrierOption kReadBarrierOption,
bool kIsVolatile>
inline T* Object::GetFieldObject(MemberOffset field_offset) {
- if (kVerifyFlags & kVerifyThis) {
- VerifyObject(this);
- }
+ Verify<kVerifyFlags>();
uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
HeapReference<T>* objref_addr = reinterpret_cast<HeapReference<T>*>(raw_addr);
T* result = ReadBarrier::Barrier<T, kIsVolatile, kReadBarrierOption>(
this,
field_offset,
objref_addr);
- if (kVerifyFlags & kVerifyReads) {
- VerifyObject(result);
- }
+ VerifyRead<kVerifyFlags>(result);
return result;
}
@@ -762,9 +611,7 @@ template<bool kTransactionActive,
bool kIsVolatile>
inline void Object::SetFieldObjectWithoutWriteBarrier(MemberOffset field_offset,
ObjPtr<Object> new_value) {
- if (kCheckTransaction) {
- DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
- }
+ VerifyTransaction<kTransactionActive, kCheckTransaction>();
if (kTransactionActive) {
ObjPtr<Object> obj;
if (kIsVolatile) {
@@ -774,12 +621,8 @@ inline void Object::SetFieldObjectWithoutWriteBarrier(MemberOffset field_offset,
}
Runtime::Current()->RecordWriteFieldReference(this, field_offset, obj, true);
}
- if (kVerifyFlags & kVerifyThis) {
- VerifyObject(this);
- }
- if (kVerifyFlags & kVerifyWrites) {
- VerifyObject(new_value);
- }
+ Verify<kVerifyFlags>();
+ VerifyWrite<kVerifyFlags>(new_value);
uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
HeapReference<Object>* objref_addr = reinterpret_cast<HeapReference<Object>*>(raw_addr);
objref_addr->Assign<kIsVolatile>(new_value.Ptr());
@@ -793,7 +636,7 @@ inline void Object::SetFieldObject(MemberOffset field_offset, ObjPtr<Object> new
SetFieldObjectWithoutWriteBarrier<kTransactionActive, kCheckTransaction, kVerifyFlags,
kIsVolatile>(field_offset, new_value);
if (new_value != nullptr) {
- Runtime::Current()->GetHeap()->WriteBarrierField(this, field_offset, new_value);
+ WriteBarrier::ForFieldWrite<WriteBarrier::kWithoutNullCheck>(this, field_offset, new_value);
// TODO: Check field assignment could theoretically cause thread suspension, TODO: fix this.
CheckFieldAssignment(field_offset, new_value);
}
@@ -816,42 +659,19 @@ inline void Object::SetFieldObjectTransaction(MemberOffset field_offset, ObjPtr<
template <VerifyObjectFlags kVerifyFlags>
inline HeapReference<Object>* Object::GetFieldObjectReferenceAddr(MemberOffset field_offset) {
- if (kVerifyFlags & kVerifyThis) {
- VerifyObject(this);
- }
+ Verify<kVerifyFlags>();
return reinterpret_cast<HeapReference<Object>*>(reinterpret_cast<uint8_t*>(this) +
field_offset.Int32Value());
}
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
-inline bool Object::CasFieldWeakSequentiallyConsistentObject(MemberOffset field_offset,
- ObjPtr<Object> old_value,
- ObjPtr<Object> new_value) {
- bool success = CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier<
- kTransactionActive, kCheckTransaction, kVerifyFlags>(field_offset, old_value, new_value);
- if (success) {
- Runtime::Current()->GetHeap()->WriteBarrierField(this, field_offset, new_value);
- }
- return success;
-}
-
-template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
-inline bool Object::CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier(
- MemberOffset field_offset,
- ObjPtr<Object> old_value,
- ObjPtr<Object> new_value) {
- if (kCheckTransaction) {
- DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
- }
- if (kVerifyFlags & kVerifyThis) {
- VerifyObject(this);
- }
- if (kVerifyFlags & kVerifyWrites) {
- VerifyObject(new_value);
- }
- if (kVerifyFlags & kVerifyReads) {
- VerifyObject(old_value);
- }
+inline bool Object::CasFieldObjectWithoutWriteBarrier(MemberOffset field_offset,
+ ObjPtr<Object> old_value,
+ ObjPtr<Object> new_value,
+ CASMode mode,
+ std::memory_order memory_order) {
+ VerifyTransaction<kTransactionActive, kCheckTransaction>();
+ VerifyCAS<kVerifyFlags>(new_value, old_value);
if (kTransactionActive) {
Runtime::Current()->RecordWriteFieldReference(this, field_offset, old_value, true);
}
@@ -859,107 +679,24 @@ inline bool Object::CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier(
uint32_t new_ref(PtrCompression<kPoisonHeapReferences, Object>::Compress(new_value));
uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
-
- bool success = atomic_addr->CompareAndSetWeakSequentiallyConsistent(old_ref, new_ref);
- return success;
+ return atomic_addr->CompareAndSet(old_ref, new_ref, mode, memory_order);
}
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
-inline bool Object::CasFieldStrongSequentiallyConsistentObject(MemberOffset field_offset,
- ObjPtr<Object> old_value,
- ObjPtr<Object> new_value) {
- bool success = CasFieldStrongSequentiallyConsistentObjectWithoutWriteBarrier<
- kTransactionActive, kCheckTransaction, kVerifyFlags>(field_offset, old_value, new_value);
+inline bool Object::CasFieldObject(MemberOffset field_offset,
+ ObjPtr<Object> old_value,
+ ObjPtr<Object> new_value,
+ CASMode mode,
+ std::memory_order memory_order) {
+ bool success = CasFieldObjectWithoutWriteBarrier<
+ kTransactionActive, kCheckTransaction, kVerifyFlags>(field_offset,
+ old_value,
+ new_value,
+ mode,
+ memory_order);
if (success) {
- Runtime::Current()->GetHeap()->WriteBarrierField(this, field_offset, new_value);
- }
- return success;
-}
-
-template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
-inline bool Object::CasFieldStrongSequentiallyConsistentObjectWithoutWriteBarrier(
- MemberOffset field_offset,
- ObjPtr<Object> old_value,
- ObjPtr<Object> new_value) {
- if (kCheckTransaction) {
- DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
- }
- if (kVerifyFlags & kVerifyThis) {
- VerifyObject(this);
- }
- if (kVerifyFlags & kVerifyWrites) {
- VerifyObject(new_value);
- }
- if (kVerifyFlags & kVerifyReads) {
- VerifyObject(old_value);
- }
- if (kTransactionActive) {
- Runtime::Current()->RecordWriteFieldReference(this, field_offset, old_value, true);
- }
- uint32_t old_ref(PtrCompression<kPoisonHeapReferences, Object>::Compress(old_value));
- uint32_t new_ref(PtrCompression<kPoisonHeapReferences, Object>::Compress(new_value));
- uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
- Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
-
- bool success = atomic_addr->CompareAndSetStrongSequentiallyConsistent(old_ref, new_ref);
- return success;
-}
-
-template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
-inline bool Object::CasFieldWeakRelaxedObjectWithoutWriteBarrier(
- MemberOffset field_offset,
- ObjPtr<Object> old_value,
- ObjPtr<Object> new_value) {
- if (kCheckTransaction) {
- DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
- }
- if (kVerifyFlags & kVerifyThis) {
- VerifyObject(this);
- }
- if (kVerifyFlags & kVerifyWrites) {
- VerifyObject(new_value);
- }
- if (kVerifyFlags & kVerifyReads) {
- VerifyObject(old_value);
- }
- if (kTransactionActive) {
- Runtime::Current()->RecordWriteFieldReference(this, field_offset, old_value, true);
- }
- uint32_t old_ref(PtrCompression<kPoisonHeapReferences, Object>::Compress(old_value));
- uint32_t new_ref(PtrCompression<kPoisonHeapReferences, Object>::Compress(new_value));
- uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
- Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
-
- bool success = atomic_addr->CompareAndSetWeakRelaxed(old_ref, new_ref);
- return success;
-}
-
-template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
-inline bool Object::CasFieldWeakReleaseObjectWithoutWriteBarrier(
- MemberOffset field_offset,
- ObjPtr<Object> old_value,
- ObjPtr<Object> new_value) {
- if (kCheckTransaction) {
- DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
- }
- if (kVerifyFlags & kVerifyThis) {
- VerifyObject(this);
- }
- if (kVerifyFlags & kVerifyWrites) {
- VerifyObject(new_value);
- }
- if (kVerifyFlags & kVerifyReads) {
- VerifyObject(old_value);
- }
- if (kTransactionActive) {
- Runtime::Current()->RecordWriteFieldReference(this, field_offset, old_value, true);
+ WriteBarrier::ForFieldWrite(this, field_offset, new_value);
}
- uint32_t old_ref(PtrCompression<kPoisonHeapReferences, Object>::Compress(old_value));
- uint32_t new_ref(PtrCompression<kPoisonHeapReferences, Object>::Compress(new_value));
- uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
- Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
-
- bool success = atomic_addr->CompareAndSetWeakRelease(old_ref, new_ref);
return success;
}
@@ -967,18 +704,8 @@ template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVer
inline ObjPtr<Object> Object::CompareAndExchangeFieldObject(MemberOffset field_offset,
ObjPtr<Object> old_value,
ObjPtr<Object> new_value) {
- if (kCheckTransaction) {
- DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
- }
- if (kVerifyFlags & kVerifyThis) {
- VerifyObject(this);
- }
- if (kVerifyFlags & kVerifyWrites) {
- VerifyObject(new_value);
- }
- if (kVerifyFlags & kVerifyReads) {
- VerifyObject(old_value);
- }
+ VerifyTransaction<kTransactionActive, kCheckTransaction>();
+ VerifyCAS<kVerifyFlags>(new_value, old_value);
uint32_t old_ref(PtrCompression<kPoisonHeapReferences, Object>::Compress(old_value));
uint32_t new_ref(PtrCompression<kPoisonHeapReferences, Object>::Compress(new_value));
uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
@@ -989,27 +716,22 @@ inline ObjPtr<Object> Object::CompareAndExchangeFieldObject(MemberOffset field_o
// Ensure caller has done read barrier on the reference field so it's in the to-space.
ReadBarrier::AssertToSpaceInvariant(witness_value.Ptr());
}
- if (kTransactionActive && success) {
- Runtime::Current()->RecordWriteFieldReference(this, field_offset, witness_value, true);
- }
- if (kVerifyFlags & kVerifyReads) {
- VerifyObject(witness_value);
+ if (success) {
+ if (kTransactionActive) {
+ Runtime::Current()->RecordWriteFieldReference(this, field_offset, witness_value, true);
+ }
+ WriteBarrier::ForFieldWrite(this, field_offset, new_value);
}
+ VerifyRead<kVerifyFlags>(witness_value);
return witness_value;
}
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
inline ObjPtr<Object> Object::ExchangeFieldObject(MemberOffset field_offset,
ObjPtr<Object> new_value) {
- if (kCheckTransaction) {
- DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
- }
- if (kVerifyFlags & kVerifyThis) {
- VerifyObject(this);
- }
- if (kVerifyFlags & kVerifyWrites) {
- VerifyObject(new_value);
- }
+ VerifyTransaction<kTransactionActive, kCheckTransaction>();
+ VerifyCAS<kVerifyFlags>(new_value, /*old_value*/ nullptr);
+
uint32_t new_ref(PtrCompression<kPoisonHeapReferences, Object>::Compress(new_value));
uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
@@ -1022,17 +744,14 @@ inline ObjPtr<Object> Object::ExchangeFieldObject(MemberOffset field_offset,
if (kTransactionActive) {
Runtime::Current()->RecordWriteFieldReference(this, field_offset, old_value, true);
}
- if (kVerifyFlags & kVerifyReads) {
- VerifyObject(old_value);
- }
+ WriteBarrier::ForFieldWrite(this, field_offset, new_value);
+ VerifyRead<kVerifyFlags>(old_value);
return old_value;
}
template<typename T, VerifyObjectFlags kVerifyFlags>
inline void Object::GetPrimitiveFieldViaAccessor(MemberOffset field_offset, Accessor<T>* accessor) {
- if (kVerifyFlags & kVerifyThis) {
- VerifyObject(this);
- }
+ Verify<kVerifyFlags>();
uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
T* addr = reinterpret_cast<T*>(raw_addr);
accessor->Access(addr);
@@ -1041,17 +760,13 @@ inline void Object::GetPrimitiveFieldViaAccessor(MemberOffset field_offset, Acce
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
inline void Object::UpdateFieldBooleanViaAccessor(MemberOffset field_offset,
Accessor<uint8_t>* accessor) {
- if (kCheckTransaction) {
- DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
- }
+ VerifyTransaction<kTransactionActive, kCheckTransaction>();
if (kTransactionActive) {
static const bool kIsVolatile = true;
uint8_t old_value = GetFieldBoolean<kVerifyFlags, kIsVolatile>(field_offset);
Runtime::Current()->RecordWriteFieldBoolean(this, field_offset, old_value, kIsVolatile);
}
- if (kVerifyFlags & kVerifyThis) {
- VerifyObject(this);
- }
+ Verify<kVerifyFlags>();
uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
uint8_t* addr = raw_addr;
accessor->Access(addr);
@@ -1060,17 +775,13 @@ inline void Object::UpdateFieldBooleanViaAccessor(MemberOffset field_offset,
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
inline void Object::UpdateFieldByteViaAccessor(MemberOffset field_offset,
Accessor<int8_t>* accessor) {
- if (kCheckTransaction) {
- DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
- }
+ VerifyTransaction<kTransactionActive, kCheckTransaction>();
if (kTransactionActive) {
static const bool kIsVolatile = true;
int8_t old_value = GetFieldByte<kVerifyFlags, kIsVolatile>(field_offset);
Runtime::Current()->RecordWriteFieldByte(this, field_offset, old_value, kIsVolatile);
}
- if (kVerifyFlags & kVerifyThis) {
- VerifyObject(this);
- }
+ Verify<kVerifyFlags>();
uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
int8_t* addr = reinterpret_cast<int8_t*>(raw_addr);
accessor->Access(addr);
@@ -1079,17 +790,13 @@ inline void Object::UpdateFieldByteViaAccessor(MemberOffset field_offset,
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
inline void Object::UpdateFieldCharViaAccessor(MemberOffset field_offset,
Accessor<uint16_t>* accessor) {
- if (kCheckTransaction) {
- DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
- }
+ VerifyTransaction<kTransactionActive, kCheckTransaction>();
if (kTransactionActive) {
static const bool kIsVolatile = true;
uint16_t old_value = GetFieldChar<kVerifyFlags, kIsVolatile>(field_offset);
Runtime::Current()->RecordWriteFieldChar(this, field_offset, old_value, kIsVolatile);
}
- if (kVerifyFlags & kVerifyThis) {
- VerifyObject(this);
- }
+ Verify<kVerifyFlags>();
uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
uint16_t* addr = reinterpret_cast<uint16_t*>(raw_addr);
accessor->Access(addr);
@@ -1098,17 +805,13 @@ inline void Object::UpdateFieldCharViaAccessor(MemberOffset field_offset,
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
inline void Object::UpdateFieldShortViaAccessor(MemberOffset field_offset,
Accessor<int16_t>* accessor) {
- if (kCheckTransaction) {
- DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
- }
+ VerifyTransaction<kTransactionActive, kCheckTransaction>();
if (kTransactionActive) {
static const bool kIsVolatile = true;
int16_t old_value = GetFieldShort<kVerifyFlags, kIsVolatile>(field_offset);
Runtime::Current()->RecordWriteFieldShort(this, field_offset, old_value, kIsVolatile);
}
- if (kVerifyFlags & kVerifyThis) {
- VerifyObject(this);
- }
+ Verify<kVerifyFlags>();
uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
int16_t* addr = reinterpret_cast<int16_t*>(raw_addr);
accessor->Access(addr);
@@ -1117,17 +820,13 @@ inline void Object::UpdateFieldShortViaAccessor(MemberOffset field_offset,
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
inline void Object::UpdateField32ViaAccessor(MemberOffset field_offset,
Accessor<int32_t>* accessor) {
- if (kCheckTransaction) {
- DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
- }
+ VerifyTransaction<kTransactionActive, kCheckTransaction>();
if (kTransactionActive) {
static const bool kIsVolatile = true;
int32_t old_value = GetField32<kVerifyFlags, kIsVolatile>(field_offset);
Runtime::Current()->RecordWriteField32(this, field_offset, old_value, kIsVolatile);
}
- if (kVerifyFlags & kVerifyThis) {
- VerifyObject(this);
- }
+ Verify<kVerifyFlags>();
uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
int32_t* addr = reinterpret_cast<int32_t*>(raw_addr);
accessor->Access(addr);
@@ -1136,17 +835,13 @@ inline void Object::UpdateField32ViaAccessor(MemberOffset field_offset,
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
inline void Object::UpdateField64ViaAccessor(MemberOffset field_offset,
Accessor<int64_t>* accessor) {
- if (kCheckTransaction) {
- DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
- }
+ VerifyTransaction<kTransactionActive, kCheckTransaction>();
if (kTransactionActive) {
static const bool kIsVolatile = true;
int64_t old_value = GetField64<kVerifyFlags, kIsVolatile>(field_offset);
Runtime::Current()->RecordWriteField64(this, field_offset, old_value, kIsVolatile);
}
- if (kVerifyFlags & kVerifyThis) {
- VerifyObject(this);
- }
+ Verify<kVerifyFlags>();
uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
int64_t* addr = reinterpret_cast<int64_t*>(raw_addr);
accessor->Access(addr);
@@ -1233,6 +928,13 @@ inline mirror::DexCache* Object::AsDexCache() {
return down_cast<mirror::DexCache*>(this);
}
+template<bool kTransactionActive, bool kCheckTransaction>
+inline void Object::VerifyTransaction() {
+ if (kCheckTransaction) {
+ DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
+ }
+}
+
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/object-readbarrier-inl.h b/runtime/mirror/object-readbarrier-inl.h
index aeaa850abe..df50d0613a 100644
--- a/runtime/mirror/object-readbarrier-inl.h
+++ b/runtime/mirror/object-readbarrier-inl.h
@@ -32,14 +32,17 @@ namespace mirror {
template<VerifyObjectFlags kVerifyFlags>
inline LockWord Object::GetLockWord(bool as_volatile) {
if (as_volatile) {
- return LockWord(GetField32Volatile<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Object, monitor_)));
+ return LockWord(GetField32Volatile<kVerifyFlags>(MonitorOffset()));
}
- return LockWord(GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Object, monitor_)));
+ return LockWord(GetField32<kVerifyFlags>(MonitorOffset()));
}
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
-inline bool Object::CasFieldWeakRelaxed32(MemberOffset field_offset,
- int32_t old_value, int32_t new_value) {
+inline bool Object::CasField32(MemberOffset field_offset,
+ int32_t old_value,
+ int32_t new_value,
+ CASMode mode,
+ std::memory_order memory_order) {
if (kCheckTransaction) {
DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
}
@@ -52,19 +55,19 @@ inline bool Object::CasFieldWeakRelaxed32(MemberOffset field_offset,
uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
AtomicInteger* atomic_addr = reinterpret_cast<AtomicInteger*>(raw_addr);
- return atomic_addr->CompareAndSetWeakRelaxed(old_value, new_value);
+ return atomic_addr->CompareAndSet(old_value, new_value, mode, memory_order);
}
-inline bool Object::CasLockWordWeakRelaxed(LockWord old_val, LockWord new_val) {
+inline bool Object::CasLockWord(LockWord old_val,
+ LockWord new_val,
+ CASMode mode,
+ std::memory_order memory_order) {
// Force use of non-transactional mode and do not check.
- return CasFieldWeakRelaxed32<false, false>(
- OFFSET_OF_OBJECT_MEMBER(Object, monitor_), old_val.GetValue(), new_val.GetValue());
-}
-
-inline bool Object::CasLockWordWeakRelease(LockWord old_val, LockWord new_val) {
- // Force use of non-transactional mode and do not check.
- return CasFieldWeakRelease32<false, false>(
- OFFSET_OF_OBJECT_MEMBER(Object, monitor_), old_val.GetValue(), new_val.GetValue());
+ return CasField32<false, false>(MonitorOffset(),
+ old_val.GetValue(),
+ new_val.GetValue(),
+ mode,
+ memory_order);
}
inline uint32_t Object::GetReadBarrierState(uintptr_t* fake_address_dependency) {
@@ -128,7 +131,7 @@ inline uint32_t Object::GetReadBarrierState() {
UNREACHABLE();
}
DCHECK(kUseBakerReadBarrier);
- LockWord lw(GetField<uint32_t, /*kIsVolatile*/false>(OFFSET_OF_OBJECT_MEMBER(Object, monitor_)));
+ LockWord lw(GetField<uint32_t, /*kIsVolatile*/false>(MonitorOffset()));
uint32_t rb_state = lw.ReadBarrierState();
DCHECK(ReadBarrier::IsValidReadBarrierState(rb_state)) << rb_state;
return rb_state;
@@ -139,13 +142,13 @@ inline uint32_t Object::GetReadBarrierStateAcquire() {
LOG(FATAL) << "Unreachable";
UNREACHABLE();
}
- LockWord lw(GetFieldAcquire<uint32_t>(OFFSET_OF_OBJECT_MEMBER(Object, monitor_)));
+ LockWord lw(GetFieldAcquire<uint32_t>(MonitorOffset()));
uint32_t rb_state = lw.ReadBarrierState();
DCHECK(ReadBarrier::IsValidReadBarrierState(rb_state)) << rb_state;
return rb_state;
}
-template<bool kCasRelease>
+template<std::memory_order kMemoryOrder>
inline bool Object::AtomicSetReadBarrierState(uint32_t expected_rb_state, uint32_t rb_state) {
if (!kUseBakerReadBarrier) {
LOG(FATAL) << "Unreachable";
@@ -169,9 +172,7 @@ inline bool Object::AtomicSetReadBarrierState(uint32_t expected_rb_state, uint32
// If kCasRelease == true, use a CAS release so that when GC updates all the fields of
// an object and then changes the object from gray to black, the field updates (stores) will be
// visible (won't be reordered after this CAS.)
- } while (!(kCasRelease ?
- CasLockWordWeakRelease(expected_lw, new_lw) :
- CasLockWordWeakRelaxed(expected_lw, new_lw)));
+ } while (!CasLockWord(expected_lw, new_lw, CASMode::kWeak, kMemoryOrder));
return true;
}
@@ -188,68 +189,10 @@ inline bool Object::AtomicSetMarkBit(uint32_t expected_mark_bit, uint32_t mark_b
new_lw = lw;
new_lw.SetMarkBitState(mark_bit);
// Since this is only set from the mutator, we can use the non-release CAS.
- } while (!CasLockWordWeakRelaxed(expected_lw, new_lw));
+ } while (!CasLockWord(expected_lw, new_lw, CASMode::kWeak, std::memory_order_relaxed));
return true;
}
-template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
-inline bool Object::CasFieldStrongRelaxedObjectWithoutWriteBarrier(
- MemberOffset field_offset,
- ObjPtr<Object> old_value,
- ObjPtr<Object> new_value) {
- if (kCheckTransaction) {
- DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
- }
- if (kVerifyFlags & kVerifyThis) {
- VerifyObject(this);
- }
- if (kVerifyFlags & kVerifyWrites) {
- VerifyObject(new_value);
- }
- if (kVerifyFlags & kVerifyReads) {
- VerifyObject(old_value);
- }
- if (kTransactionActive) {
- Runtime::Current()->RecordWriteFieldReference(this, field_offset, old_value, true);
- }
- uint32_t old_ref(PtrCompression<kPoisonHeapReferences, Object>::Compress(old_value));
- uint32_t new_ref(PtrCompression<kPoisonHeapReferences, Object>::Compress(new_value));
- uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
- Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
-
- bool success = atomic_addr->CompareAndSetStrongRelaxed(old_ref, new_ref);
- return success;
-}
-
-template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
-inline bool Object::CasFieldStrongReleaseObjectWithoutWriteBarrier(
- MemberOffset field_offset,
- ObjPtr<Object> old_value,
- ObjPtr<Object> new_value) {
- if (kCheckTransaction) {
- DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
- }
- if (kVerifyFlags & kVerifyThis) {
- VerifyObject(this);
- }
- if (kVerifyFlags & kVerifyWrites) {
- VerifyObject(new_value);
- }
- if (kVerifyFlags & kVerifyReads) {
- VerifyObject(old_value);
- }
- if (kTransactionActive) {
- Runtime::Current()->RecordWriteFieldReference(this, field_offset, old_value, true);
- }
- uint32_t old_ref(PtrCompression<kPoisonHeapReferences, Object>::Compress(old_value));
- uint32_t new_ref(PtrCompression<kPoisonHeapReferences, Object>::Compress(new_value));
- uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
- Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr);
-
- bool success = atomic_addr->CompareAndSetStrongRelease(old_ref, new_ref);
- return success;
-}
-
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index 4240e702b5..025c10bc2a 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -121,16 +121,15 @@ Object* Object::CopyObject(ObjPtr<mirror::Object> dest,
CopyReferenceFieldsWithReadBarrierVisitor visitor(dest);
src->VisitReferences(visitor, visitor);
}
- gc::Heap* heap = Runtime::Current()->GetHeap();
// Perform write barriers on copied object references.
ObjPtr<Class> c = src->GetClass();
if (c->IsArrayClass()) {
if (!c->GetComponentType()->IsPrimitive()) {
ObjectArray<Object>* array = dest->AsObjectArray<Object>();
- heap->WriteBarrierArray(dest, 0, array->GetLength());
+ WriteBarrier::ForArrayWrite(dest, 0, array->GetLength());
}
} else {
- heap->WriteBarrierEveryFieldOf(dest);
+ WriteBarrier::ForEveryFieldWrite(dest);
}
return dest.Ptr();
}
@@ -197,7 +196,9 @@ int32_t Object::IdentityHashCode() {
// loop iteration.
LockWord hash_word = LockWord::FromHashCode(GenerateIdentityHashCode(), lw.GCState());
DCHECK_EQ(hash_word.GetState(), LockWord::kHashCode);
- if (current_this->CasLockWordWeakRelaxed(lw, hash_word)) {
+ // Use a strong CAS to prevent spurious failures since these can make the boot image
+ // non-deterministic.
+ if (current_this->CasLockWord(lw, hash_word, CASMode::kStrong, std::memory_order_relaxed)) {
return hash_word.GetHashCode();
}
break;
@@ -281,10 +282,7 @@ ArtField* Object::FindFieldByOffset(MemberOffset offset) {
}
std::string Object::PrettyTypeOf(ObjPtr<mirror::Object> obj) {
- if (obj == nullptr) {
- return "null";
- }
- return obj->PrettyTypeOf();
+ return (obj == nullptr) ? "null" : obj->PrettyTypeOf();
}
std::string Object::PrettyTypeOf() {
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index a89d6323a5..c7cffed69b 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -106,12 +106,9 @@ class MANAGED LOCKABLE Object {
// Get the read barrier state with a load-acquire.
ALWAYS_INLINE uint32_t GetReadBarrierStateAcquire() REQUIRES_SHARED(Locks::mutator_lock_);
-#ifndef USE_BAKER_OR_BROOKS_READ_BARRIER
- NO_RETURN
-#endif
ALWAYS_INLINE void SetReadBarrierState(uint32_t rb_state) REQUIRES_SHARED(Locks::mutator_lock_);
- template<bool kCasRelease = false>
+ template<std::memory_order kMemoryOrder = std::memory_order_relaxed>
ALWAYS_INLINE bool AtomicSetReadBarrierState(uint32_t expected_rb_state, uint32_t rb_state)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -151,13 +148,7 @@ class MANAGED LOCKABLE Object {
LockWord GetLockWord(bool as_volatile) REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
void SetLockWord(LockWord new_val, bool as_volatile) REQUIRES_SHARED(Locks::mutator_lock_);
- bool CasLockWordWeakSequentiallyConsistent(LockWord old_val, LockWord new_val)
- REQUIRES_SHARED(Locks::mutator_lock_);
- bool CasLockWordWeakRelaxed(LockWord old_val, LockWord new_val)
- REQUIRES_SHARED(Locks::mutator_lock_);
- bool CasLockWordWeakAcquire(LockWord old_val, LockWord new_val)
- REQUIRES_SHARED(Locks::mutator_lock_);
- bool CasLockWordWeakRelease(LockWord old_val, LockWord new_val)
+ bool CasLockWord(LockWord old_val, LockWord new_val, CASMode mode, std::memory_order memory_order)
REQUIRES_SHARED(Locks::mutator_lock_);
uint32_t GetLockOwnerThreadId();
@@ -324,30 +315,20 @@ class MANAGED LOCKABLE Object {
template<bool kTransactionActive,
bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool CasFieldWeakSequentiallyConsistentObject(MemberOffset field_offset,
- ObjPtr<Object> old_value,
- ObjPtr<Object> new_value)
- REQUIRES_SHARED(Locks::mutator_lock_);
- template<bool kTransactionActive,
- bool kCheckTransaction = true,
- VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier(MemberOffset field_offset,
- ObjPtr<Object> old_value,
- ObjPtr<Object> new_value)
+ ALWAYS_INLINE bool CasFieldObject(MemberOffset field_offset,
+ ObjPtr<Object> old_value,
+ ObjPtr<Object> new_value,
+ CASMode mode,
+ std::memory_order memory_order)
REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive,
bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool CasFieldStrongSequentiallyConsistentObject(MemberOffset field_offset,
- ObjPtr<Object> old_value,
- ObjPtr<Object> new_value)
- REQUIRES_SHARED(Locks::mutator_lock_);
- template<bool kTransactionActive,
- bool kCheckTransaction = true,
- VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool CasFieldStrongSequentiallyConsistentObjectWithoutWriteBarrier(MemberOffset field_offset,
- ObjPtr<Object> old_value,
- ObjPtr<Object> new_value)
+ ALWAYS_INLINE bool CasFieldObjectWithoutWriteBarrier(MemberOffset field_offset,
+ ObjPtr<Object> old_value,
+ ObjPtr<Object> new_value,
+ CASMode mode,
+ std::memory_order memory_order)
REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive,
@@ -364,45 +345,14 @@ class MANAGED LOCKABLE Object {
ObjPtr<Object> ExchangeFieldObject(MemberOffset field_offset, ObjPtr<Object> new_value)
REQUIRES_SHARED(Locks::mutator_lock_);
- template<bool kTransactionActive,
- bool kCheckTransaction = true,
- VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool CasFieldWeakRelaxedObjectWithoutWriteBarrier(MemberOffset field_offset,
- ObjPtr<Object> old_value,
- ObjPtr<Object> new_value)
- REQUIRES_SHARED(Locks::mutator_lock_);
- template<bool kTransactionActive,
- bool kCheckTransaction = true,
- VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool CasFieldWeakReleaseObjectWithoutWriteBarrier(MemberOffset field_offset,
- ObjPtr<Object> old_value,
- ObjPtr<Object> new_value)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
- template<bool kTransactionActive,
- bool kCheckTransaction = true,
- VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool CasFieldStrongRelaxedObjectWithoutWriteBarrier(MemberOffset field_offset,
- ObjPtr<Object> old_value,
- ObjPtr<Object> new_value)
- REQUIRES_SHARED(Locks::mutator_lock_);
- template<bool kTransactionActive,
- bool kCheckTransaction = true,
- VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool CasFieldStrongReleaseObjectWithoutWriteBarrier(MemberOffset field_offset,
- ObjPtr<Object> old_value,
- ObjPtr<Object> new_value)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- HeapReference<Object>* GetFieldObjectReferenceAddr(MemberOffset field_offset);
+ HeapReference<Object>* GetFieldObjectReferenceAddr(MemberOffset field_offset)
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE uint8_t GetFieldBoolean(MemberOffset field_offset)
REQUIRES_SHARED(Locks::mutator_lock_) {
- if (kVerifyFlags & kVerifyThis) {
- VerifyObject(this);
- }
+ Verify<kVerifyFlags>();
return GetField<uint8_t, kIsVolatile>(field_offset);
}
@@ -489,9 +439,7 @@ class MANAGED LOCKABLE Object {
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE int32_t GetField32(MemberOffset field_offset)
REQUIRES_SHARED(Locks::mutator_lock_) {
- if (kVerifyFlags & kVerifyThis) {
- VerifyObject(this);
- }
+ Verify<kVerifyFlags>();
return GetField<int32_t, kIsVolatile>(field_offset);
}
@@ -523,49 +471,17 @@ class MANAGED LOCKABLE Object {
template<bool kTransactionActive,
bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE bool CasFieldWeakSequentiallyConsistent32(MemberOffset field_offset,
- int32_t old_value,
- int32_t new_value)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
- template<bool kTransactionActive,
- bool kCheckTransaction = true,
- VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE bool CasFieldWeakRelaxed32(MemberOffset field_offset,
- int32_t old_value,
- int32_t new_value)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
- template<bool kTransactionActive,
- bool kCheckTransaction = true,
- VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE bool CasFieldWeakAcquire32(MemberOffset field_offset,
- int32_t old_value,
- int32_t new_value)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
- template<bool kTransactionActive,
- bool kCheckTransaction = true,
- VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE bool CasFieldWeakRelease32(MemberOffset field_offset,
- int32_t old_value,
- int32_t new_value)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
- template<bool kTransactionActive,
- bool kCheckTransaction = true,
- VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE bool CasFieldStrongSequentiallyConsistent32(MemberOffset field_offset,
- int32_t old_value,
- int32_t new_value)
+ ALWAYS_INLINE bool CasField32(MemberOffset field_offset,
+ int32_t old_value,
+ int32_t new_value,
+ CASMode mode,
+ std::memory_order memory_order)
REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE int64_t GetField64(MemberOffset field_offset)
REQUIRES_SHARED(Locks::mutator_lock_) {
- if (kVerifyFlags & kVerifyThis) {
- VerifyObject(this);
- }
+ Verify<kVerifyFlags>();
return GetField<int64_t, kIsVolatile>(field_offset);
}
@@ -735,8 +651,7 @@ class MANAGED LOCKABLE Object {
template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
T GetFieldPtr64(MemberOffset field_offset)
REQUIRES_SHARED(Locks::mutator_lock_) {
- return GetFieldPtrWithSize<T, kVerifyFlags, kIsVolatile>(field_offset,
- PointerSize::k64);
+ return GetFieldPtrWithSize<T, kVerifyFlags, kIsVolatile>(field_offset, PointerSize::k64);
}
template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
@@ -810,6 +725,39 @@ class MANAGED LOCKABLE Object {
}
}
+ template<VerifyObjectFlags kVerifyFlags>
+ ALWAYS_INLINE void Verify() {
+ if (kVerifyFlags & kVerifyThis) {
+ VerifyObject(this);
+ }
+ }
+
+ // Not ObjPtr since the values may be unaligned for logic in verification.cc.
+ template<VerifyObjectFlags kVerifyFlags, typename Reference>
+ ALWAYS_INLINE static void VerifyRead(Reference value) {
+ if (kVerifyFlags & kVerifyReads) {
+ VerifyObject(value);
+ }
+ }
+
+ template<VerifyObjectFlags kVerifyFlags>
+ ALWAYS_INLINE static void VerifyWrite(ObjPtr<mirror::Object> value) {
+ if (kVerifyFlags & kVerifyWrites) {
+ VerifyObject(value);
+ }
+ }
+
+ template<VerifyObjectFlags kVerifyFlags>
+ ALWAYS_INLINE void VerifyCAS(ObjPtr<mirror::Object> new_value, ObjPtr<mirror::Object> old_value) {
+ Verify<kVerifyFlags>();
+ VerifyRead<kVerifyFlags>(old_value);
+ VerifyWrite<kVerifyFlags>(new_value);
+ }
+
+ // Verify transaction is active (if required).
+ template<bool kTransactionActive, bool kCheckTransaction>
+ ALWAYS_INLINE void VerifyTransaction();
+
// A utility function that copies an object in a read barrier and write barrier-aware way.
// This is internally used by Clone() and Class::CopyOf(). If the object is finalizable,
// it is the callers job to call Heap::AddFinalizerReference.
diff --git a/runtime/mirror/object_array-inl.h b/runtime/mirror/object_array-inl.h
index ed3c567464..1d2f47f86a 100644
--- a/runtime/mirror/object_array-inl.h
+++ b/runtime/mirror/object_array-inl.h
@@ -32,6 +32,7 @@
#include "object-inl.h"
#include "runtime.h"
#include "thread.h"
+#include "write_barrier-inl.h"
namespace art {
namespace mirror {
@@ -197,7 +198,7 @@ inline void ObjectArray<T>::AssignableMemmove(int32_t dst_pos,
}
}
}
- Runtime::Current()->GetHeap()->WriteBarrierArray(this, dst_pos, count);
+ WriteBarrier::ForArrayWrite(this, dst_pos, count);
if (kIsDebugBuild) {
for (int i = 0; i < count; ++i) {
// The get will perform the VerifyObject.
@@ -246,7 +247,7 @@ inline void ObjectArray<T>::AssignableMemcpy(int32_t dst_pos,
SetWithoutChecksAndWriteBarrier<false>(dst_pos + i, obj);
}
}
- Runtime::Current()->GetHeap()->WriteBarrierArray(this, dst_pos, count);
+ WriteBarrier::ForArrayWrite(this, dst_pos, count);
if (kIsDebugBuild) {
for (int i = 0; i < count; ++i) {
// The get will perform the VerifyObject.
@@ -328,7 +329,7 @@ inline void ObjectArray<T>::AssignableCheckingMemcpy(int32_t dst_pos,
}
}
}
- Runtime::Current()->GetHeap()->WriteBarrierArray(this, dst_pos, count);
+ WriteBarrier::ForArrayWrite(this, dst_pos, count);
if (UNLIKELY(i != count)) {
std::string actualSrcType(mirror::Object::PrettyTypeOf(o));
std::string dstType(PrettyTypeOf());
diff --git a/runtime/mirror/var_handle.cc b/runtime/mirror/var_handle.cc
index 4319c5df25..56c953b816 100644
--- a/runtime/mirror/var_handle.cc
+++ b/runtime/mirror/var_handle.cc
@@ -1021,15 +1021,17 @@ bool FieldAccessor<ObjPtr<Object>>::Dispatch(VarHandle::AccessMode access_mode,
ObjPtr<Object> desired_value = ValueGetter<ObjPtr<Object>>::Get(getter);
bool cas_result;
if (Runtime::Current()->IsActiveTransaction()) {
- cas_result = obj->CasFieldStrongSequentiallyConsistentObject<kTransactionActive>(
- field_offset,
- expected_value,
- desired_value);
+ cas_result = obj->CasFieldObject<kTransactionActive>(field_offset,
+ expected_value,
+ desired_value,
+ CASMode::kStrong,
+ std::memory_order_seq_cst);
} else {
- cas_result = obj->CasFieldStrongSequentiallyConsistentObject<kTransactionInactive>(
- field_offset,
- expected_value,
- desired_value);
+ cas_result = obj->CasFieldObject<kTransactionInactive>(field_offset,
+ expected_value,
+ desired_value,
+ CASMode::kStrong,
+ std::memory_order_seq_cst);
}
StoreResult(cas_result, result);
break;
@@ -1043,15 +1045,18 @@ bool FieldAccessor<ObjPtr<Object>>::Dispatch(VarHandle::AccessMode access_mode,
ObjPtr<Object> desired_value = ValueGetter<ObjPtr<Object>>::Get(getter);
bool cas_result;
if (Runtime::Current()->IsActiveTransaction()) {
- cas_result = obj->CasFieldWeakSequentiallyConsistentObject<kTransactionActive>(
- field_offset,
- expected_value,
- desired_value);
+ cas_result = obj->CasFieldObject<kTransactionActive>(field_offset,
+ expected_value,
+ desired_value,
+ CASMode::kWeak,
+ std::memory_order_seq_cst);
} else {
- cas_result = obj->CasFieldWeakSequentiallyConsistentObject<kTransactionInactive>(
+ cas_result = obj->CasFieldObject<kTransactionInactive>(
field_offset,
expected_value,
- desired_value);
+ desired_value,
+ CASMode::kWeak,
+ std::memory_order_seq_cst);
}
StoreResult(cas_result, result);
break;
@@ -1064,15 +1069,13 @@ bool FieldAccessor<ObjPtr<Object>>::Dispatch(VarHandle::AccessMode access_mode,
ObjPtr<Object> desired_value = ValueGetter<ObjPtr<Object>>::Get(getter);
ObjPtr<Object> witness_value;
if (Runtime::Current()->IsActiveTransaction()) {
- witness_value = obj->CompareAndExchangeFieldObject<kTransactionActive>(
- field_offset,
- expected_value,
- desired_value);
+ witness_value = obj->CompareAndExchangeFieldObject<kTransactionActive>(field_offset,
+ expected_value,
+ desired_value);
} else {
- witness_value = obj->CompareAndExchangeFieldObject<kTransactionInactive>(
- field_offset,
- expected_value,
- desired_value);
+ witness_value = obj->CompareAndExchangeFieldObject<kTransactionInactive>(field_offset,
+ expected_value,
+ desired_value);
}
StoreResult(witness_value, result);
break;
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 2c38de5dae..d47bc0d12e 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -134,13 +134,15 @@ Monitor::Monitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_
}
int32_t Monitor::GetHashCode() {
- while (!HasHashCode()) {
- if (hash_code_.CompareAndSetWeakRelaxed(0, mirror::Object::GenerateIdentityHashCode())) {
- break;
- }
+ int32_t hc = hash_code_.load(std::memory_order_relaxed);
+ if (!HasHashCode()) {
+ // Use a strong CAS to prevent spurious failures since these can make the boot image
+ // non-deterministic.
+ hash_code_.CompareAndSetStrongRelaxed(0, mirror::Object::GenerateIdentityHashCode());
+ hc = hash_code_.load(std::memory_order_relaxed);
}
DCHECK(HasHashCode());
- return hash_code_.load(std::memory_order_relaxed);
+ return hc;
}
bool Monitor::Install(Thread* self) {
@@ -173,7 +175,7 @@ bool Monitor::Install(Thread* self) {
}
LockWord fat(this, lw.GCState());
// Publish the updated lock word, which may race with other threads.
- bool success = GetObject()->CasLockWordWeakRelease(lw, fat);
+ bool success = GetObject()->CasLockWord(lw, fat, CASMode::kWeak, std::memory_order_release);
// Lock profiling.
if (success && owner_ != nullptr && lock_profiling_threshold_ != 0) {
// Do not abort on dex pc errors. This can easily happen when we want to dump a stack trace on
@@ -1039,7 +1041,7 @@ mirror::Object* Monitor::MonitorEnter(Thread* self, mirror::Object* obj, bool tr
case LockWord::kUnlocked: {
// No ordering required for preceding lockword read, since we retest.
LockWord thin_locked(LockWord::FromThinLockId(thread_id, 0, lock_word.GCState()));
- if (h_obj->CasLockWordWeakAcquire(lock_word, thin_locked)) {
+ if (h_obj->CasLockWord(lock_word, thin_locked, CASMode::kWeak, std::memory_order_acquire)) {
AtraceMonitorLock(self, h_obj.Get(), false /* is_wait */);
return h_obj.Get(); // Success!
}
@@ -1063,7 +1065,10 @@ mirror::Object* Monitor::MonitorEnter(Thread* self, mirror::Object* obj, bool tr
return h_obj.Get(); // Success!
} else {
// Use CAS to preserve the read barrier state.
- if (h_obj->CasLockWordWeakRelaxed(lock_word, thin_locked)) {
+ if (h_obj->CasLockWord(lock_word,
+ thin_locked,
+ CASMode::kWeak,
+ std::memory_order_relaxed)) {
AtraceMonitorLock(self, h_obj.Get(), false /* is_wait */);
return h_obj.Get(); // Success!
}
@@ -1165,7 +1170,7 @@ bool Monitor::MonitorExit(Thread* self, mirror::Object* obj) {
return true;
} else {
// Use CAS to preserve the read barrier state.
- if (h_obj->CasLockWordWeakRelease(lock_word, new_lw)) {
+ if (h_obj->CasLockWord(lock_word, new_lw, CASMode::kWeak, std::memory_order_release)) {
AtraceMonitorUnlock();
// Success!
return true;
diff --git a/runtime/native/java_lang_reflect_Constructor.cc b/runtime/native/java_lang_reflect_Constructor.cc
index a961cb2597..e54674f72b 100644
--- a/runtime/native/java_lang_reflect_Constructor.cc
+++ b/runtime/native/java_lang_reflect_Constructor.cc
@@ -60,6 +60,7 @@ static jobjectArray Constructor_getExceptionTypes(JNIEnv* env, jobject javaMetho
static jobject Constructor_newInstance0(JNIEnv* env, jobject javaMethod, jobjectArray javaArgs) {
ScopedFastNativeObjectAccess soa(env);
ObjPtr<mirror::Constructor> m = soa.Decode<mirror::Constructor>(javaMethod);
+ ArtMethod* constructor_art_method = m->GetArtMethod();
StackHandleScope<1> hs(soa.Self());
Handle<mirror::Class> c(hs.NewHandle(m->GetDeclaringClass()));
if (UNLIKELY(c->IsAbstract())) {
@@ -100,18 +101,20 @@ static jobject Constructor_newInstance0(JNIEnv* env, jobject javaMethod, jobject
}
// String constructor is replaced by a StringFactory method in InvokeMethod.
- if (c->IsStringClass()) {
+ if (UNLIKELY(c->IsStringClass())) {
return InvokeMethod(soa, javaMethod, nullptr, javaArgs, 2);
}
ObjPtr<mirror::Object> receiver =
movable ? c->AllocObject(soa.Self()) : c->AllocNonMovableObject(soa.Self());
- if (receiver == nullptr) {
+ if (UNLIKELY(receiver == nullptr)) {
+ DCHECK(soa.Self()->IsExceptionPending());
return nullptr;
}
jobject javaReceiver = soa.AddLocalReference<jobject>(receiver);
- InvokeMethod(soa, javaMethod, javaReceiver, javaArgs, 2);
- // Constructors are ()V methods, so we shouldn't touch the result of InvokeMethod.
+
+ InvokeConstructor(soa, constructor_art_method, receiver, javaArgs);
+
return javaReceiver;
}
diff --git a/runtime/native/sun_misc_Unsafe.cc b/runtime/native/sun_misc_Unsafe.cc
index d41a19556e..46444808d7 100644
--- a/runtime/native/sun_misc_Unsafe.cc
+++ b/runtime/native/sun_misc_Unsafe.cc
@@ -41,9 +41,11 @@ static jboolean Unsafe_compareAndSwapInt(JNIEnv* env, jobject, jobject javaObj,
ScopedFastNativeObjectAccess soa(env);
ObjPtr<mirror::Object> obj = soa.Decode<mirror::Object>(javaObj);
// JNI must use non transactional mode.
- bool success = obj->CasFieldStrongSequentiallyConsistent32<false>(MemberOffset(offset),
- expectedValue,
- newValue);
+ bool success = obj->CasField32<false>(MemberOffset(offset),
+ expectedValue,
+ newValue,
+ CASMode::kStrong,
+ std::memory_order_seq_cst);
return success ? JNI_TRUE : JNI_FALSE;
}
@@ -78,9 +80,11 @@ static jboolean Unsafe_compareAndSwapObject(JNIEnv* env, jobject, jobject javaOb
MemberOffset(offset),
field_addr);
}
- bool success = obj->CasFieldStrongSequentiallyConsistentObject<false>(MemberOffset(offset),
- expectedValue,
- newValue);
+ bool success = obj->CasFieldObject<false>(MemberOffset(offset),
+ expectedValue,
+ newValue,
+ CASMode::kStrong,
+ std::memory_order_seq_cst);
return success ? JNI_TRUE : JNI_FALSE;
}
diff --git a/runtime/native_stack_dump.cc b/runtime/native_stack_dump.cc
index 14f3f45f9e..ce295aacde 100644
--- a/runtime/native_stack_dump.cc
+++ b/runtime/native_stack_dump.cc
@@ -289,10 +289,7 @@ void DumpNativeStack(std::ostream& os,
ArtMethod* current_method,
void* ucontext_ptr,
bool skip_frames) {
- // b/18119146
- if (RUNNING_ON_MEMORY_TOOL != 0) {
- return;
- }
+ // Historical note: This was disabled when running under Valgrind (b/18119146).
BacktraceMap* map = existing_map;
std::unique_ptr<BacktraceMap> tmp_map;
diff --git a/runtime/oat.h b/runtime/oat.h
index 72eb27d69e..6c3cc20032 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,8 +32,8 @@ class InstructionSetFeatures;
class PACKED(4) OatHeader {
public:
static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
- // Last oat version changed reason: Rewrite TypeLookupTable.
- static constexpr uint8_t kOatVersion[] = { '1', '4', '7', '\0' };
+ // Last oat version changed reason: Remove explicit size from CodeInfo.
+ static constexpr uint8_t kOatVersion[] = { '1', '5', '1', '\0' };
static constexpr const char* kImageLocationKey = "image-location";
static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 2b05b0e3dd..58e16ed1b7 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -686,7 +686,7 @@ bool OatFileBase::Setup(int zip_fd, const char* abs_dex_location, std::string* e
return false;
}
}
- dex_file_pointer = uncompressed_dex_files_.get()->at(i)->Begin();
+ dex_file_pointer = (*uncompressed_dex_files_)[i]->Begin();
} else {
// Do not support mixed-mode oat files.
if (uncompressed_dex_files_ != nullptr) {
@@ -1063,7 +1063,8 @@ bool DlOpenOatFile::Dlopen(const std::string& elf_filename,
dlopen_handle_ = android_dlopen_ext(absolute_path.get(), RTLD_NOW, &extinfo);
#else
UNUSED(oat_file_begin);
- static_assert(!kIsTargetBuild || kIsTargetLinux, "host_dlopen_handles_ will leak handles");
+ static_assert(!kIsTargetBuild || kIsTargetLinux || kIsTargetFuchsia,
+ "host_dlopen_handles_ will leak handles");
MutexLock mu(Thread::Current(), *Locks::host_dlopen_handles_lock_);
dlopen_handle_ = dlopen(absolute_path.get(), RTLD_NOW);
if (dlopen_handle_ != nullptr) {
@@ -1597,9 +1598,9 @@ ArrayRef<GcRoot<mirror::Object>> OatFile::GetBssGcRoots() const {
}
}
-const OatFile::OatDexFile* OatFile::GetOatDexFile(const char* dex_location,
- const uint32_t* dex_location_checksum,
- std::string* error_msg) const {
+const OatDexFile* OatFile::GetOatDexFile(const char* dex_location,
+ const uint32_t* dex_location_checksum,
+ std::string* error_msg) const {
// NOTE: We assume here that the canonical location for a given dex_location never
// changes. If it does (i.e. some symlink used by the filename changes) we may return
// an incorrect OatDexFile. As long as we have a checksum to check, we shall return
@@ -1608,7 +1609,7 @@ const OatFile::OatDexFile* OatFile::GetOatDexFile(const char* dex_location,
// TODO: Additional analysis of usage patterns to see if this can be simplified
// without any performance loss, for example by not doing the first lock-free lookup.
- const OatFile::OatDexFile* oat_dex_file = nullptr;
+ const OatDexFile* oat_dex_file = nullptr;
StringPiece key(dex_location);
// Try to find the key cheaply in the oat_dex_files_ map which holds dex locations
// directly mentioned in the oat file and doesn't require locking.
@@ -1666,17 +1667,17 @@ const OatFile::OatDexFile* OatFile::GetOatDexFile(const char* dex_location,
return oat_dex_file;
}
-OatFile::OatDexFile::OatDexFile(const OatFile* oat_file,
- const std::string& dex_file_location,
- const std::string& canonical_dex_file_location,
- uint32_t dex_file_location_checksum,
- const uint8_t* dex_file_pointer,
- const uint8_t* lookup_table_data,
- const IndexBssMapping* method_bss_mapping_data,
- const IndexBssMapping* type_bss_mapping_data,
- const IndexBssMapping* string_bss_mapping_data,
- const uint32_t* oat_class_offsets_pointer,
- const DexLayoutSections* dex_layout_sections)
+OatDexFile::OatDexFile(const OatFile* oat_file,
+ const std::string& dex_file_location,
+ const std::string& canonical_dex_file_location,
+ uint32_t dex_file_location_checksum,
+ const uint8_t* dex_file_pointer,
+ const uint8_t* lookup_table_data,
+ const IndexBssMapping* method_bss_mapping_data,
+ const IndexBssMapping* type_bss_mapping_data,
+ const IndexBssMapping* string_bss_mapping_data,
+ const uint32_t* oat_class_offsets_pointer,
+ const DexLayoutSections* dex_layout_sections)
: oat_file_(oat_file),
dex_file_location_(dex_file_location),
canonical_dex_file_location_(canonical_dex_file_location),
@@ -1707,16 +1708,15 @@ OatFile::OatDexFile::OatDexFile(const OatFile* oat_file,
}
}
-OatFile::OatDexFile::OatDexFile(TypeLookupTable&& lookup_table)
- : lookup_table_(std::move(lookup_table)) {}
+OatDexFile::OatDexFile(TypeLookupTable&& lookup_table) : lookup_table_(std::move(lookup_table)) {}
-OatFile::OatDexFile::~OatDexFile() {}
+OatDexFile::~OatDexFile() {}
-size_t OatFile::OatDexFile::FileSize() const {
+size_t OatDexFile::FileSize() const {
return reinterpret_cast<const DexFile::Header*>(dex_file_pointer_)->file_size_;
}
-std::unique_ptr<const DexFile> OatFile::OatDexFile::OpenDexFile(std::string* error_msg) const {
+std::unique_ptr<const DexFile> OatDexFile::OpenDexFile(std::string* error_msg) const {
ScopedTrace trace(__PRETTY_FUNCTION__);
static constexpr bool kVerify = false;
static constexpr bool kVerifyChecksum = false;
@@ -1731,11 +1731,11 @@ std::unique_ptr<const DexFile> OatFile::OatDexFile::OpenDexFile(std::string* err
error_msg);
}
-uint32_t OatFile::OatDexFile::GetOatClassOffset(uint16_t class_def_index) const {
+uint32_t OatDexFile::GetOatClassOffset(uint16_t class_def_index) const {
return oat_class_offsets_pointer_[class_def_index];
}
-OatFile::OatClass OatFile::OatDexFile::GetOatClass(uint16_t class_def_index) const {
+OatFile::OatClass OatDexFile::GetOatClass(uint16_t class_def_index) const {
uint32_t oat_class_offset = GetOatClassOffset(class_def_index);
const uint8_t* oat_class_pointer = oat_file_->Begin() + oat_class_offset;
@@ -1777,10 +1777,10 @@ OatFile::OatClass OatFile::OatDexFile::GetOatClass(uint16_t class_def_index) con
reinterpret_cast<const OatMethodOffsets*>(methods_pointer));
}
-const DexFile::ClassDef* OatFile::OatDexFile::FindClassDef(const DexFile& dex_file,
- const char* descriptor,
- size_t hash) {
- const OatFile::OatDexFile* oat_dex_file = dex_file.GetOatDexFile();
+const DexFile::ClassDef* OatDexFile::FindClassDef(const DexFile& dex_file,
+ const char* descriptor,
+ size_t hash) {
+ const OatDexFile* oat_dex_file = dex_file.GetOatDexFile();
DCHECK_EQ(ComputeModifiedUtf8Hash(descriptor), hash);
bool used_lookup_table = false;
const DexFile::ClassDef* lookup_table_classdef = nullptr;
@@ -1828,7 +1828,7 @@ void OatDexFile::MadviseDexFile(const DexFile& dex_file, MadviseState state) {
dex_file.Begin() + dex_file.Size(),
MADV_RANDOM);
}
- const OatFile::OatDexFile* oat_dex_file = dex_file.GetOatDexFile();
+ const OatDexFile* oat_dex_file = dex_file.GetOatDexFile();
if (oat_dex_file != nullptr) {
// Should always be there.
const DexLayoutSections* const sections = oat_dex_file->GetDexLayoutSections();
@@ -1946,7 +1946,7 @@ OatFile::OatClass OatFile::FindOatClass(const DexFile& dex_file,
uint16_t class_def_idx,
bool* found) {
DCHECK_NE(class_def_idx, DexFile::kDexNoIndex16);
- const OatFile::OatDexFile* oat_dex_file = dex_file.GetOatDexFile();
+ const OatDexFile* oat_dex_file = dex_file.GetOatDexFile();
if (oat_dex_file == nullptr || oat_dex_file->GetOatFile() == nullptr) {
*found = false;
return OatFile::OatClass::Invalid();
@@ -1955,7 +1955,7 @@ OatFile::OatClass OatFile::FindOatClass(const DexFile& dex_file,
return oat_dex_file->GetOatClass(class_def_idx);
}
-void OatFile::OatDexFile::AssertAotCompiler() {
+void OatDexFile::AssertAotCompiler() {
CHECK(Runtime::Current()->IsAotCompiler());
}
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index d72b6a8971..5f87bf0f99 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -70,8 +70,6 @@ class OatFile {
// Special classpath that skips shared library check.
static constexpr const char* kSpecialSharedLibrary = "&";
- typedef art::OatDexFile OatDexFile;
-
// Opens an oat file contained within the given elf file. This is always opened as
// non-executable at the moment.
static OatFile* OpenWithElfFile(int zip_fd,
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index 6c869cada5..f7c74cc23b 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -354,7 +354,7 @@ bool OatFileAssistant::LoadDexFiles(
std::vector<std::unique_ptr<const DexFile>>* out_dex_files) {
// Load the main dex file.
std::string error_msg;
- const OatFile::OatDexFile* oat_dex_file = oat_file.GetOatDexFile(
+ const OatDexFile* oat_dex_file = oat_file.GetOatDexFile(
dex_location.c_str(), nullptr, &error_msg);
if (oat_dex_file == nullptr) {
LOG(WARNING) << error_msg;
@@ -453,7 +453,7 @@ bool OatFileAssistant::DexChecksumUpToDate(const OatFile& file, std::string* err
for (uint32_t i = 0; i < number_of_dex_files; i++) {
std::string dex = DexFileLoader::GetMultiDexLocation(i, dex_location_.c_str());
uint32_t expected_checksum = (*required_dex_checksums)[i];
- const OatFile::OatDexFile* oat_dex_file = file.GetOatDexFile(dex.c_str(), nullptr);
+ const OatDexFile* oat_dex_file = file.GetOatDexFile(dex.c_str(), nullptr);
if (oat_dex_file == nullptr) {
*error_msg = StringPrintf("failed to find %s in %s", dex.c_str(), file.GetLocation().c_str());
return false;
@@ -921,7 +921,7 @@ const std::vector<uint32_t>* OatFileAssistant::GetRequiredDexChecksums() {
required_dex_checksums_found_ = true;
for (size_t i = 0; i < odex_file->GetOatHeader().GetDexFileCount(); i++) {
std::string dex = DexFileLoader::GetMultiDexLocation(i, dex_location_.c_str());
- const OatFile::OatDexFile* odex_dex_file = odex_file->GetOatDexFile(dex.c_str(), nullptr);
+ const OatDexFile* odex_dex_file = odex_file->GetOatDexFile(dex.c_str(), nullptr);
if (odex_dex_file == nullptr) {
required_dex_checksums_found_ = false;
break;
diff --git a/runtime/oat_quick_method_header.cc b/runtime/oat_quick_method_header.cc
index aed6bc57b3..52714f916b 100644
--- a/runtime/oat_quick_method_header.cc
+++ b/runtime/oat_quick_method_header.cc
@@ -35,8 +35,6 @@ OatQuickMethodHeader::OatQuickMethodHeader(uint32_t vmap_table_offset,
frame_info_(frame_size_in_bytes, core_spill_mask, fp_spill_mask),
code_size_(code_size) {}
-OatQuickMethodHeader::~OatQuickMethodHeader() {}
-
uint32_t OatQuickMethodHeader::ToDexPc(ArtMethod* method,
const uintptr_t pc,
bool abort_on_failure) const {
diff --git a/runtime/oat_quick_method_header.h b/runtime/oat_quick_method_header.h
index d6762d6bc6..3d5be3650b 100644
--- a/runtime/oat_quick_method_header.h
+++ b/runtime/oat_quick_method_header.h
@@ -38,8 +38,6 @@ class PACKED(4) OatQuickMethodHeader {
uint32_t fp_spill_mask,
uint32_t code_size);
- ~OatQuickMethodHeader();
-
static OatQuickMethodHeader* FromCodePointer(const void* code_ptr) {
uintptr_t code = reinterpret_cast<uintptr_t>(code_ptr);
uintptr_t header = code - OFFSETOF_MEMBER(OatQuickMethodHeader, code_);
diff --git a/runtime/proxy_test.cc b/runtime/proxy_test.cc
index 946ea018f3..36dea60367 100644
--- a/runtime/proxy_test.cc
+++ b/runtime/proxy_test.cc
@@ -23,11 +23,24 @@
#include "mirror/field-inl.h"
#include "proxy_test.h"
#include "scoped_thread_state_change-inl.h"
+#include "well_known_classes.h"
namespace art {
namespace proxy_test {
-class ProxyTest : public CommonRuntimeTest {};
+class ProxyTest : public CommonRuntimeTest {
+ protected:
+ void SetUp() OVERRIDE {
+ CommonRuntimeTest::SetUp();
+ // The creation of a Proxy class uses WellKnownClasses. These are not normally initialized by
+ // CommonRuntimeTest so we need to do that now.
+ WellKnownClasses::Clear();
+ WellKnownClasses::Init(art::Thread::Current()->GetJniEnv());
+ // Since we aren't actually calling any of the native functions we can just immediately call
+ // LateInit after calling Init.
+ WellKnownClasses::LateInit(art::Thread::Current()->GetJniEnv());
+ }
+};
// Creates a proxy class and check ClassHelper works correctly.
TEST_F(ProxyTest, ProxyClassHelper) {
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 23ccf6ad58..e8d9658dd4 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -230,23 +230,22 @@ void QuickExceptionHandler::SetCatchEnvironmentForOptimizedHandler(StackVisitor*
// Find stack map of the catch block.
StackMap catch_stack_map = code_info.GetCatchStackMapForDexPc(GetHandlerDexPc());
DCHECK(catch_stack_map.IsValid());
- DexRegisterMap catch_vreg_map =
- code_info.GetDexRegisterMapOf(catch_stack_map, number_of_vregs);
- if (!catch_vreg_map.IsValid() || !catch_vreg_map.HasAnyLiveDexRegisters()) {
+ DexRegisterMap catch_vreg_map = code_info.GetDexRegisterMapOf(catch_stack_map);
+ if (!catch_vreg_map.HasAnyLiveDexRegisters()) {
return;
}
+ DCHECK_EQ(catch_vreg_map.size(), number_of_vregs);
// Find stack map of the throwing instruction.
StackMap throw_stack_map =
code_info.GetStackMapForNativePcOffset(stack_visitor->GetNativePcOffset());
DCHECK(throw_stack_map.IsValid());
- DexRegisterMap throw_vreg_map =
- code_info.GetDexRegisterMapOf(throw_stack_map, number_of_vregs);
- DCHECK(throw_vreg_map.IsValid());
+ DexRegisterMap throw_vreg_map = code_info.GetDexRegisterMapOf(throw_stack_map);
+ DCHECK_EQ(throw_vreg_map.size(), number_of_vregs);
// Copy values between them.
for (uint16_t vreg = 0; vreg < number_of_vregs; ++vreg) {
- DexRegisterLocation::Kind catch_location = catch_vreg_map.GetLocationKind(vreg);
+ DexRegisterLocation::Kind catch_location = catch_vreg_map[vreg].GetKind();
if (catch_location == DexRegisterLocation::Kind::kNone) {
continue;
}
@@ -254,7 +253,7 @@ void QuickExceptionHandler::SetCatchEnvironmentForOptimizedHandler(StackVisitor*
// Get vreg value from its current location.
uint32_t vreg_value;
- VRegKind vreg_kind = ToVRegKind(throw_vreg_map.GetLocationKind(vreg));
+ VRegKind vreg_kind = ToVRegKind(throw_vreg_map[vreg].GetKind());
bool get_vreg_success = stack_visitor->GetVReg(stack_visitor->GetMethod(),
vreg,
vreg_kind,
@@ -265,7 +264,7 @@ void QuickExceptionHandler::SetCatchEnvironmentForOptimizedHandler(StackVisitor*
<< "native_pc_offset=" << stack_visitor->GetNativePcOffset() << ")";
// Copy value to the catch phi's stack slot.
- int32_t slot_offset = catch_vreg_map.GetStackOffsetInBytes(vreg);
+ int32_t slot_offset = catch_vreg_map[vreg].GetStackOffsetInBytes();
ArtMethod** frame_top = stack_visitor->GetCurrentQuickFrame();
uint8_t* slot_address = reinterpret_cast<uint8_t*>(frame_top) + slot_offset;
uint32_t* slot_ptr = reinterpret_cast<uint32_t*>(slot_address);
@@ -405,14 +404,12 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor {
uint32_t register_mask = code_info.GetRegisterMaskOf(stack_map);
BitMemoryRegion stack_mask = code_info.GetStackMaskOf(stack_map);
DexRegisterMap vreg_map = IsInInlinedFrame()
- ? code_info.GetDexRegisterMapAtDepth(GetCurrentInliningDepth() - 1,
- stack_map,
- number_of_vregs)
- : code_info.GetDexRegisterMapOf(stack_map, number_of_vregs);
-
- if (!vreg_map.IsValid()) {
+ ? code_info.GetInlineDexRegisterMapOf(stack_map, GetCurrentInlinedFrame())
+ : code_info.GetDexRegisterMapOf(stack_map);
+ if (vreg_map.empty()) {
return;
}
+ DCHECK_EQ(vreg_map.size(), number_of_vregs);
for (uint16_t vreg = 0; vreg < number_of_vregs; ++vreg) {
if (updated_vregs != nullptr && updated_vregs[vreg]) {
@@ -420,14 +417,14 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor {
continue;
}
- DexRegisterLocation::Kind location = vreg_map.GetLocationKind(vreg);
+ DexRegisterLocation::Kind location = vreg_map[vreg].GetKind();
static constexpr uint32_t kDeadValue = 0xEBADDE09;
uint32_t value = kDeadValue;
bool is_reference = false;
switch (location) {
case DexRegisterLocation::Kind::kInStack: {
- const int32_t offset = vreg_map.GetStackOffsetInBytes(vreg);
+ const int32_t offset = vreg_map[vreg].GetStackOffsetInBytes();
const uint8_t* addr = reinterpret_cast<const uint8_t*>(GetCurrentQuickFrame()) + offset;
value = *reinterpret_cast<const uint32_t*>(addr);
uint32_t bit = (offset >> 2);
@@ -440,7 +437,7 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor {
case DexRegisterLocation::Kind::kInRegisterHigh:
case DexRegisterLocation::Kind::kInFpuRegister:
case DexRegisterLocation::Kind::kInFpuRegisterHigh: {
- uint32_t reg = vreg_map.GetMachineRegister(vreg);
+ uint32_t reg = vreg_map[vreg].GetMachineRegister();
bool result = GetRegisterIfAccessible(reg, ToVRegKind(location), &value);
CHECK(result);
if (location == DexRegisterLocation::Kind::kInRegister) {
@@ -451,7 +448,7 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor {
break;
}
case DexRegisterLocation::Kind::kConstant: {
- value = vreg_map.GetConstant(vreg);
+ value = vreg_map[vreg].GetConstant();
if (value == 0) {
// Make it a reference for extra safety.
is_reference = true;
@@ -462,9 +459,7 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor {
break;
}
default: {
- LOG(FATAL)
- << "Unexpected location kind "
- << vreg_map.GetLocationInternalKind(vreg);
+ LOG(FATAL) << "Unexpected location kind " << vreg_map[vreg].GetKind();
UNREACHABLE();
}
}
@@ -477,7 +472,7 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor {
}
static VRegKind GetVRegKind(uint16_t reg, const std::vector<int32_t>& kinds) {
- return static_cast<VRegKind>(kinds.at(reg * 2));
+ return static_cast<VRegKind>(kinds[reg * 2]);
}
QuickExceptionHandler* const exception_handler_;
diff --git a/runtime/read_barrier-inl.h b/runtime/read_barrier-inl.h
index 5035ba077c..640fa7e393 100644
--- a/runtime/read_barrier-inl.h
+++ b/runtime/read_barrier-inl.h
@@ -64,8 +64,11 @@ inline MirrorType* ReadBarrier::Barrier(
// If kAlwaysUpdateField is true, update the field atomically. This may fail if mutator
// updates before us, but it's OK.
if (kAlwaysUpdateField && ref != old_ref) {
- obj->CasFieldStrongReleaseObjectWithoutWriteBarrier<false, false>(
- offset, old_ref, ref);
+ obj->CasFieldObjectWithoutWriteBarrier<false, false>(offset,
+ old_ref,
+ ref,
+ CASMode::kStrong,
+ std::memory_order_release);
}
}
AssertToSpaceInvariant(obj, offset, ref);
@@ -82,8 +85,11 @@ inline MirrorType* ReadBarrier::Barrier(
ref = reinterpret_cast<MirrorType*>(Mark(old_ref));
// Update the field atomically. This may fail if mutator updates before us, but it's ok.
if (ref != old_ref) {
- obj->CasFieldStrongReleaseObjectWithoutWriteBarrier<false, false>(
- offset, old_ref, ref);
+ obj->CasFieldObjectWithoutWriteBarrier<false, false>(offset,
+ old_ref,
+ ref,
+ CASMode::kStrong,
+ std::memory_order_release);
}
}
AssertToSpaceInvariant(obj, offset, ref);
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index 6aeedd4f02..646de757e0 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -138,7 +138,7 @@ class ArgArray {
}
void BuildArgArrayFromJValues(const ScopedObjectAccessAlreadyRunnable& soa,
- ObjPtr<mirror::Object> receiver, jvalue* args)
+ ObjPtr<mirror::Object> receiver, const jvalue* args)
REQUIRES_SHARED(Locks::mutator_lock_) {
// Set receiver if non-null (method is not static)
if (receiver != nullptr) {
@@ -457,6 +457,64 @@ void InvokeWithArgArray(const ScopedObjectAccessAlreadyRunnable& soa,
method->Invoke(soa.Self(), args, arg_array->GetNumBytes(), result, shorty);
}
+ALWAYS_INLINE
+bool CheckArgsForInvokeMethod(ArtMethod* np_method,
+ ObjPtr<mirror::ObjectArray<mirror::Object>> objects)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ const DexFile::TypeList* classes = np_method->GetParameterTypeList();
+ uint32_t classes_size = (classes == nullptr) ? 0 : classes->Size();
+ uint32_t arg_count = (objects == nullptr) ? 0 : objects->GetLength();
+ if (UNLIKELY(arg_count != classes_size)) {
+ ThrowIllegalArgumentException(StringPrintf("Wrong number of arguments; expected %d, got %d",
+ classes_size, arg_count).c_str());
+ return false;
+ }
+ return true;
+}
+
+ALWAYS_INLINE
+bool InvokeMethodImpl(const ScopedObjectAccessAlreadyRunnable& soa,
+ ArtMethod* m,
+ ArtMethod* np_method,
+ ObjPtr<mirror::Object> receiver,
+ ObjPtr<mirror::ObjectArray<mirror::Object>> objects,
+ const char** shorty,
+ JValue* result) REQUIRES_SHARED(Locks::mutator_lock_) {
+ // Invoke the method.
+ uint32_t shorty_len = 0;
+ *shorty = np_method->GetShorty(&shorty_len);
+ ArgArray arg_array(*shorty, shorty_len);
+ if (!arg_array.BuildArgArrayFromObjectArray(receiver, objects, np_method, soa.Self())) {
+ CHECK(soa.Self()->IsExceptionPending());
+ return false;
+ }
+
+ InvokeWithArgArray(soa, m, &arg_array, result, *shorty);
+
+ // Wrap any exception with "Ljava/lang/reflect/InvocationTargetException;" and return early.
+ if (soa.Self()->IsExceptionPending()) {
+ // If we get another exception when we are trying to wrap, then just use that instead.
+ ScopedLocalRef<jthrowable> th(soa.Env(), soa.Env()->ExceptionOccurred());
+ soa.Self()->ClearException();
+ jclass exception_class = soa.Env()->FindClass("java/lang/reflect/InvocationTargetException");
+ if (exception_class == nullptr) {
+ soa.Self()->AssertPendingException();
+ return false;
+ }
+ jmethodID mid = soa.Env()->GetMethodID(exception_class, "<init>", "(Ljava/lang/Throwable;)V");
+ CHECK(mid != nullptr);
+ jobject exception_instance = soa.Env()->NewObject(exception_class, mid, th.get());
+ if (exception_instance == nullptr) {
+ soa.Self()->AssertPendingException();
+ return false;
+ }
+ soa.Env()->Throw(reinterpret_cast<jthrowable>(exception_instance));
+ return false;
+ }
+
+ return true;
+}
+
} // anonymous namespace
JValue InvokeWithVarArgs(const ScopedObjectAccessAlreadyRunnable& soa, jobject obj, jmethodID mid,
@@ -492,7 +550,7 @@ JValue InvokeWithVarArgs(const ScopedObjectAccessAlreadyRunnable& soa, jobject o
}
JValue InvokeWithJValues(const ScopedObjectAccessAlreadyRunnable& soa, jobject obj, jmethodID mid,
- jvalue* args) {
+ const jvalue* args) {
// We want to make sure that the stack is not within a small distance from the
// protected region in case we are calling into a leaf function whose stack
// check has been elided.
@@ -523,7 +581,7 @@ JValue InvokeWithJValues(const ScopedObjectAccessAlreadyRunnable& soa, jobject o
}
JValue InvokeVirtualOrInterfaceWithJValues(const ScopedObjectAccessAlreadyRunnable& soa,
- jobject obj, jmethodID mid, jvalue* args) {
+ jobject obj, jmethodID mid, const jvalue* args) {
// We want to make sure that the stack is not within a small distance from the
// protected region in case we are calling into a leaf function whose stack
// check has been elided.
@@ -632,12 +690,7 @@ jobject InvokeMethod(const ScopedObjectAccessAlreadyRunnable& soa, jobject javaM
ObjPtr<mirror::ObjectArray<mirror::Object>> objects =
soa.Decode<mirror::ObjectArray<mirror::Object>>(javaArgs);
auto* np_method = m->GetInterfaceMethodIfProxy(kRuntimePointerSize);
- const DexFile::TypeList* classes = np_method->GetParameterTypeList();
- uint32_t classes_size = (classes == nullptr) ? 0 : classes->Size();
- uint32_t arg_count = (objects != nullptr) ? objects->GetLength() : 0;
- if (arg_count != classes_size) {
- ThrowIllegalArgumentException(StringPrintf("Wrong number of arguments; expected %d, got %d",
- classes_size, arg_count).c_str());
+ if (!CheckArgsForInvokeMethod(np_method, objects)) {
return nullptr;
}
@@ -661,39 +714,54 @@ jobject InvokeMethod(const ScopedObjectAccessAlreadyRunnable& soa, jobject javaM
// Invoke the method.
JValue result;
- uint32_t shorty_len = 0;
- const char* shorty = np_method->GetShorty(&shorty_len);
- ArgArray arg_array(shorty, shorty_len);
- if (!arg_array.BuildArgArrayFromObjectArray(receiver, objects, np_method, soa.Self())) {
- CHECK(soa.Self()->IsExceptionPending());
+ const char* shorty;
+ if (!InvokeMethodImpl(soa, m, np_method, receiver, objects, &shorty, &result)) {
return nullptr;
}
+ return soa.AddLocalReference<jobject>(BoxPrimitive(Primitive::GetType(shorty[0]), result));
+}
- InvokeWithArgArray(soa, m, &arg_array, &result, shorty);
+void InvokeConstructor(const ScopedObjectAccessAlreadyRunnable& soa,
+ ArtMethod* constructor,
+ ObjPtr<mirror::Object> receiver,
+ jobject javaArgs) {
+ // We want to make sure that the stack is not within a small distance from the
+ // protected region in case we are calling into a leaf function whose stack
+ // check has been elided.
+ if (UNLIKELY(__builtin_frame_address(0) < soa.Self()->GetStackEndForInterpreter(true))) {
+ ThrowStackOverflowError(soa.Self());
+ return;
+ }
- // Wrap any exception with "Ljava/lang/reflect/InvocationTargetException;" and return early.
- if (soa.Self()->IsExceptionPending()) {
- // If we get another exception when we are trying to wrap, then just use that instead.
- ScopedLocalRef<jthrowable> th(soa.Env(), soa.Env()->ExceptionOccurred());
- soa.Self()->ClearException();
- jclass exception_class = soa.Env()->FindClass("java/lang/reflect/InvocationTargetException");
- if (exception_class == nullptr) {
- soa.Self()->AssertPendingException();
- return nullptr;
- }
- jmethodID mid = soa.Env()->GetMethodID(exception_class, "<init>", "(Ljava/lang/Throwable;)V");
- CHECK(mid != nullptr);
- jobject exception_instance = soa.Env()->NewObject(exception_class, mid, th.get());
- if (exception_instance == nullptr) {
- soa.Self()->AssertPendingException();
- return nullptr;
- }
- soa.Env()->Throw(reinterpret_cast<jthrowable>(exception_instance));
- return nullptr;
+ if (kIsDebugBuild) {
+ CHECK(constructor->IsConstructor());
+
+ ObjPtr<mirror::Class> declaring_class = constructor->GetDeclaringClass();
+ CHECK(declaring_class->IsInitialized());
+
+ // Calls to String.<init> should have been repplaced with with equivalent StringFactory calls.
+ CHECK(!declaring_class->IsStringClass());
+
+ // Check that the receiver is non-null and an instance of the field's declaring class.
+ CHECK(receiver != nullptr);
+ CHECK(VerifyObjectIsClass(receiver, declaring_class));
+ CHECK_EQ(constructor,
+ receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(constructor,
+ kRuntimePointerSize));
}
- // Box if necessary and return.
- return soa.AddLocalReference<jobject>(BoxPrimitive(Primitive::GetType(shorty[0]), result));
+ // Get our arrays of arguments and their types, and check they're the same size.
+ ObjPtr<mirror::ObjectArray<mirror::Object>> objects =
+ soa.Decode<mirror::ObjectArray<mirror::Object>>(javaArgs);
+ ArtMethod* np_method = constructor->GetInterfaceMethodIfProxy(kRuntimePointerSize);
+ if (!CheckArgsForInvokeMethod(np_method, objects)) {
+ return;
+ }
+
+ // Invoke the constructor.
+ JValue result;
+ const char* shorty;
+ InvokeMethodImpl(soa, constructor, np_method, receiver, objects, &shorty, &result);
}
ObjPtr<mirror::Object> BoxPrimitive(Primitive::Type src_class, const JValue& value) {
diff --git a/runtime/reflection.h b/runtime/reflection.h
index 4560a3969e..74580a21e0 100644
--- a/runtime/reflection.h
+++ b/runtime/reflection.h
@@ -69,13 +69,13 @@ JValue InvokeWithVarArgs(const ScopedObjectAccessAlreadyRunnable& soa,
JValue InvokeWithJValues(const ScopedObjectAccessAlreadyRunnable& soa,
jobject obj,
jmethodID mid,
- jvalue* args)
+ const jvalue* args)
REQUIRES_SHARED(Locks::mutator_lock_);
JValue InvokeVirtualOrInterfaceWithJValues(const ScopedObjectAccessAlreadyRunnable& soa,
jobject obj,
jmethodID mid,
- jvalue* args)
+ const jvalue* args)
REQUIRES_SHARED(Locks::mutator_lock_);
JValue InvokeVirtualOrInterfaceWithVarArgs(const ScopedObjectAccessAlreadyRunnable& soa,
@@ -92,6 +92,14 @@ jobject InvokeMethod(const ScopedObjectAccessAlreadyRunnable& soa,
size_t num_frames = 1)
REQUIRES_SHARED(Locks::mutator_lock_);
+// Special-casing of the above. Assumes that the method is the correct constructor, the class is
+// initialized, and that the receiver is an instance of the class.
+void InvokeConstructor(const ScopedObjectAccessAlreadyRunnable& soa,
+ ArtMethod* constructor,
+ ObjPtr<mirror::Object> receiver,
+ jobject args)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
ALWAYS_INLINE bool VerifyObjectIsClass(ObjPtr<mirror::Object> o, ObjPtr<mirror::Class> c)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 1e327fc8ed..a81c4d0518 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -240,7 +240,7 @@ Runtime::Runtime()
exit_(nullptr),
abort_(nullptr),
stats_enabled_(false),
- is_running_on_memory_tool_(RUNNING_ON_MEMORY_TOOL),
+ is_running_on_memory_tool_(kRunningOnMemoryTool),
instrumentation_(),
main_thread_group_(nullptr),
system_thread_group_(nullptr),
@@ -713,6 +713,23 @@ std::string Runtime::GetCompilerExecutable() const {
return compiler_executable;
}
+void Runtime::RunRootClinits(Thread* self) {
+ class_linker_->RunRootClinits(self);
+
+ GcRoot<mirror::Throwable>* exceptions[] = {
+ &pre_allocated_OutOfMemoryError_when_throwing_exception_,
+ // &pre_allocated_OutOfMemoryError_when_throwing_oome_, // Same class as above.
+ // &pre_allocated_OutOfMemoryError_when_handling_stack_overflow_, // Same class as above.
+ &pre_allocated_NoClassDefFoundError_,
+ };
+ for (GcRoot<mirror::Throwable>* exception : exceptions) {
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> klass = hs.NewHandle<mirror::Class>(exception->Read()->GetClass());
+ class_linker_->EnsureInitialized(self, klass, true, true);
+ self->AssertNoPendingException();
+ }
+}
+
bool Runtime::Start() {
VLOG(startup) << "Runtime::Start entering";
@@ -742,8 +759,10 @@ bool Runtime::Start() {
auto field_class(hs.NewHandle<mirror::Class>(GetClassRoot<mirror::Field>(class_roots)));
class_linker_->EnsureInitialized(soa.Self(), class_class, true, true);
+ self->AssertNoPendingException();
// Field class is needed for register_java_net_InetAddress in libcore, b/28153851.
class_linker_->EnsureInitialized(soa.Self(), field_class, true, true);
+ self->AssertNoPendingException();
}
// InitNativeMethods needs to be after started_ so that the classes
@@ -1020,7 +1039,7 @@ static bool OpenDexFilesFromImage(const std::string& image_location,
return false;
}
- for (const OatFile::OatDexFile* oat_dex_file : oat_file->GetOatDexFiles()) {
+ for (const OatDexFile* oat_dex_file : oat_file->GetOatDexFiles()) {
if (oat_dex_file == nullptr) {
*failures += 1;
continue;
@@ -1090,15 +1109,30 @@ void Runtime::SetSentinel(mirror::Object* sentinel) {
sentinel_ = GcRoot<mirror::Object>(sentinel);
}
-static inline void InitPreAllocatedException(Thread* self,
- GcRoot<mirror::Throwable>* exception,
- const char* exception_class_descriptor,
- const char* msg)
+static inline void CreatePreAllocatedException(Thread* self,
+ Runtime* runtime,
+ GcRoot<mirror::Throwable>* exception,
+ const char* exception_class_descriptor,
+ const char* msg)
REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK_EQ(self, Thread::Current());
- self->ThrowNewException(exception_class_descriptor, msg);
- *exception = GcRoot<mirror::Throwable>(self->GetException());
- self->ClearException();
+ ClassLinker* class_linker = runtime->GetClassLinker();
+ // Allocate an object without initializing the class to allow non-trivial Throwable.<clinit>().
+ ObjPtr<mirror::Class> klass = class_linker->FindSystemClass(self, exception_class_descriptor);
+ CHECK(klass != nullptr);
+ gc::AllocatorType allocator_type = runtime->GetHeap()->GetCurrentAllocator();
+ ObjPtr<mirror::Throwable> exception_object = ObjPtr<mirror::Throwable>::DownCast(
+ klass->Alloc</* kIsInstrumented */ true>(self, allocator_type));
+ CHECK(exception_object != nullptr);
+ *exception = GcRoot<mirror::Throwable>(exception_object);
+ // Initialize the "detailMessage" field.
+ ObjPtr<mirror::String> message = mirror::String::AllocFromModifiedUtf8(self, msg);
+ CHECK(message != nullptr);
+ ObjPtr<mirror::Class> throwable = GetClassRoot<mirror::Throwable>(class_linker);
+ ArtField* detailMessageField =
+ throwable->FindDeclaredInstanceField("detailMessage", "Ljava/lang/String;");
+ CHECK(detailMessageField != nullptr);
+ detailMessageField->SetObject</* kTransactionActive */ false>(exception->Read(), message);
}
bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
@@ -1283,7 +1317,8 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
dump_gc_performance_on_shutdown_ = runtime_options.Exists(Opt::DumpGCPerformanceOnShutdown);
jdwp_options_ = runtime_options.GetOrDefault(Opt::JdwpOptions);
- jdwp_provider_ = runtime_options.GetOrDefault(Opt::JdwpProvider);
+ jdwp_provider_ = CanonicalizeJdwpProvider(runtime_options.GetOrDefault(Opt::JdwpProvider),
+ IsJavaDebuggable());
switch (jdwp_provider_) {
case JdwpProvider::kNone: {
VLOG(jdwp) << "Disabling all JDWP support.";
@@ -1317,6 +1352,11 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
constexpr const char* plugin_name = kIsDebugBuild ? "libadbconnectiond.so"
: "libadbconnection.so";
plugins_.push_back(Plugin::Create(plugin_name));
+ break;
+ }
+ case JdwpProvider::kUnset: {
+ LOG(FATAL) << "Illegal jdwp provider " << jdwp_provider_ << " was not filtered out!";
+ break;
}
}
callbacks_->AddThreadLifecycleCallback(Dbg::GetThreadLifecycleCallback());
@@ -1362,8 +1402,8 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
case InstructionSet::kMips:
case InstructionSet::kMips64:
implicit_null_checks_ = true;
- // Installing stack protection does not play well with valgrind.
- implicit_so_checks_ = !(RUNNING_ON_MEMORY_TOOL && kMemoryToolIsValgrind);
+ // Historical note: Installing stack protection was not playing well with Valgrind.
+ implicit_so_checks_ = true;
break;
default:
// Keep the defaults.
@@ -1378,8 +1418,8 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
// These need to be in a specific order. The null point check handler must be
// after the suspend check and stack overflow check handlers.
//
- // Note: the instances attach themselves to the fault manager and are handled by it. The manager
- // will delete the instance on Shutdown().
+ // Note: the instances attach themselves to the fault manager and are handled by it. The
+ // manager will delete the instance on Shutdown().
if (implicit_suspend_checks_) {
new SuspensionHandler(&fault_manager);
}
@@ -1537,32 +1577,36 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
} else {
// Pre-allocate an OutOfMemoryError for the case when we fail to
// allocate the exception to be thrown.
- InitPreAllocatedException(self,
- &pre_allocated_OutOfMemoryError_when_throwing_exception_,
- "Ljava/lang/OutOfMemoryError;",
- "OutOfMemoryError thrown while trying to throw an exception; "
- "no stack trace available");
+ CreatePreAllocatedException(self,
+ this,
+ &pre_allocated_OutOfMemoryError_when_throwing_exception_,
+ "Ljava/lang/OutOfMemoryError;",
+ "OutOfMemoryError thrown while trying to throw an exception; "
+ "no stack trace available");
// Pre-allocate an OutOfMemoryError for the double-OOME case.
- InitPreAllocatedException(self,
- &pre_allocated_OutOfMemoryError_when_throwing_oome_,
- "Ljava/lang/OutOfMemoryError;",
- "OutOfMemoryError thrown while trying to throw OutOfMemoryError; "
- "no stack trace available");
+ CreatePreAllocatedException(self,
+ this,
+ &pre_allocated_OutOfMemoryError_when_throwing_oome_,
+ "Ljava/lang/OutOfMemoryError;",
+ "OutOfMemoryError thrown while trying to throw OutOfMemoryError; "
+ "no stack trace available");
// Pre-allocate an OutOfMemoryError for the case when we fail to
// allocate while handling a stack overflow.
- InitPreAllocatedException(self,
- &pre_allocated_OutOfMemoryError_when_handling_stack_overflow_,
- "Ljava/lang/OutOfMemoryError;",
- "OutOfMemoryError thrown while trying to handle a stack overflow; "
- "no stack trace available");
+ CreatePreAllocatedException(self,
+ this,
+ &pre_allocated_OutOfMemoryError_when_handling_stack_overflow_,
+ "Ljava/lang/OutOfMemoryError;",
+ "OutOfMemoryError thrown while trying to handle a stack overflow; "
+ "no stack trace available");
// Pre-allocate a NoClassDefFoundError for the common case of failing to find a system class
// ahead of checking the application's class loader.
- InitPreAllocatedException(self,
- &pre_allocated_NoClassDefFoundError_,
- "Ljava/lang/NoClassDefFoundError;",
- "Class not found using the boot class loader; "
- "no stack trace available");
+ CreatePreAllocatedException(self,
+ this,
+ &pre_allocated_NoClassDefFoundError_,
+ "Ljava/lang/NoClassDefFoundError;",
+ "Class not found using the boot class loader; "
+ "no stack trace available");
}
// Runtime initialization is largely done now.
@@ -2045,6 +2089,7 @@ void Runtime::VisitNonThreadRoots(RootVisitor* visitor) {
pre_allocated_OutOfMemoryError_when_handling_stack_overflow_
.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
pre_allocated_NoClassDefFoundError_.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
+ VisitImageRoots(visitor);
verifier::MethodVerifier::VisitStaticRoots(visitor);
VisitTransactionRoots(visitor);
}
diff --git a/runtime/runtime.h b/runtime/runtime.h
index d85490c0a6..f413733804 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -212,6 +212,8 @@ class Runtime {
return finished_starting_;
}
+ void RunRootClinits(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
+
static Runtime* Current() {
return instance_;
}
diff --git a/runtime/runtime_callbacks_test.cc b/runtime/runtime_callbacks_test.cc
index 72d9919971..794ac19c4b 100644
--- a/runtime/runtime_callbacks_test.cc
+++ b/runtime/runtime_callbacks_test.cc
@@ -339,9 +339,6 @@ class RuntimeSigQuitCallbackRuntimeCallbacksTest : public RuntimeCallbacksTest {
};
TEST_F(RuntimeSigQuitCallbackRuntimeCallbacksTest, SigQuit) {
- // SigQuit induces a dump. ASAN isn't happy with libunwind reading memory.
- TEST_DISABLED_FOR_MEMORY_TOOL_ASAN();
-
// The runtime needs to be started for the signal handler.
Thread* self = Thread::Current();
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index e647423b9c..3f9a3229ca 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -44,7 +44,7 @@ RUNTIME_OPTIONS_KEY (std::string, Image)
RUNTIME_OPTIONS_KEY (Unit, CheckJni)
RUNTIME_OPTIONS_KEY (Unit, JniOptsForceCopy)
RUNTIME_OPTIONS_KEY (std::string, JdwpOptions, "")
-RUNTIME_OPTIONS_KEY (JdwpProvider, JdwpProvider, JdwpProvider::kNone)
+RUNTIME_OPTIONS_KEY (JdwpProvider, JdwpProvider, JdwpProvider::kUnset)
RUNTIME_OPTIONS_KEY (MemoryKiB, MemoryMaximumSize, gc::Heap::kDefaultMaximumSize) // -Xmx
RUNTIME_OPTIONS_KEY (MemoryKiB, MemoryInitialSize, gc::Heap::kDefaultInitialSize) // -Xms
RUNTIME_OPTIONS_KEY (MemoryKiB, HeapGrowthLimit) // Default is 0 for unlimited
diff --git a/runtime/stack.cc b/runtime/stack.cc
index bd0d5d680e..053c28fce9 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -68,7 +68,6 @@ StackVisitor::StackVisitor(Thread* thread,
cur_oat_quick_method_header_(nullptr),
num_frames_(num_frames),
cur_depth_(0),
- current_inlining_depth_(0),
context_(context),
check_suspended_(check_suspended) {
if (check_suspended_) {
@@ -76,32 +75,15 @@ StackVisitor::StackVisitor(Thread* thread,
}
}
-static StackMap GetCurrentStackMap(CodeInfo& code_info,
- const OatQuickMethodHeader* method_header,
- uintptr_t cur_quick_frame_pc)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- uint32_t native_pc_offset = method_header->NativeQuickPcOffset(cur_quick_frame_pc);
- StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
- DCHECK(stack_map.IsValid());
- return stack_map;
-}
-
ArtMethod* StackVisitor::GetMethod() const {
if (cur_shadow_frame_ != nullptr) {
return cur_shadow_frame_->GetMethod();
} else if (cur_quick_frame_ != nullptr) {
if (IsInInlinedFrame()) {
- size_t depth_in_stack_map = current_inlining_depth_ - 1;
const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
- CodeInfo code_info(method_header);
- StackMap stack_map = GetCurrentStackMap(code_info, method_header, cur_quick_frame_pc_);
MethodInfo method_info = method_header->GetOptimizedMethodInfo();
DCHECK(walk_kind_ != StackWalkKind::kSkipInlinedFrames);
- return GetResolvedMethod(*GetCurrentQuickFrame(),
- method_info,
- code_info,
- stack_map,
- depth_in_stack_map);
+ return GetResolvedMethod(*GetCurrentQuickFrame(), method_info, current_inline_frames_);
} else {
return *cur_quick_frame_;
}
@@ -114,11 +96,7 @@ uint32_t StackVisitor::GetDexPc(bool abort_on_failure) const {
return cur_shadow_frame_->GetDexPC();
} else if (cur_quick_frame_ != nullptr) {
if (IsInInlinedFrame()) {
- const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
- CodeInfo code_info(method_header);
- size_t depth_in_stack_map = current_inlining_depth_ - 1;
- StackMap stack_map = GetCurrentStackMap(code_info, method_header, cur_quick_frame_pc_);
- return code_info.GetInlineInfoAtDepth(stack_map, depth_in_stack_map).GetDexPc();
+ return current_inline_frames_.back().GetDexPc();
} else if (cur_oat_quick_method_header_ == nullptr) {
return dex::kDexNoIndex;
} else {
@@ -233,19 +211,18 @@ bool StackVisitor::GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKin
uint32_t native_pc_offset = method_header->NativeQuickPcOffset(cur_quick_frame_pc_);
StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
DCHECK(stack_map.IsValid());
- size_t depth_in_stack_map = current_inlining_depth_ - 1;
DexRegisterMap dex_register_map = IsInInlinedFrame()
- ? code_info.GetDexRegisterMapAtDepth(depth_in_stack_map, stack_map, number_of_dex_registers)
- : code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
-
- if (!dex_register_map.IsValid()) {
+ ? code_info.GetInlineDexRegisterMapOf(stack_map, current_inline_frames_.back())
+ : code_info.GetDexRegisterMapOf(stack_map);
+ if (dex_register_map.empty()) {
return false;
}
- DexRegisterLocation::Kind location_kind = dex_register_map.GetLocationKind(vreg);
+ DCHECK_EQ(dex_register_map.size(), number_of_dex_registers);
+ DexRegisterLocation::Kind location_kind = dex_register_map[vreg].GetKind();
switch (location_kind) {
case DexRegisterLocation::Kind::kInStack: {
- const int32_t offset = dex_register_map.GetStackOffsetInBytes(vreg);
+ const int32_t offset = dex_register_map[vreg].GetStackOffsetInBytes();
const uint8_t* addr = reinterpret_cast<const uint8_t*>(cur_quick_frame_) + offset;
*val = *reinterpret_cast<const uint32_t*>(addr);
return true;
@@ -254,18 +231,16 @@ bool StackVisitor::GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKin
case DexRegisterLocation::Kind::kInRegisterHigh:
case DexRegisterLocation::Kind::kInFpuRegister:
case DexRegisterLocation::Kind::kInFpuRegisterHigh: {
- uint32_t reg = dex_register_map.GetMachineRegister(vreg);
+ uint32_t reg = dex_register_map[vreg].GetMachineRegister();
return GetRegisterIfAccessible(reg, kind, val);
}
case DexRegisterLocation::Kind::kConstant:
- *val = dex_register_map.GetConstant(vreg);
+ *val = dex_register_map[vreg].GetConstant();
return true;
case DexRegisterLocation::Kind::kNone:
return false;
default:
- LOG(FATAL)
- << "Unexpected location kind "
- << dex_register_map.GetLocationInternalKind(vreg);
+ LOG(FATAL) << "Unexpected location kind " << dex_register_map[vreg].GetKind();
UNREACHABLE();
}
}
@@ -822,10 +797,10 @@ void StackVisitor::WalkStack(bool include_transitions) {
cur_oat_quick_method_header_->NativeQuickPcOffset(cur_quick_frame_pc_);
StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
if (stack_map.IsValid() && stack_map.HasInlineInfo()) {
- DCHECK_EQ(current_inlining_depth_, 0u);
- for (current_inlining_depth_ = code_info.GetInlineDepthOf(stack_map);
- current_inlining_depth_ != 0;
- --current_inlining_depth_) {
+ DCHECK_EQ(current_inline_frames_.size(), 0u);
+ for (current_inline_frames_ = code_info.GetInlineInfosOf(stack_map);
+ !current_inline_frames_.empty();
+ current_inline_frames_.pop_back()) {
bool should_continue = VisitFrame();
if (UNLIKELY(!should_continue)) {
return;
@@ -851,13 +826,14 @@ void StackVisitor::WalkStack(bool include_transitions) {
uint8_t* return_pc_addr = reinterpret_cast<uint8_t*>(cur_quick_frame_) + return_pc_offset;
uintptr_t return_pc = *reinterpret_cast<uintptr_t*>(return_pc_addr);
- if (UNLIKELY(exit_stubs_installed)) {
+ if (UNLIKELY(exit_stubs_installed ||
+ reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()) == return_pc)) {
// While profiling, the return pc is restored from the side stack, except when walking
// the stack for an exception where the side stack will be unwound in VisitFrame.
if (reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()) == return_pc) {
CHECK_LT(instrumentation_stack_depth, thread_->GetInstrumentationStack()->size());
const instrumentation::InstrumentationStackFrame& instrumentation_frame =
- thread_->GetInstrumentationStack()->at(instrumentation_stack_depth);
+ (*thread_->GetInstrumentationStack())[instrumentation_stack_depth];
instrumentation_stack_depth++;
if (GetMethod() ==
Runtime::Current()->GetCalleeSaveMethod(CalleeSaveType::kSaveAllCalleeSaves)) {
diff --git a/runtime/stack.h b/runtime/stack.h
index a16930bba0..02578d25b7 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -23,6 +23,7 @@
#include "base/macros.h"
#include "base/mutex.h"
#include "quick/quick_method_frame_info.h"
+#include "stack_map.h"
namespace art {
@@ -219,11 +220,11 @@ class StackVisitor {
void SetReturnPc(uintptr_t new_ret_pc) REQUIRES_SHARED(Locks::mutator_lock_);
bool IsInInlinedFrame() const {
- return current_inlining_depth_ != 0;
+ return !current_inline_frames_.empty();
}
- size_t GetCurrentInliningDepth() const {
- return current_inlining_depth_;
+ InlineInfo GetCurrentInlinedFrame() const {
+ return current_inline_frames_.back();
}
uintptr_t GetCurrentQuickFramePc() const {
@@ -309,9 +310,9 @@ class StackVisitor {
size_t num_frames_;
// Depth of the frame we're currently at.
size_t cur_depth_;
- // Current inlining depth of the method we are currently at.
- // 0 if there is no inlined frame.
- size_t current_inlining_depth_;
+ // Current inlined frames of the method we are currently at.
+ // We keep poping frames from the end as we visit the frames.
+ BitTableRange<InlineInfo> current_inline_frames_;
protected:
Context* const context_;
diff --git a/runtime/stack_map.cc b/runtime/stack_map.cc
index a5749b84a7..7e46eb7e47 100644
--- a/runtime/stack_map.cc
+++ b/runtime/stack_map.cc
@@ -21,58 +21,173 @@
#include "art_method.h"
#include "base/indenter.h"
+#include "base/stats.h"
+#include "oat_quick_method_header.h"
#include "scoped_thread_state_change-inl.h"
namespace art {
-std::ostream& operator<<(std::ostream& stream, const DexRegisterLocation& reg) {
- using Kind = DexRegisterLocation::Kind;
- switch (reg.GetKind()) {
- case Kind::kNone:
- return stream << "None";
- case Kind::kInStack:
- return stream << "sp+" << reg.GetValue();
- case Kind::kInRegister:
- return stream << "r" << reg.GetValue();
- case Kind::kInRegisterHigh:
- return stream << "r" << reg.GetValue() << "/hi";
- case Kind::kInFpuRegister:
- return stream << "f" << reg.GetValue();
- case Kind::kInFpuRegisterHigh:
- return stream << "f" << reg.GetValue() << "/hi";
- case Kind::kConstant:
- return stream << "#" << reg.GetValue();
- default:
- return stream << "DexRegisterLocation(" << static_cast<uint32_t>(reg.GetKind())
- << "," << reg.GetValue() << ")";
+CodeInfo::CodeInfo(const OatQuickMethodHeader* header)
+ : CodeInfo(header->GetOptimizedCodeInfoPtr()) {
+}
+
+void CodeInfo::Decode(const uint8_t* data) {
+ const uint8_t* begin = data;
+ frame_size_in_bytes_ = DecodeUnsignedLeb128(&data);
+ core_spill_mask_ = DecodeUnsignedLeb128(&data);
+ fp_spill_mask_ = DecodeUnsignedLeb128(&data);
+ number_of_dex_registers_ = DecodeUnsignedLeb128(&data);
+ BitMemoryReader reader(data, /* bit_offset */ 0);
+ stack_maps_.Decode(reader);
+ register_masks_.Decode(reader);
+ stack_masks_.Decode(reader);
+ invoke_infos_.Decode(reader);
+ inline_infos_.Decode(reader);
+ dex_register_masks_.Decode(reader);
+ dex_register_maps_.Decode(reader);
+ dex_register_catalog_.Decode(reader);
+ size_in_bits_ = (data - begin) * kBitsPerByte + reader.GetBitOffset();
+}
+
+BitTable<StackMap>::const_iterator CodeInfo::BinarySearchNativePc(uint32_t packed_pc) const {
+ return std::partition_point(
+ stack_maps_.begin(),
+ stack_maps_.end(),
+ [packed_pc](const StackMap& sm) {
+ return sm.GetPackedNativePc() < packed_pc && sm.GetKind() != StackMap::Kind::Catch;
+ });
+}
+
+StackMap CodeInfo::GetStackMapForNativePcOffset(uint32_t pc, InstructionSet isa) const {
+ auto it = BinarySearchNativePc(StackMap::PackNativePc(pc, isa));
+ // Start at the lower bound and iterate over all stack maps with the given native pc.
+ for (; it != stack_maps_.end() && (*it).GetNativePcOffset(isa) == pc; ++it) {
+ StackMap::Kind kind = static_cast<StackMap::Kind>((*it).GetKind());
+ if (kind == StackMap::Kind::Default || kind == StackMap::Kind::OSR) {
+ return *it;
+ }
}
+ return stack_maps_.GetInvalidRow();
}
-static void DumpDexRegisterMap(VariableIndentationOutputStream* vios,
- const DexRegisterMap& map) {
- if (map.IsValid()) {
+// Scan backward to determine dex register locations at given stack map.
+// All registers for a stack map are combined - inlined registers are just appended,
+// therefore 'first_dex_register' allows us to select a sub-range to decode.
+void CodeInfo::DecodeDexRegisterMap(uint32_t stack_map_index,
+ uint32_t first_dex_register,
+ /*out*/ DexRegisterMap* map) const {
+ // Count remaining work so we know when we have finished.
+ uint32_t remaining_registers = map->size();
+
+ // Keep scanning backwards and collect the most recent location of each register.
+ for (int32_t s = stack_map_index; s >= 0 && remaining_registers != 0; s--) {
+ StackMap stack_map = GetStackMapAt(s);
+ DCHECK_LE(stack_map_index - s, kMaxDexRegisterMapSearchDistance) << "Unbounded search";
+
+ // The mask specifies which registers where modified in this stack map.
+ // NB: the mask can be shorter than expected if trailing zero bits were removed.
+ uint32_t mask_index = stack_map.GetDexRegisterMaskIndex();
+ if (mask_index == StackMap::kNoValue) {
+ continue; // Nothing changed at this stack map.
+ }
+ BitMemoryRegion mask = dex_register_masks_.GetBitMemoryRegion(mask_index);
+ if (mask.size_in_bits() <= first_dex_register) {
+ continue; // Nothing changed after the first register we are interested in.
+ }
+
+ // The map stores one catalogue index per each modified register location.
+ uint32_t map_index = stack_map.GetDexRegisterMapIndex();
+ DCHECK_NE(map_index, StackMap::kNoValue);
+
+ // Skip initial registers which we are not interested in (to get to inlined registers).
+ map_index += mask.PopCount(0, first_dex_register);
+ mask = mask.Subregion(first_dex_register, mask.size_in_bits() - first_dex_register);
+
+ // Update registers that we see for first time (i.e. most recent value).
+ DexRegisterLocation* regs = map->data();
+ const uint32_t end = std::min<uint32_t>(map->size(), mask.size_in_bits());
+ const size_t kNumBits = BitSizeOf<uint32_t>();
+ for (uint32_t reg = 0; reg < end; reg += kNumBits) {
+ // Process the mask in chunks of kNumBits for performance.
+ uint32_t bits = mask.LoadBits(reg, std::min<uint32_t>(end - reg, kNumBits));
+ while (bits != 0) {
+ uint32_t bit = CTZ(bits);
+ if (regs[reg + bit].GetKind() == DexRegisterLocation::Kind::kInvalid) {
+ regs[reg + bit] = GetDexRegisterCatalogEntry(dex_register_maps_.Get(map_index));
+ remaining_registers--;
+ }
+ map_index++;
+ bits ^= 1u << bit; // Clear the bit.
+ }
+ }
+ }
+
+ // Set any remaining registers to None (which is the default state at first stack map).
+ if (remaining_registers != 0) {
+ DexRegisterLocation* regs = map->data();
+ for (uint32_t r = 0; r < map->size(); r++) {
+ if (regs[r].GetKind() == DexRegisterLocation::Kind::kInvalid) {
+ regs[r] = DexRegisterLocation::None();
+ }
+ }
+ }
+}
+
+template<typename Accessor>
+static void AddTableSizeStats(const char* table_name,
+ const BitTable<Accessor>& table,
+ /*out*/ Stats* parent) {
+ Stats* table_stats = parent->Child(table_name);
+ table_stats->AddBits(table.BitSize());
+ table_stats->Child("Header")->AddBits(table.HeaderBitSize());
+ const char* const* column_names = GetBitTableColumnNames<Accessor>();
+ for (size_t c = 0; c < table.NumColumns(); c++) {
+ if (table.NumColumnBits(c) > 0) {
+ Stats* column_stats = table_stats->Child(column_names[c]);
+ column_stats->AddBits(table.NumRows() * table.NumColumnBits(c), table.NumRows());
+ }
+ }
+}
+
+void CodeInfo::AddSizeStats(/*out*/ Stats* parent) const {
+ Stats* stats = parent->Child("CodeInfo");
+ stats->AddBytes(Size());
+ AddTableSizeStats<StackMap>("StackMaps", stack_maps_, stats);
+ AddTableSizeStats<RegisterMask>("RegisterMasks", register_masks_, stats);
+ AddTableSizeStats<MaskInfo>("StackMasks", stack_masks_, stats);
+ AddTableSizeStats<InvokeInfo>("InvokeInfos", invoke_infos_, stats);
+ AddTableSizeStats<InlineInfo>("InlineInfos", inline_infos_, stats);
+ AddTableSizeStats<MaskInfo>("DexRegisterMasks", dex_register_masks_, stats);
+ AddTableSizeStats<DexRegisterMapInfo>("DexRegisterMaps", dex_register_maps_, stats);
+ AddTableSizeStats<DexRegisterInfo>("DexRegisterCatalog", dex_register_catalog_, stats);
+}
+
+void DexRegisterMap::Dump(VariableIndentationOutputStream* vios) const {
+ if (HasAnyLiveDexRegisters()) {
ScopedIndentation indent1(vios);
- for (size_t i = 0; i < map.size(); ++i) {
- if (map.IsDexRegisterLive(i)) {
- vios->Stream() << "v" << i << ":" << map.Get(i) << " ";
+ for (size_t i = 0; i < size(); ++i) {
+ DexRegisterLocation reg = (*this)[i];
+ if (reg.IsLive()) {
+ vios->Stream() << "v" << i << ":" << reg << " ";
}
}
vios->Stream() << "\n";
}
}
-template<uint32_t kNumColumns>
+template<typename Accessor>
static void DumpTable(VariableIndentationOutputStream* vios,
const char* table_name,
- const BitTable<kNumColumns>& table,
+ const BitTable<Accessor>& table,
bool verbose,
bool is_mask = false) {
if (table.NumRows() != 0) {
- vios->Stream() << table_name << " BitSize=" << table.NumRows() * table.NumRowBits();
+ vios->Stream() << table_name << " BitSize=" << table.BitSize();
vios->Stream() << " Rows=" << table.NumRows() << " Bits={";
+ const char* const* column_names = GetBitTableColumnNames<Accessor>();
for (size_t c = 0; c < table.NumColumns(); c++) {
vios->Stream() << (c != 0 ? " " : "");
- vios->Stream() << table.NumColumnBits(c);
+ vios->Stream() << column_names[c] << "=" << table.NumColumnBits(c);
}
vios->Stream() << "}\n";
if (verbose) {
@@ -98,29 +213,27 @@ static void DumpTable(VariableIndentationOutputStream* vios,
void CodeInfo::Dump(VariableIndentationOutputStream* vios,
uint32_t code_offset,
- uint16_t num_dex_registers,
bool verbose,
InstructionSet instruction_set,
const MethodInfo& method_info) const {
vios->Stream()
<< "CodeInfo"
- << " BitSize=" << size_ * kBitsPerByte
+ << " BitSize=" << size_in_bits_
<< "\n";
ScopedIndentation indent1(vios);
- DumpTable(vios, "StackMaps", stack_maps_, verbose);
- DumpTable(vios, "RegisterMasks", register_masks_, verbose);
- DumpTable(vios, "StackMasks", stack_masks_, verbose, true /* is_mask */);
- DumpTable(vios, "InvokeInfos", invoke_infos_, verbose);
- DumpTable(vios, "InlineInfos", inline_infos_, verbose);
- DumpTable(vios, "DexRegisterMasks", dex_register_masks_, verbose, true /* is_mask */);
- DumpTable(vios, "DexRegisterMaps", dex_register_maps_, verbose);
- DumpTable(vios, "DexRegisterCatalog", dex_register_catalog_, verbose);
+ DumpTable<StackMap>(vios, "StackMaps", stack_maps_, verbose);
+ DumpTable<RegisterMask>(vios, "RegisterMasks", register_masks_, verbose);
+ DumpTable<MaskInfo>(vios, "StackMasks", stack_masks_, verbose, true /* is_mask */);
+ DumpTable<InvokeInfo>(vios, "InvokeInfos", invoke_infos_, verbose);
+ DumpTable<InlineInfo>(vios, "InlineInfos", inline_infos_, verbose);
+ DumpTable<MaskInfo>(vios, "DexRegisterMasks", dex_register_masks_, verbose, true /* is_mask */);
+ DumpTable<DexRegisterMapInfo>(vios, "DexRegisterMaps", dex_register_maps_, verbose);
+ DumpTable<DexRegisterInfo>(vios, "DexRegisterCatalog", dex_register_catalog_, verbose);
// Display stack maps along with (live) Dex register maps.
if (verbose) {
- for (size_t i = 0; i < GetNumberOfStackMaps(); ++i) {
- StackMap stack_map = GetStackMapAt(i);
- stack_map.Dump(vios, *this, method_info, code_offset, num_dex_registers, instruction_set);
+ for (StackMap stack_map : stack_maps_) {
+ stack_map.Dump(vios, *this, method_info, code_offset, instruction_set);
}
}
}
@@ -129,7 +242,6 @@ void StackMap::Dump(VariableIndentationOutputStream* vios,
const CodeInfo& code_info,
const MethodInfo& method_info,
uint32_t code_offset,
- uint16_t number_of_dex_registers,
InstructionSet instruction_set) const {
const uint32_t pc_offset = GetNativePcOffset(instruction_set);
vios->Stream()
@@ -145,22 +257,16 @@ void StackMap::Dump(VariableIndentationOutputStream* vios,
vios->Stream() << stack_mask.LoadBit(e - i - 1);
}
vios->Stream() << ")\n";
- DumpDexRegisterMap(vios, code_info.GetDexRegisterMapOf(*this, number_of_dex_registers));
- uint32_t depth = code_info.GetInlineDepthOf(*this);
- for (size_t d = 0; d < depth; d++) {
- InlineInfo inline_info = code_info.GetInlineInfoAtDepth(*this, d);
- // We do not know the length of the dex register maps of inlined frames
- // at this level, so we just pass null to `InlineInfo::Dump` to tell
- // it not to look at these maps.
- inline_info.Dump(vios, code_info, *this, method_info, 0);
+ code_info.GetDexRegisterMapOf(*this).Dump(vios);
+ for (InlineInfo inline_info : code_info.GetInlineInfosOf(*this)) {
+ inline_info.Dump(vios, code_info, *this, method_info);
}
}
void InlineInfo::Dump(VariableIndentationOutputStream* vios,
const CodeInfo& code_info,
const StackMap& stack_map,
- const MethodInfo& method_info,
- uint16_t number_of_dex_registers) const {
+ const MethodInfo& method_info) const {
uint32_t depth = Row() - stack_map.GetInlineInfoIndex();
vios->Stream()
<< "InlineInfo[" << Row() << "]"
@@ -176,10 +282,7 @@ void InlineInfo::Dump(VariableIndentationOutputStream* vios,
<< ", method_index=" << GetMethodIndex(method_info);
}
vios->Stream() << ")\n";
- if (number_of_dex_registers != 0) {
- uint16_t vregs = number_of_dex_registers;
- DumpDexRegisterMap(vios, code_info.GetDexRegisterMapAtDepth(depth, stack_map, vregs));
- }
+ code_info.GetInlineDexRegisterMapOf(stack_map, *this).Dump(vios);
}
} // namespace art
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index 6da002138c..2f2053a52a 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -19,6 +19,7 @@
#include <limits>
+#include "arch/instruction_set.h"
#include "base/bit_memory_region.h"
#include "base/bit_table.h"
#include "base/bit_utils.h"
@@ -28,10 +29,11 @@
#include "dex/dex_file_types.h"
#include "dex_register_location.h"
#include "method_info.h"
-#include "oat_quick_method_header.h"
+#include "quick/quick_method_frame_info.h"
namespace art {
+class OatQuickMethodHeader;
class VariableIndentationOutputStream;
// Size of a frame slot, in bytes. This constant is a signed value,
@@ -39,95 +41,69 @@ class VariableIndentationOutputStream;
// (signed) values.
static constexpr ssize_t kFrameSlotSize = 4;
+// The delta compression of dex register maps means we need to scan the stackmaps backwards.
+// We compress the data in such a way so that there is an upper bound on the search distance.
+// Max distance 0 means each stack map must be fully defined and no scanning back is allowed.
+// If this value is changed, the oat file version should be incremented (for DCHECK to pass).
+static constexpr size_t kMaxDexRegisterMapSearchDistance = 32;
+
class ArtMethod;
class CodeInfo;
+class Stats;
std::ostream& operator<<(std::ostream& stream, const DexRegisterLocation& reg);
// Information on Dex register locations for a specific PC.
// Effectively just a convenience wrapper for DexRegisterLocation vector.
// If the size is small enough, it keeps the data on the stack.
+// TODO: Replace this with generic purpose "small-vector" implementation.
class DexRegisterMap {
public:
- // Create map for given number of registers and initialize all locations to None.
- explicit DexRegisterMap(size_t count) : count_(count), regs_small_{} {
+ using iterator = DexRegisterLocation*;
+ using const_iterator = const DexRegisterLocation*;
+
+ // Create map for given number of registers and initialize them to the given value.
+ DexRegisterMap(size_t count, DexRegisterLocation value) : count_(count), regs_small_{} {
if (count_ <= kSmallCount) {
- std::fill_n(regs_small_.begin(), count, DexRegisterLocation::None());
+ std::fill_n(regs_small_.begin(), count, value);
} else {
- regs_large_.resize(count, DexRegisterLocation::None());
+ regs_large_.resize(count, value);
}
}
DexRegisterLocation* data() {
return count_ <= kSmallCount ? regs_small_.data() : regs_large_.data();
}
+ const DexRegisterLocation* data() const {
+ return count_ <= kSmallCount ? regs_small_.data() : regs_large_.data();
+ }
+ iterator begin() { return data(); }
+ iterator end() { return data() + count_; }
+ const_iterator begin() const { return data(); }
+ const_iterator end() const { return data() + count_; }
size_t size() const { return count_; }
+ bool empty() const { return count_ == 0; }
- bool IsValid() const { return count_ != 0; }
-
- DexRegisterLocation Get(size_t index) const {
+ DexRegisterLocation& operator[](size_t index) {
DCHECK_LT(index, count_);
- return count_ <= kSmallCount ? regs_small_[index] : regs_large_[index];
- }
-
- DexRegisterLocation::Kind GetLocationKind(uint16_t dex_register_number) const {
- return Get(dex_register_number).GetKind();
- }
-
- // TODO: Remove.
- DexRegisterLocation::Kind GetLocationInternalKind(uint16_t dex_register_number) const {
- return Get(dex_register_number).GetKind();
+ return data()[index];
}
-
- DexRegisterLocation GetDexRegisterLocation(uint16_t dex_register_number) const {
- return Get(dex_register_number);
- }
-
- int32_t GetStackOffsetInBytes(uint16_t dex_register_number) const {
- DexRegisterLocation location = Get(dex_register_number);
- DCHECK(location.GetKind() == DexRegisterLocation::Kind::kInStack);
- return location.GetValue();
- }
-
- int32_t GetConstant(uint16_t dex_register_number) const {
- DexRegisterLocation location = Get(dex_register_number);
- DCHECK(location.GetKind() == DexRegisterLocation::Kind::kConstant);
- return location.GetValue();
- }
-
- int32_t GetMachineRegister(uint16_t dex_register_number) const {
- DexRegisterLocation location = Get(dex_register_number);
- DCHECK(location.GetKind() == DexRegisterLocation::Kind::kInRegister ||
- location.GetKind() == DexRegisterLocation::Kind::kInRegisterHigh ||
- location.GetKind() == DexRegisterLocation::Kind::kInFpuRegister ||
- location.GetKind() == DexRegisterLocation::Kind::kInFpuRegisterHigh);
- return location.GetValue();
- }
-
- ALWAYS_INLINE bool IsDexRegisterLive(uint16_t dex_register_number) const {
- return Get(dex_register_number).IsLive();
+ const DexRegisterLocation& operator[](size_t index) const {
+ DCHECK_LT(index, count_);
+ return data()[index];
}
size_t GetNumberOfLiveDexRegisters() const {
- size_t number_of_live_dex_registers = 0;
- for (size_t i = 0; i < count_; ++i) {
- if (IsDexRegisterLive(i)) {
- ++number_of_live_dex_registers;
- }
- }
- return number_of_live_dex_registers;
+ return std::count_if(begin(), end(), [](auto& loc) { return loc.IsLive(); });
}
bool HasAnyLiveDexRegisters() const {
- for (size_t i = 0; i < count_; ++i) {
- if (IsDexRegisterLive(i)) {
- return true;
- }
- }
- return false;
+ return std::any_of(begin(), end(), [](auto& loc) { return loc.IsLive(); });
}
+ void Dump(VariableIndentationOutputStream* vios) const;
+
private:
// Store the data inline if the number of registers is small to avoid memory allocations.
// If count_ <= kSmallCount, we use the regs_small_ array, and regs_large_ otherwise.
@@ -145,19 +121,26 @@ class DexRegisterMap {
* - Knowing the inlining information,
* - Knowing the values of dex registers.
*/
-class StackMap : public BitTable<7>::Accessor {
+class StackMap : public BitTableAccessor<8> {
public:
+ enum Kind {
+ Default = -1,
+ Catch = 0,
+ OSR = 1,
+ Debug = 2,
+ };
BIT_TABLE_HEADER()
- BIT_TABLE_COLUMN(0, PackedNativePc)
- BIT_TABLE_COLUMN(1, DexPc)
- BIT_TABLE_COLUMN(2, RegisterMaskIndex)
- BIT_TABLE_COLUMN(3, StackMaskIndex)
- BIT_TABLE_COLUMN(4, InlineInfoIndex)
- BIT_TABLE_COLUMN(5, DexRegisterMaskIndex)
- BIT_TABLE_COLUMN(6, DexRegisterMapIndex)
+ BIT_TABLE_COLUMN(0, Kind)
+ BIT_TABLE_COLUMN(1, PackedNativePc)
+ BIT_TABLE_COLUMN(2, DexPc)
+ BIT_TABLE_COLUMN(3, RegisterMaskIndex)
+ BIT_TABLE_COLUMN(4, StackMaskIndex)
+ BIT_TABLE_COLUMN(5, InlineInfoIndex)
+ BIT_TABLE_COLUMN(6, DexRegisterMaskIndex)
+ BIT_TABLE_COLUMN(7, DexRegisterMapIndex)
ALWAYS_INLINE uint32_t GetNativePcOffset(InstructionSet instruction_set) const {
- return UnpackNativePc(Get<kPackedNativePc>(), instruction_set);
+ return UnpackNativePc(GetPackedNativePc(), instruction_set);
}
ALWAYS_INLINE bool HasInlineInfo() const {
@@ -183,7 +166,6 @@ class StackMap : public BitTable<7>::Accessor {
const CodeInfo& code_info,
const MethodInfo& method_info,
uint32_t code_offset,
- uint16_t number_of_dex_registers,
InstructionSet instruction_set) const;
};
@@ -192,7 +174,7 @@ class StackMap : public BitTable<7>::Accessor {
* The row referenced from the StackMap holds information at depth 0.
* Following rows hold information for further depths.
*/
-class InlineInfo : public BitTable<7>::Accessor {
+class InlineInfo : public BitTableAccessor<6> {
public:
BIT_TABLE_HEADER()
BIT_TABLE_COLUMN(0, IsLast) // Determines if there are further rows for further depths.
@@ -200,7 +182,7 @@ class InlineInfo : public BitTable<7>::Accessor {
BIT_TABLE_COLUMN(2, MethodInfoIndex)
BIT_TABLE_COLUMN(3, ArtMethodHi) // High bits of ArtMethod*.
BIT_TABLE_COLUMN(4, ArtMethodLo) // Low bits of ArtMethod*.
- BIT_TABLE_COLUMN(5, DexRegisterMaskIndex)
+ BIT_TABLE_COLUMN(5, NumberOfDexRegisters) // Includes outer levels and the main method.
BIT_TABLE_COLUMN(6, DexRegisterMapIndex)
static constexpr uint32_t kLast = -1;
@@ -220,18 +202,13 @@ class InlineInfo : public BitTable<7>::Accessor {
return reinterpret_cast<ArtMethod*>((hi << 32) | lo);
}
- ALWAYS_INLINE bool HasDexRegisterMap() const {
- return HasDexRegisterMapIndex();
- }
-
void Dump(VariableIndentationOutputStream* vios,
const CodeInfo& info,
const StackMap& stack_map,
- const MethodInfo& method_info,
- uint16_t number_of_dex_registers) const;
+ const MethodInfo& method_info) const;
};
-class InvokeInfo : public BitTable<3>::Accessor {
+class InvokeInfo : public BitTableAccessor<3> {
public:
BIT_TABLE_HEADER()
BIT_TABLE_COLUMN(0, PackedNativePc)
@@ -239,7 +216,7 @@ class InvokeInfo : public BitTable<3>::Accessor {
BIT_TABLE_COLUMN(2, MethodInfoIndex)
ALWAYS_INLINE uint32_t GetNativePcOffset(InstructionSet instruction_set) const {
- return StackMap::UnpackNativePc(Get<kPackedNativePc>(), instruction_set);
+ return StackMap::UnpackNativePc(GetPackedNativePc(), instruction_set);
}
uint32_t GetMethodIndex(MethodInfo method_info) const {
@@ -247,7 +224,19 @@ class InvokeInfo : public BitTable<3>::Accessor {
}
};
-class DexRegisterInfo : public BitTable<2>::Accessor {
+class MaskInfo : public BitTableAccessor<1> {
+ public:
+ BIT_TABLE_HEADER()
+ BIT_TABLE_COLUMN(0, Mask)
+};
+
+class DexRegisterMapInfo : public BitTableAccessor<1> {
+ public:
+ BIT_TABLE_HEADER()
+ BIT_TABLE_COLUMN(0, CatalogueIndex)
+};
+
+class DexRegisterInfo : public BitTableAccessor<2> {
public:
BIT_TABLE_HEADER()
BIT_TABLE_COLUMN(0, Kind)
@@ -278,7 +267,7 @@ class DexRegisterInfo : public BitTable<2>::Accessor {
// Register masks tend to have many trailing zero bits (caller-saves are usually not encoded),
// therefore it is worth encoding the mask as value+shift.
-class RegisterMask : public BitTable<2>::Accessor {
+class RegisterMask : public BitTableAccessor<2> {
public:
BIT_TABLE_HEADER()
BIT_TABLE_COLUMN(0, Value)
@@ -300,23 +289,21 @@ class CodeInfo {
}
explicit CodeInfo(MemoryRegion region) : CodeInfo(region.begin()) {
- DCHECK_EQ(size_, region.size());
+ DCHECK_EQ(Size(), region.size());
}
- explicit CodeInfo(const OatQuickMethodHeader* header)
- : CodeInfo(header->GetOptimizedCodeInfoPtr()) {
- }
+ explicit CodeInfo(const OatQuickMethodHeader* header);
size_t Size() const {
- return size_;
+ return BitsToBytesRoundUp(size_in_bits_);
}
- bool HasInlineInfo() const {
- return inline_infos_.NumRows() > 0;
+ ALWAYS_INLINE const BitTable<StackMap>& GetStackMaps() const {
+ return stack_maps_;
}
ALWAYS_INLINE StackMap GetStackMapAt(size_t index) const {
- return StackMap(&stack_maps_, index);
+ return stack_maps_.GetRow(index);
}
BitMemoryRegion GetStackMask(size_t index) const {
@@ -330,7 +317,7 @@ class CodeInfo {
uint32_t GetRegisterMaskOf(const StackMap& stack_map) const {
uint32_t index = stack_map.GetRegisterMaskIndex();
- return (index == StackMap::kNoValue) ? 0 : RegisterMask(&register_masks_, index).GetMask();
+ return (index == StackMap::kNoValue) ? 0 : register_masks_.GetRow(index).GetMask();
}
uint32_t GetNumberOfLocationCatalogEntries() const {
@@ -338,7 +325,13 @@ class CodeInfo {
}
ALWAYS_INLINE DexRegisterLocation GetDexRegisterCatalogEntry(size_t index) const {
- return DexRegisterInfo(&dex_register_catalog_, index).GetLocation();
+ return (index == StackMap::kNoValue)
+ ? DexRegisterLocation::None()
+ : dex_register_catalog_.GetRow(index).GetLocation();
+ }
+
+ bool HasInlineInfo() const {
+ return inline_infos_.NumRows() > 0;
}
uint32_t GetNumberOfStackMaps() const {
@@ -346,174 +339,132 @@ class CodeInfo {
}
InvokeInfo GetInvokeInfo(size_t index) const {
- return InvokeInfo(&invoke_infos_, index);
- }
-
- ALWAYS_INLINE DexRegisterMap GetDexRegisterMapOf(StackMap stack_map,
- size_t num_dex_registers) const {
- return DecodeDexRegisterMap(stack_map.GetDexRegisterMaskIndex(),
- stack_map.GetDexRegisterMapIndex(),
- num_dex_registers);
- }
-
- ALWAYS_INLINE DexRegisterMap GetDexRegisterMapAtDepth(uint8_t depth,
- StackMap stack_map,
- size_t num_dex_registers) const {
- InlineInfo inline_info = GetInlineInfoAtDepth(stack_map, depth);
- return DecodeDexRegisterMap(inline_info.GetDexRegisterMaskIndex(),
- inline_info.GetDexRegisterMapIndex(),
- num_dex_registers);
+ return invoke_infos_.GetRow(index);
}
- InlineInfo GetInlineInfo(size_t index) const {
- return InlineInfo(&inline_infos_, index);
+ ALWAYS_INLINE DexRegisterMap GetDexRegisterMapOf(StackMap stack_map) const {
+ if (stack_map.HasDexRegisterMap()) {
+ DexRegisterMap map(number_of_dex_registers_, DexRegisterLocation::Invalid());
+ DecodeDexRegisterMap(stack_map.Row(), /* first_dex_register */ 0, &map);
+ return map;
+ }
+ return DexRegisterMap(0, DexRegisterLocation::None());
+ }
+
+ ALWAYS_INLINE DexRegisterMap GetInlineDexRegisterMapOf(StackMap stack_map,
+ InlineInfo inline_info) const {
+ if (stack_map.HasDexRegisterMap()) {
+ DCHECK(stack_map.HasInlineInfoIndex());
+ uint32_t depth = inline_info.Row() - stack_map.GetInlineInfoIndex();
+ // The register counts are commutative and include all outer levels.
+ // This allows us to determine the range [first, last) in just two lookups.
+ // If we are at depth 0 (the first inlinee), the count from the main method is used.
+ uint32_t first = (depth == 0)
+ ? number_of_dex_registers_
+ : inline_infos_.GetRow(inline_info.Row() - 1).GetNumberOfDexRegisters();
+ uint32_t last = inline_info.GetNumberOfDexRegisters();
+ DexRegisterMap map(last - first, DexRegisterLocation::Invalid());
+ DecodeDexRegisterMap(stack_map.Row(), first, &map);
+ return map;
+ }
+ return DexRegisterMap(0, DexRegisterLocation::None());
}
- uint32_t GetInlineDepthOf(StackMap stack_map) const {
- uint32_t depth = 0;
+ BitTableRange<InlineInfo> GetInlineInfosOf(StackMap stack_map) const {
uint32_t index = stack_map.GetInlineInfoIndex();
if (index != StackMap::kNoValue) {
- while (GetInlineInfo(index + depth++).GetIsLast() == InlineInfo::kMore) { }
+ auto begin = inline_infos_.begin() + index;
+ auto end = begin;
+ while ((*end++).GetIsLast() == InlineInfo::kMore) { }
+ return BitTableRange<InlineInfo>(begin, end);
+ } else {
+ return BitTableRange<InlineInfo>();
}
- return depth;
- }
-
- InlineInfo GetInlineInfoAtDepth(StackMap stack_map, uint32_t depth) const {
- DCHECK(stack_map.HasInlineInfo());
- DCHECK_LT(depth, GetInlineDepthOf(stack_map));
- return GetInlineInfo(stack_map.GetInlineInfoIndex() + depth);
}
StackMap GetStackMapForDexPc(uint32_t dex_pc) const {
- for (size_t i = 0, e = GetNumberOfStackMaps(); i < e; ++i) {
- StackMap stack_map = GetStackMapAt(i);
- if (stack_map.GetDexPc() == dex_pc) {
+ for (StackMap stack_map : stack_maps_) {
+ if (stack_map.GetDexPc() == dex_pc && stack_map.GetKind() != StackMap::Kind::Debug) {
return stack_map;
}
}
- return StackMap();
+ return stack_maps_.GetInvalidRow();
}
- // Searches the stack map list backwards because catch stack maps are stored
- // at the end.
+ // Searches the stack map list backwards because catch stack maps are stored at the end.
StackMap GetCatchStackMapForDexPc(uint32_t dex_pc) const {
for (size_t i = GetNumberOfStackMaps(); i > 0; --i) {
StackMap stack_map = GetStackMapAt(i - 1);
- if (stack_map.GetDexPc() == dex_pc) {
+ if (stack_map.GetDexPc() == dex_pc && stack_map.GetKind() == StackMap::Kind::Catch) {
return stack_map;
}
}
- return StackMap();
+ return stack_maps_.GetInvalidRow();
}
StackMap GetOsrStackMapForDexPc(uint32_t dex_pc) const {
- size_t e = GetNumberOfStackMaps();
- if (e == 0) {
- // There cannot be OSR stack map if there is no stack map.
- return StackMap();
- }
- // Walk over all stack maps. If two consecutive stack maps are identical, then we
- // have found a stack map suitable for OSR.
- for (size_t i = 0; i < e - 1; ++i) {
- StackMap stack_map = GetStackMapAt(i);
- if (stack_map.GetDexPc() == dex_pc) {
- StackMap other = GetStackMapAt(i + 1);
- if (other.GetDexPc() == dex_pc &&
- other.GetNativePcOffset(kRuntimeISA) ==
- stack_map.GetNativePcOffset(kRuntimeISA)) {
- DCHECK_EQ(other.GetDexRegisterMapIndex(),
- stack_map.GetDexRegisterMapIndex());
- if (i < e - 2) {
- // Make sure there are not three identical stack maps following each other.
- DCHECK_NE(
- stack_map.GetNativePcOffset(kRuntimeISA),
- GetStackMapAt(i + 2).GetNativePcOffset(kRuntimeISA));
- }
- return stack_map;
- }
- }
- }
- return StackMap();
- }
-
- StackMap GetStackMapForNativePcOffset(uint32_t native_pc_offset) const {
- // TODO: Safepoint stack maps are sorted by native_pc_offset but catch stack
- // maps are not. If we knew that the method does not have try/catch,
- // we could do binary search.
- for (size_t i = 0, e = GetNumberOfStackMaps(); i < e; ++i) {
- StackMap stack_map = GetStackMapAt(i);
- if (stack_map.GetNativePcOffset(kRuntimeISA) == native_pc_offset) {
+ for (StackMap stack_map : stack_maps_) {
+ if (stack_map.GetDexPc() == dex_pc && stack_map.GetKind() == StackMap::Kind::OSR) {
return stack_map;
}
}
- return StackMap();
+ return stack_maps_.GetInvalidRow();
}
+ StackMap GetStackMapForNativePcOffset(uint32_t pc, InstructionSet isa = kRuntimeISA) const;
+
InvokeInfo GetInvokeInfoForNativePcOffset(uint32_t native_pc_offset) {
- for (size_t index = 0; index < invoke_infos_.NumRows(); index++) {
- InvokeInfo item = GetInvokeInfo(index);
+ for (InvokeInfo item : invoke_infos_) {
if (item.GetNativePcOffset(kRuntimeISA) == native_pc_offset) {
return item;
}
}
- return InvokeInfo();
+ return invoke_infos_.GetInvalidRow();
}
// Dump this CodeInfo object on `vios`.
// `code_offset` is the (absolute) native PC of the compiled method.
void Dump(VariableIndentationOutputStream* vios,
uint32_t code_offset,
- uint16_t number_of_dex_registers,
bool verbose,
InstructionSet instruction_set,
const MethodInfo& method_info) const;
+ // Accumulate code info size statistics into the given Stats tree.
+ void AddSizeStats(/*out*/ Stats* parent) const;
+
+ ALWAYS_INLINE static QuickMethodFrameInfo DecodeFrameInfo(const uint8_t* data) {
+ return QuickMethodFrameInfo(
+ DecodeUnsignedLeb128(&data),
+ DecodeUnsignedLeb128(&data),
+ DecodeUnsignedLeb128(&data));
+ }
+
private:
- ALWAYS_INLINE DexRegisterMap DecodeDexRegisterMap(uint32_t mask_index,
- uint32_t map_index,
- uint32_t num_dex_registers) const {
- DexRegisterMap map(map_index == StackMap::kNoValue ? 0 : num_dex_registers);
- if (mask_index != StackMap::kNoValue) {
- BitMemoryRegion mask = dex_register_masks_.GetBitMemoryRegion(mask_index);
- num_dex_registers = std::min<uint32_t>(num_dex_registers, mask.size_in_bits());
- DexRegisterLocation* regs = map.data();
- for (uint32_t r = 0; r < mask.size_in_bits(); r++) {
- if (mask.LoadBit(r) /* is_live */) {
- DCHECK_LT(r, map.size());
- regs[r] = GetDexRegisterCatalogEntry(dex_register_maps_.Get(map_index++));
- }
- }
- }
- return map;
- }
-
- void Decode(const uint8_t* data) {
- size_t non_header_size = DecodeUnsignedLeb128(&data);
- BitMemoryRegion region(MemoryRegion(const_cast<uint8_t*>(data), non_header_size));
- size_t bit_offset = 0;
- size_ = UnsignedLeb128Size(non_header_size) + non_header_size;
- stack_maps_.Decode(region, &bit_offset);
- register_masks_.Decode(region, &bit_offset);
- stack_masks_.Decode(region, &bit_offset);
- invoke_infos_.Decode(region, &bit_offset);
- inline_infos_.Decode(region, &bit_offset);
- dex_register_masks_.Decode(region, &bit_offset);
- dex_register_maps_.Decode(region, &bit_offset);
- dex_register_catalog_.Decode(region, &bit_offset);
- CHECK_EQ(non_header_size, BitsToBytesRoundUp(bit_offset)) << "Invalid CodeInfo";
- }
-
- size_t size_;
- BitTable<StackMap::kCount> stack_maps_;
- BitTable<RegisterMask::kCount> register_masks_;
- BitTable<1> stack_masks_;
- BitTable<InvokeInfo::kCount> invoke_infos_;
- BitTable<InlineInfo::kCount> inline_infos_;
- BitTable<1> dex_register_masks_;
- BitTable<1> dex_register_maps_;
- BitTable<DexRegisterInfo::kCount> dex_register_catalog_;
-
- friend class OatDumper;
+ // Returns lower bound (fist stack map which has pc greater or equal than the desired one).
+ // It ignores catch stack maps at the end (it is the same as if they had maximum pc value).
+ BitTable<StackMap>::const_iterator BinarySearchNativePc(uint32_t packed_pc) const;
+
+ // Scan backward to determine dex register locations at given stack map.
+ void DecodeDexRegisterMap(uint32_t stack_map_index,
+ uint32_t first_dex_register,
+ /*out*/ DexRegisterMap* map) const;
+
+ void Decode(const uint8_t* data);
+
+ uint32_t frame_size_in_bytes_;
+ uint32_t core_spill_mask_;
+ uint32_t fp_spill_mask_;
+ uint32_t number_of_dex_registers_;
+ BitTable<StackMap> stack_maps_;
+ BitTable<RegisterMask> register_masks_;
+ BitTable<MaskInfo> stack_masks_;
+ BitTable<InvokeInfo> invoke_infos_;
+ BitTable<InlineInfo> inline_infos_;
+ BitTable<MaskInfo> dex_register_masks_;
+ BitTable<DexRegisterMapInfo> dex_register_maps_;
+ BitTable<DexRegisterInfo> dex_register_catalog_;
+ uint32_t size_in_bits_;
};
#undef ELEMENT_BYTE_OFFSET_AFTER
diff --git a/runtime/subtype_check.h b/runtime/subtype_check.h
index 1fe62e8f46..aac547eb78 100644
--- a/runtime/subtype_check.h
+++ b/runtime/subtype_check.h
@@ -542,15 +542,17 @@ struct SubtypeCheck {
int32_t new_value)
REQUIRES_SHARED(Locks::mutator_lock_) {
if (Runtime::Current() != nullptr && Runtime::Current()->IsActiveTransaction()) {
- return klass->template
- CasFieldWeakSequentiallyConsistent32</*kTransactionActive*/true>(offset,
- old_value,
- new_value);
+ return klass->template CasField32</*kTransactionActive*/true>(offset,
+ old_value,
+ new_value,
+ CASMode::kWeak,
+ std::memory_order_seq_cst);
} else {
- return klass->template
- CasFieldWeakSequentiallyConsistent32</*kTransactionActive*/false>(offset,
- old_value,
- new_value);
+ return klass->template CasField32</*kTransactionActive*/false>(offset,
+ old_value,
+ new_value,
+ CASMode::kWeak,
+ std::memory_order_seq_cst);
}
}
diff --git a/runtime/subtype_check_info_test.cc b/runtime/subtype_check_info_test.cc
index 91fcc07d65..e40bca57fe 100644
--- a/runtime/subtype_check_info_test.cc
+++ b/runtime/subtype_check_info_test.cc
@@ -121,11 +121,11 @@ struct SubtypeCheckInfoTest : public ::testing::Test {
return SubtypeCheckInfo::MakeUnchecked(bs, overflow, depth);
}
- static bool HasNext(SubtypeCheckInfo io) {
+ static bool HasNext(const SubtypeCheckInfo& io) {
return io.HasNext();
}
- static BitString GetPathToRoot(SubtypeCheckInfo io) {
+ static BitString GetPathToRoot(const SubtypeCheckInfo& io) {
return io.GetPathToRoot();
}
diff --git a/runtime/subtype_check_test.cc b/runtime/subtype_check_test.cc
index e297d0beb4..666bf812f5 100644
--- a/runtime/subtype_check_test.cc
+++ b/runtime/subtype_check_test.cc
@@ -86,9 +86,11 @@ struct MockClass {
}
template <bool kTransactionActive>
- bool CasFieldWeakSequentiallyConsistent32(art::MemberOffset offset,
- int32_t old_value,
- int32_t new_value)
+ bool CasField32(art::MemberOffset offset,
+ int32_t old_value,
+ int32_t new_value,
+ CASMode mode ATTRIBUTE_UNUSED,
+ std::memory_order memory_order ATTRIBUTE_UNUSED)
REQUIRES_SHARED(Locks::mutator_lock_) {
UNUSED(offset);
if (old_value == GetField32Volatile(offset)) {
@@ -652,13 +654,15 @@ void EnsureStateChangedTestRecursive(
MockClass* klass,
size_t cur_depth,
size_t total_depth,
- std::vector<std::pair<SubtypeCheckInfo::State, SubtypeCheckInfo::State>> transitions) {
+ const std::vector<std::pair<SubtypeCheckInfo::State, SubtypeCheckInfo::State>>& transitions) {
MockScopedLockSubtypeCheck lock_a;
MockScopedLockMutator lock_b;
using SCTree = MockSubtypeCheck;
ASSERT_EQ(cur_depth, klass->Depth());
- ApplyTransition(SCTree::Lookup(klass), transitions[cur_depth].first, transitions[cur_depth].second);
+ ApplyTransition(SCTree::Lookup(klass),
+ transitions[cur_depth].first,
+ transitions[cur_depth].second);
if (total_depth == cur_depth + 1) {
return;
@@ -674,7 +678,7 @@ void EnsureStateChangedTestRecursive(
void EnsureStateChangedTest(
MockClass* root,
size_t depth,
- std::vector<std::pair<SubtypeCheckInfo::State, SubtypeCheckInfo::State>> transitions) {
+ const std::vector<std::pair<SubtypeCheckInfo::State, SubtypeCheckInfo::State>>& transitions) {
ASSERT_EQ(depth, transitions.size());
EnsureStateChangedTestRecursive(root, /*cur_depth*/0u, depth, transitions);
diff --git a/runtime/thread.cc b/runtime/thread.cc
index a8133a1fda..19d9485f5e 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -504,6 +504,13 @@ static size_t FixStackSize(size_t stack_size) {
// so include that here to support apps that expect large native stacks.
stack_size += 1 * MB;
+ // Under sanitization, frames of the interpreter may become bigger, both for C code as
+ // well as the ShadowFrame. Ensure a larger minimum size. Otherwise initialization
+ // of all core classes cannot be done in all test circumstances.
+ if (kMemoryToolIsAvailable) {
+ stack_size = std::max(2 * MB, stack_size);
+ }
+
// It's not possible to request a stack smaller than the system-defined PTHREAD_STACK_MIN.
if (stack_size < PTHREAD_STACK_MIN) {
stack_size = PTHREAD_STACK_MIN;
@@ -598,7 +605,7 @@ void Thread::InstallImplicitProtection() {
1u;
#endif
volatile char space[kPageSize - (kAsanMultiplier * 256)];
- char sink ATTRIBUTE_UNUSED = space[zero];
+ char sink ATTRIBUTE_UNUSED = space[zero]; // NOLINT
if (reinterpret_cast<uintptr_t>(space) >= target + kPageSize) {
Touch(target);
}
@@ -1116,21 +1123,10 @@ bool Thread::InitStackHwm() {
Runtime* runtime = Runtime::Current();
bool implicit_stack_check = !runtime->ExplicitStackOverflowChecks() && !runtime->IsAotCompiler();
- // Valgrind on arm doesn't give the right values here. Do not install the guard page, and
- // effectively disable stack overflow checks (we'll get segfaults, potentially) by setting
- // stack_begin to 0.
- const bool valgrind_on_arm =
- (kRuntimeISA == InstructionSet::kArm || kRuntimeISA == InstructionSet::kArm64) &&
- kMemoryToolIsValgrind &&
- RUNNING_ON_MEMORY_TOOL != 0;
- if (valgrind_on_arm) {
- tlsPtr_.stack_begin = nullptr;
- }
-
ResetDefaultStackEnd();
// Install the protected region if we are doing implicit overflow checks.
- if (implicit_stack_check && !valgrind_on_arm) {
+ if (implicit_stack_check) {
// The thread might have protected region at the bottom. We need
// to install our own region so we need to move the limits
// of the stack to make room for it.
@@ -1583,7 +1579,7 @@ void Thread::FullSuspendCheck() {
VLOG(threads) << this << " self-suspending";
// Make thread appear suspended to other threads, release mutator_lock_.
// Transition to suspended and back to runnable, re-acquire share on mutator_lock_.
- ScopedThreadSuspension(this, kSuspended);
+ ScopedThreadSuspension(this, kSuspended); // NOLINT
VLOG(threads) << this << " self-reviving";
}
@@ -2049,15 +2045,15 @@ void Thread::FinishStartup() {
// Finish attaching the main thread.
ScopedObjectAccess soa(Thread::Current());
- Thread::Current()->CreatePeer("main", false, runtime->GetMainThreadGroup());
- Thread::Current()->AssertNoPendingException();
+ soa.Self()->CreatePeer("main", false, runtime->GetMainThreadGroup());
+ soa.Self()->AssertNoPendingException();
- Runtime::Current()->GetClassLinker()->RunRootClinits();
+ runtime->RunRootClinits(soa.Self());
// The thread counts as started from now on. We need to add it to the ThreadGroup. For regular
// threads, this is done in Thread.start() on the Java side.
- Thread::Current()->NotifyThreadGroup(soa, runtime->GetMainThreadGroup());
- Thread::Current()->AssertNoPendingException();
+ soa.Self()->NotifyThreadGroup(soa, runtime->GetMainThreadGroup());
+ soa.Self()->AssertNoPendingException();
}
void Thread::Shutdown() {
@@ -3656,8 +3652,7 @@ class ReferenceMapVisitor : public StackVisitor {
RootVisitor& _visitor)
: number_of_dex_registers(method->DexInstructionData().RegistersSize()),
code_info(_code_info),
- dex_register_map(code_info.GetDexRegisterMapOf(map,
- number_of_dex_registers)),
+ dex_register_map(code_info.GetDexRegisterMapOf(map)),
visitor(_visitor) {
}
@@ -3670,7 +3665,7 @@ class ReferenceMapVisitor : public StackVisitor {
REQUIRES_SHARED(Locks::mutator_lock_) {
bool found = false;
for (size_t dex_reg = 0; dex_reg != number_of_dex_registers; ++dex_reg) {
- DexRegisterLocation location = dex_register_map.GetDexRegisterLocation(dex_reg);
+ DexRegisterLocation location = dex_register_map[dex_reg];
if (location.GetKind() == kind && static_cast<size_t>(location.GetValue()) == index) {
visitor(ref, dex_reg, stack_visitor);
found = true;
diff --git a/runtime/thread.h b/runtime/thread.h
index 3ec050a5eb..c8a4b61792 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -817,13 +817,17 @@ class Thread {
}
uint8_t* GetStackEndForInterpreter(bool implicit_overflow_check) const {
- if (implicit_overflow_check) {
- // The interpreter needs the extra overflow bytes that stack_end does
- // not include.
- return tlsPtr_.stack_end + GetStackOverflowReservedBytes(kRuntimeISA);
- } else {
- return tlsPtr_.stack_end;
+ uint8_t* end = tlsPtr_.stack_end + (implicit_overflow_check
+ ? GetStackOverflowReservedBytes(kRuntimeISA)
+ : 0);
+ if (kIsDebugBuild) {
+ // In a debuggable build, but especially under ASAN, the access-checks interpreter has a
+ // potentially humongous stack size. We don't want to take too much of the stack regularly,
+ // so do not increase the regular reserved size (for compiled code etc) and only report the
+ // virtually smaller stack to the interpreter here.
+ end += GetStackOverflowReservedBytes(kRuntimeISA);
}
+ return end;
}
uint8_t* GetStackEnd() const {
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 59617481eb..47877bd195 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -212,8 +212,6 @@ FailureKind MethodVerifier::VerifyClass(Thread* self,
bool allow_soft_failures,
HardFailLogMode log_level,
std::string* error) {
- SCOPED_TRACE << "VerifyClass " << PrettyDescriptor(dex_file->GetClassDescriptor(class_def));
-
// A class must not be abstract and final.
if ((class_def.access_flags_ & (kAccAbstract | kAccFinal)) == (kAccAbstract | kAccFinal)) {
*error = "Verifier rejected class ";
@@ -223,6 +221,7 @@ FailureKind MethodVerifier::VerifyClass(Thread* self,
}
ClassAccessor accessor(*dex_file, class_def);
+ SCOPED_TRACE << "VerifyClass " << PrettyDescriptor(accessor.GetDescriptor());
int64_t previous_method_idx[2] = { -1, -1 };
MethodVerifier::FailureData failure_data;
@@ -3054,10 +3053,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
// Step 2. Check the register arguments correspond to the expected arguments for the
// method handle produced by step 1. The dex file verifier has checked ranges for
// the first three arguments and CheckCallSite has checked the method handle type.
- CallSiteArrayValueIterator it(*dex_file_, dex_file_->GetCallSiteId(call_site_idx));
- it.Next(); // Skip to name.
- it.Next(); // Skip to method type of the method handle
- const dex::ProtoIndex proto_idx(it.GetJavaValue().c);
+ const dex::ProtoIndex proto_idx = dex_file_->GetProtoIndexForCallSite(call_site_idx);
const DexFile::ProtoId& proto_id = dex_file_->GetProtoId(proto_idx);
DexFileParameterIterator param_it(*dex_file_, proto_id);
// Treat method as static as it has yet to be determined.
@@ -3073,8 +3069,6 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
work_line_->SetResultRegisterTypeWide(return_type, return_type.HighHalf(&reg_types_));
}
just_set_result = true;
- // TODO: Add compiler support for invoke-custom (b/35337872).
- Fail(VERIFY_ERROR_FORCE_INTERPRETER);
break;
}
case Instruction::NEG_INT:
@@ -3845,6 +3839,8 @@ ArtMethod* MethodVerifier::ResolveMethodAndCheckAccess(
template <class T>
ArtMethod* MethodVerifier::VerifyInvocationArgsFromIterator(
T* it, const Instruction* inst, MethodType method_type, bool is_range, ArtMethod* res_method) {
+ DCHECK_EQ(!is_range, inst->HasVarArgs());
+
// We use vAA as our expected arg count, rather than res_method->insSize, because we need to
// match the call to the signature. Also, we might be calling through an abstract method
// definition (which doesn't have register count values).
@@ -4008,24 +4004,41 @@ bool MethodVerifier::CheckCallSite(uint32_t call_site_idx) {
CallSiteArrayValueIterator it(*dex_file_, dex_file_->GetCallSiteId(call_site_idx));
// Check essential arguments are provided. The dex file verifier has verified indicies of the
// main values (method handle, name, method_type).
- if (it.Size() < 3) {
+ static const size_t kRequiredArguments = 3;
+ if (it.Size() < kRequiredArguments) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Call site #" << call_site_idx
<< " has too few arguments: "
- << it.Size() << " < 3";
+ << it.Size() << " < " << kRequiredArguments;
return false;
}
- // Get and check the first argument: the method handle (index range
- // checked by the dex file verifier).
- uint32_t method_handle_idx = static_cast<uint32_t>(it.GetJavaValue().i);
- if (method_handle_idx > dex_file_->NumMethodHandles()) {
- Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Call site id #" << call_site_idx
- << " method handle index invalid " << method_handle_idx
- << " >= " << dex_file_->NumMethodHandles();
- return false;
+ std::pair<const EncodedArrayValueIterator::ValueType, size_t> type_and_max[kRequiredArguments] =
+ { { EncodedArrayValueIterator::ValueType::kMethodHandle, dex_file_->NumMethodHandles() },
+ { EncodedArrayValueIterator::ValueType::kString, dex_file_->NumStringIds() },
+ { EncodedArrayValueIterator::ValueType::kMethodType, dex_file_->NumProtoIds() }
+ };
+ uint32_t index[kRequiredArguments];
+
+ // Check arguments have expected types and are within permitted ranges.
+ for (size_t i = 0; i < kRequiredArguments; ++i) {
+ if (it.GetValueType() != type_and_max[i].first) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Call site id #" << call_site_idx
+ << " argument " << i << " has wrong type "
+ << it.GetValueType() << "!=" << type_and_max[i].first;
+ return false;
+ }
+ index[i] = static_cast<uint32_t>(it.GetJavaValue().i);
+ if (index[i] >= type_and_max[i].second) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Call site id #" << call_site_idx
+ << " argument " << i << " bad index "
+ << index[i] << " >= " << type_and_max[i].second;
+ return false;
+ }
+ it.Next();
}
- const DexFile::MethodHandleItem& mh = dex_file_->GetMethodHandle(method_handle_idx);
+ // Check method handle kind is valid.
+ const DexFile::MethodHandleItem& mh = dex_file_->GetMethodHandle(index[0]);
if (mh.method_handle_type_ != static_cast<uint16_t>(DexFile::MethodHandleType::kInvokeStatic)) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Call site #" << call_site_idx
<< " argument 0 method handle type is not InvokeStatic: "
diff --git a/runtime/verify_object.h b/runtime/verify_object.h
index 5c665b34c4..cc288e7a88 100644
--- a/runtime/verify_object.h
+++ b/runtime/verify_object.h
@@ -63,6 +63,10 @@ static inline void VerifyObject(ObjPtr<mirror::Object> obj) NO_THREAD_SAFETY_ANA
}
}
+inline constexpr VerifyObjectFlags RemoveThisFlags(VerifyObjectFlags flags) {
+ return static_cast<VerifyObjectFlags>(flags & ~kVerifyThis);
+}
+
// Check that c.getClass() == c.getClass().getClass().
ALWAYS_INLINE bool VerifyClassClass(ObjPtr<mirror::Class> c) NO_THREAD_SAFETY_ANALYSIS;
diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc
index c64e7bbca1..206418fbc6 100644
--- a/runtime/well_known_classes.cc
+++ b/runtime/well_known_classes.cc
@@ -26,6 +26,7 @@
#include "base/enums.h"
#include "class_linker.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
+#include "entrypoints/runtime_asm_entrypoints.h"
#include "hidden_api.h"
#include "jni/jni_internal.h"
#include "mirror/class.h"
@@ -98,6 +99,7 @@ jmethodID WellKnownClasses::java_lang_Long_valueOf;
jmethodID WellKnownClasses::java_lang_ref_FinalizerReference_add;
jmethodID WellKnownClasses::java_lang_ref_ReferenceQueue_add;
jmethodID WellKnownClasses::java_lang_reflect_Parameter_init;
+jmethodID WellKnownClasses::java_lang_reflect_Proxy_init;
jmethodID WellKnownClasses::java_lang_reflect_Proxy_invoke;
jmethodID WellKnownClasses::java_lang_Runtime_nativeLoad;
jmethodID WellKnownClasses::java_lang_Short_valueOf;
@@ -418,6 +420,14 @@ void WellKnownClasses::LateInit(JNIEnv* env) {
CacheMethod(env, java_lang_Runtime.get(), true, "nativeLoad",
"(Ljava/lang/String;Ljava/lang/ClassLoader;)"
"Ljava/lang/String;");
+ java_lang_reflect_Proxy_init =
+ CacheMethod(env, java_lang_reflect_Proxy, false, "<init>",
+ "(Ljava/lang/reflect/InvocationHandler;)V");
+ // This invariant is important since otherwise we will have the entire proxy invoke system
+ // confused.
+ DCHECK_NE(
+ jni::DecodeArtMethod(java_lang_reflect_Proxy_init)->GetEntryPointFromQuickCompiledCode(),
+ GetQuickInstrumentationEntryPoint());
java_lang_reflect_Proxy_invoke =
CacheMethod(env, java_lang_reflect_Proxy, true, "invoke",
"(Ljava/lang/reflect/Proxy;Ljava/lang/reflect/Method;"
@@ -484,6 +494,7 @@ void WellKnownClasses::Clear() {
java_lang_ref_FinalizerReference_add = nullptr;
java_lang_ref_ReferenceQueue_add = nullptr;
java_lang_reflect_Parameter_init = nullptr;
+ java_lang_reflect_Proxy_init = nullptr;
java_lang_reflect_Proxy_invoke = nullptr;
java_lang_Runtime_nativeLoad = nullptr;
java_lang_Short_valueOf = nullptr;
diff --git a/runtime/well_known_classes.h b/runtime/well_known_classes.h
index c81062f594..ce5ab1df84 100644
--- a/runtime/well_known_classes.h
+++ b/runtime/well_known_classes.h
@@ -108,6 +108,7 @@ struct WellKnownClasses {
static jmethodID java_lang_ref_FinalizerReference_add;
static jmethodID java_lang_ref_ReferenceQueue_add;
static jmethodID java_lang_reflect_Parameter_init;
+ static jmethodID java_lang_reflect_Proxy_init;
static jmethodID java_lang_reflect_Proxy_invoke;
static jmethodID java_lang_Runtime_nativeLoad;
static jmethodID java_lang_Short_valueOf;
diff --git a/runtime/write_barrier-inl.h b/runtime/write_barrier-inl.h
new file mode 100644
index 0000000000..af8c1be828
--- /dev/null
+++ b/runtime/write_barrier-inl.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_WRITE_BARRIER_INL_H_
+#define ART_RUNTIME_WRITE_BARRIER_INL_H_
+
+#include "write_barrier.h"
+
+#include "gc/accounting/card_table-inl.h"
+#include "gc/heap.h"
+#include "obj_ptr-inl.h"
+#include "runtime.h"
+
+namespace art {
+
+template <WriteBarrier::NullCheck kNullCheck>
+inline void WriteBarrier::ForFieldWrite(ObjPtr<mirror::Object> dst,
+ MemberOffset offset ATTRIBUTE_UNUSED,
+ ObjPtr<mirror::Object> new_value) {
+ if (kNullCheck == kWithNullCheck && new_value == nullptr) {
+ return;
+ }
+ DCHECK(new_value != nullptr);
+ GetCardTable()->MarkCard(dst.Ptr());
+}
+
+inline void WriteBarrier::ForArrayWrite(ObjPtr<mirror::Object> dst,
+ int start_offset ATTRIBUTE_UNUSED,
+ size_t length ATTRIBUTE_UNUSED) {
+ GetCardTable()->MarkCard(dst.Ptr());
+}
+
+inline void WriteBarrier::ForEveryFieldWrite(ObjPtr<mirror::Object> obj) {
+ GetCardTable()->MarkCard(obj.Ptr());
+}
+
+inline gc::accounting::CardTable* WriteBarrier::GetCardTable() {
+ return Runtime::Current()->GetHeap()->GetCardTable();
+}
+
+} // namespace art
+
+#endif // ART_RUNTIME_WRITE_BARRIER_INL_H_
diff --git a/runtime/write_barrier.h b/runtime/write_barrier.h
new file mode 100644
index 0000000000..112154e14a
--- /dev/null
+++ b/runtime/write_barrier.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_WRITE_BARRIER_H_
+#define ART_RUNTIME_WRITE_BARRIER_H_
+
+#include "base/macros.h"
+
+namespace art {
+
+namespace gc {
+namespace accounting {
+class CardTable;
+} // namespace accounting
+} // namespace gc
+
+class WriteBarrier {
+ public:
+ enum NullCheck {
+ kWithoutNullCheck,
+ kWithNullCheck,
+ };
+
+ // Must be called if a reference field of an Object in the heap changes, and before any GC
+ // safe-point. The call is not needed if null is stored in the field.
+ template <NullCheck kNullCheck = kWithNullCheck>
+ ALWAYS_INLINE static void ForFieldWrite(ObjPtr<mirror::Object> dst,
+ MemberOffset offset ATTRIBUTE_UNUSED,
+ ObjPtr<mirror::Object> new_value ATTRIBUTE_UNUSED)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ // Must be called if a reference field of an ObjectArray in the heap changes, and before any GC
+ // safe-point. The call is not needed if null is stored in the field.
+ ALWAYS_INLINE static void ForArrayWrite(ObjPtr<mirror::Object> dst,
+ int start_offset ATTRIBUTE_UNUSED,
+ size_t length ATTRIBUTE_UNUSED)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ // Write barrier for every reference field in an object.
+ ALWAYS_INLINE static void ForEveryFieldWrite(ObjPtr<mirror::Object> obj)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ private:
+ ALWAYS_INLINE static gc::accounting::CardTable* GetCardTable();
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_WRITE_BARRIER_H_
diff --git a/sigchainlib/sigchain_test.cc b/sigchainlib/sigchain_test.cc
index 1d1e54f127..9584ded65f 100644
--- a/sigchainlib/sigchain_test.cc
+++ b/sigchainlib/sigchain_test.cc
@@ -70,7 +70,7 @@ class SigchainTest : public ::testing::Test {
};
-static void TestSignalBlocking(std::function<void()> fn) {
+static void TestSignalBlocking(const std::function<void()>& fn) {
// Unblock SIGSEGV, make sure it stays unblocked.
sigset64_t mask;
sigemptyset64(&mask);
diff --git a/test/003-omnibus-opcodes/build b/test/003-omnibus-opcodes/build
index 4d3fb37d1d..c2e611259b 100644
--- a/test/003-omnibus-opcodes/build
+++ b/test/003-omnibus-opcodes/build
@@ -17,12 +17,20 @@
# Stop if something fails.
set -e
-mkdir classes
-${JAVAC} -d classes `find src -name '*.java'`
-rm classes/UnresClass.class
-${JAVAC} -d classes `find src2 -name '*.java'`
+export ORIGINAL_JAVAC="$JAVAC"
-if [ ${NEED_DEX} = "true" ]; then
- ${DX} -JXmx256m --debug --dex --output=classes.dex classes
- zip $TEST_NAME.jar classes.dex
-fi
+# Wrapper function for javac which invokes the compiler and applies
+# additional setup steps for the test.
+function javac_wrapper {
+ set -e # Stop on error - the caller script may not have this set.
+
+ $ORIGINAL_JAVAC "$@"
+ rm -f classes/UnresClass.class
+}
+
+export -f javac_wrapper
+export JAVAC=javac_wrapper
+
+######################################################################
+
+./default-build "$@"
diff --git a/test/004-JniTest/build b/test/004-JniTest/build
index e563d734c2..a786b8bc62 100755
--- a/test/004-JniTest/build
+++ b/test/004-JniTest/build
@@ -23,16 +23,18 @@
# This enables the test to compile with vanilla RI javac and work on either ART or RI.
#
+# Stop on failure.
+set -e
+
export ORIGINAL_JAVAC="$JAVAC"
-# Delete CriticalNative.java, FastNative.java annotations after building the .class files.
+# Wrapper function for javac which invokes the compiler and applies
+# additional setup steps for the test.
function javac_wrapper {
+ set -e # Stop on error - the caller script may not have this set.
$ORIGINAL_JAVAC "$@"
- local stat=$?
-
- [[ -d classes ]] && (find classes/dalvik -name '*.class' | xargs rm -rf)
-
- return $stat
+ # Delete CriticalNative.java, FastNative.java annotations after building the .class files.
+ find classes/dalvik -name '*.class' -exec rm {} \;
}
export -f javac_wrapper
@@ -40,28 +42,6 @@ export JAVAC=javac_wrapper
######################################################################
-# Use the original dx with no extra magic or pessimizing flags.
-# This ensures that any default optimizations that dx do would not break JNI.
-
-export ORIGINAL_DX="$DX"
-
-# Filter out --debug flag from dx.
-function dx_wrapper {
- local args=("$@")
- local args_filtered=()
- for i in "${args[@]}"; do
- case "$i" in
- --debug)
- ;;
- *)
- args_filtered+=("$i")
- ;;
- esac
- done
- "$ORIGINAL_DX" "${args_filtered[@]}"
-}
-
-export -f dx_wrapper
-export DX=dx_wrapper
-
+# Use release mode to check optimizations do not break JNI.
+export D8_FLAGS=--release
./default-build "$@"
diff --git a/test/004-ReferenceMap/build b/test/004-ReferenceMap/build
index 08987b556c..d928cd7daf 100644
--- a/test/004-ReferenceMap/build
+++ b/test/004-ReferenceMap/build
@@ -17,10 +17,26 @@
# Stop if something fails.
set -e
-# The test relies on DEX file produced by javac+dx so keep building with them for now
-# (see b/19467889)
-mkdir classes
-${JAVAC} -d classes `find src -name '*.java'`
-${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex \
- --dump-width=1000 ${DX_FLAGS} classes
-zip $TEST_NAME.jar classes.dex
+# This test depends on the exact format of the DEX file. Since dx is deprecated,
+# the classes.dex file is packaged as a test input. It was created with:
+#
+# $ javac -g -Xlint:-options -source 1.7 -target 1.7 -d classes src/Main.java
+# $ dx --debug --dex --output=classes.dex classes
+
+# Wrapper function for javac which for this test does nothing as the
+# test uses a pre-built DEX file.
+function javac_wrapper {
+ # Nothing to compile, using dx generated classes.dex.
+ return 0
+}
+
+export -f javac_wrapper
+export JAVAC=javac_wrapper
+
+# Do not invoke D8 for this test.
+export D8=':'
+
+######################################################################
+
+jar -cf classes.jar classes.dex
+./default-build "$@"
diff --git a/test/004-ReferenceMap/classes.dex b/test/004-ReferenceMap/classes.dex
new file mode 100644
index 0000000000..993c077e43
--- /dev/null
+++ b/test/004-ReferenceMap/classes.dex
Binary files differ
diff --git a/test/004-StackWalk/build b/test/004-StackWalk/build
index 08987b556c..eeecbfcc40 100644
--- a/test/004-StackWalk/build
+++ b/test/004-StackWalk/build
@@ -17,10 +17,25 @@
# Stop if something fails.
set -e
-# The test relies on DEX file produced by javac+dx so keep building with them for now
-# (see b/19467889)
-mkdir classes
-${JAVAC} -d classes `find src -name '*.java'`
-${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex \
- --dump-width=1000 ${DX_FLAGS} classes
-zip $TEST_NAME.jar classes.dex
+# This test depends on the exact format of the DEX file. Since dx is deprecated,
+# the classes.dex file is packaged as a test input. It was created with:
+#
+# $ javac -g -Xlint:-options -source 1.7 -target 1.7 -d classes src/Main.java
+# $ dx --debug --dex --output=classes.dex classes
+
+# Wrapper function for javac which for this test does nothing as the
+# test uses a pre-built DEX file.
+function javac_wrapper {
+ return 0
+}
+
+export -f javac_wrapper
+export JAVAC=javac_wrapper
+
+# Do not invoke D8 for this test.
+export D8=':'
+
+######################################################################
+
+jar -cf classes.jar classes.dex
+./default-build "$@"
diff --git a/test/004-StackWalk/classes.dex b/test/004-StackWalk/classes.dex
new file mode 100644
index 0000000000..ad452960c3
--- /dev/null
+++ b/test/004-StackWalk/classes.dex
Binary files differ
diff --git a/test/004-ThreadStress/run b/test/004-ThreadStress/run
index 8004036868..067e0d0407 100755
--- a/test/004-ThreadStress/run
+++ b/test/004-ThreadStress/run
@@ -15,7 +15,29 @@
# limitations under the License.
# Enable lock contention logging.
-${RUN} --runtime-option -Xlockprofthreshold:10 "${@}"
+if [[ "x$ART_DEFAULT_GC_TYPE" = xGSS ]]; then
+ # NonMovingAlloc operations fail an assertion with the Generational
+ # Semi-Space (GSS) collector (see b/72738921); disable them for now
+ # by explicitly assigning frequencies to operations when the GSS
+ # collector is used.
+ #
+ # Note: The trick to use command substitution to have comments within
+ # a multi-line command is from https://stackoverflow.com/a/12797512.
+ ${RUN} --runtime-option -Xlockprofthreshold:10 "${@}" Main \
+ -oom:0.005 `# 1/200` \
+ -sigquit:0.095 `# 19/200` \
+ -alloc:0.225 `# 45/200` \
+ -largealloc:0.05 `# 10/200` \
+ -nonmovingalloc:0.0 `# 0/200` \
+ -stacktrace:0.1 `# 20/200` \
+ -exit:0.225 `# 45/200` \
+ -sleep:0.125 `# 25/200` \
+ -timedwait:0.05 `# 10/200` \
+ -wait:0.075 `# 15/200` \
+ -queuedwait:0.05 `# 10/200`
+else
+ ${RUN} --runtime-option -Xlockprofthreshold:10 "${@}"
+fi
return_status1=$?
# Run locks-only mode with stack-dump lock profiling. Reduce the number of total operations from
diff --git a/test/004-ThreadStress/src-art/Main.java b/test/004-ThreadStress/src-art/Main.java
index a142934638..3a89f4f166 100644
--- a/test/004-ThreadStress/src-art/Main.java
+++ b/test/004-ThreadStress/src-art/Main.java
@@ -315,11 +315,9 @@ public class Main implements Runnable {
Map<Operation, Double> frequencyMap = new HashMap<Operation, Double>();
frequencyMap.put(new OOM(), 0.005); // 1/200
frequencyMap.put(new SigQuit(), 0.095); // 19/200
- frequencyMap.put(new Alloc(), 0.225); // 45/200
+ frequencyMap.put(new Alloc(), 0.2); // 40/200
frequencyMap.put(new LargeAlloc(), 0.05); // 10/200
- // TODO: NonMovingAlloc operations fail an assertion with the
- // GSS collector (see b/72738921); disable them for now.
- frequencyMap.put(new NonMovingAlloc(), 0.0); // 0/200
+ frequencyMap.put(new NonMovingAlloc(), 0.025); // 5/200
frequencyMap.put(new StackTrace(), 0.1); // 20/200
frequencyMap.put(new Exit(), 0.225); // 45/200
frequencyMap.put(new Sleep(), 0.125); // 25/200
@@ -379,6 +377,8 @@ public class Main implements Runnable {
op = new Alloc();
} else if (split[0].equals("-largealloc")) {
op = new LargeAlloc();
+ } else if (split[0].equals("-nonmovingalloc")) {
+ op = new NonMovingAlloc();
} else if (split[0].equals("-stacktrace")) {
op = new StackTrace();
} else if (split[0].equals("-exit")) {
diff --git a/test/005-annotations/build b/test/005-annotations/build
index 8eb07a9bf5..5342eea4c4 100644
--- a/test/005-annotations/build
+++ b/test/005-annotations/build
@@ -17,18 +17,28 @@
# Stop if something fails.
set -e
-mkdir classes
+export ORIGINAL_JAVAC="$JAVAC"
-# android.test.anno.MissingAnnotation is available at compile time...
-${JAVAC} -d classes `find src -name '*.java'`
-# overwrite RenamedEnum
-${JAVAC} -d classes `find src2 -name '*.java'`
+# Wrapper function for javac which invokes the compiler and applies
+# additional setup steps for the test.
+function javac_wrapper {
+ set -e # Stop on error - the caller script may not have this set.
-# ...but not at run time.
-rm 'classes/android/test/anno/MissingAnnotation.class'
-rm 'classes/android/test/anno/ClassWithInnerAnnotationClass$MissingInnerAnnotationClass.class'
+ $ORIGINAL_JAVAC "$@"
-if [ ${NEED_DEX} = "true" ]; then
- ${DX} -JXmx256m --debug --dex --output=classes.dex classes
- zip $TEST_NAME.jar classes.dex
-fi
+ # Classes available at compile time, but not at runtime.
+ rm -f classes/android/test/anno/MissingAnnotation.class
+ rm -f 'classes/android/test/anno/ClassWithInnerAnnotationClass$MissingInnerAnnotationClass.class'
+
+ # overwrite RenamedEnum in classes
+ if [ -f classes2/android/test/anno/RenamedEnumClass.java ] ; then
+ mv classes2/android/test/anno/RenamedEnumClass.java classes/android/test/anno/RenamedEnumClass.java
+ fi
+}
+
+export -f javac_wrapper
+export JAVAC=javac_wrapper
+
+######################################################################
+
+./default-build "$@"
diff --git a/test/022-interface/build b/test/022-interface/build
deleted file mode 100644
index f6aad91e97..0000000000
--- a/test/022-interface/build
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2012 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Stop if something fails.
-set -e
-
-${DX} --debug --dex --dump-to=classes.lst --output=classes.dex classes
-
-zip $TEST_NAME.jar classes.dex
diff --git a/test/056-const-string-jumbo/build b/test/056-const-string-jumbo/build
index 47641d5891..c1d711b436 100644
--- a/test/056-const-string-jumbo/build
+++ b/test/056-const-string-jumbo/build
@@ -39,10 +39,4 @@ function writeFile(name, start, end) {
printf("}\n") > fileName;
}'
-mkdir classes
-${JAVAC} -d classes src/*.java
-
-if [ ${NEED_DEX} = "true" ]; then
- ${DX} -JXmx500m --debug --dex --no-optimize --positions=none --no-locals --output=classes.dex classes
- zip $TEST_NAME.jar classes.dex
-fi
+./default-build "$@"
diff --git a/test/569-checker-pattern-replacement/build b/test/066-mismatched-super/build
index d85147f17b..c1c9ed304e 100644
--- a/test/569-checker-pattern-replacement/build
+++ b/test/066-mismatched-super/build
@@ -14,7 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# See b/65168732
-export USE_D8=false
-
-./default-build "$@"
+DESUGAR=false ./default-build "$@"
diff --git a/test/089-many-methods/build b/test/089-many-methods/build
index ff77c60f64..5b4cda87c1 100644
--- a/test/089-many-methods/build
+++ b/test/089-many-methods/build
@@ -43,8 +43,9 @@ function writeFileMethod(name) {
printf("}\n") > fileName;
}'
-# The test relies on the error message produced by dx, not jack, so keep building with dx for now
-# (b/19467889).
-mkdir classes
-${JAVAC} -d classes `find src -name '*.java'`
-${DX} -JXmx1024m --dex --no-optimize classes
+# Force DEX generation so test also passes with --jvm.
+export NEED_DEX=true
+
+# Specify old API level as d8 automagically produces a multidex file
+# when the API level is above 20. Failing the build here is deliberate.
+./default-build --api-level 20 "$@"
diff --git a/test/089-many-methods/check b/test/089-many-methods/check
index 65b71397b8..1f71e8e0a0 100755
--- a/test/089-many-methods/check
+++ b/test/089-many-methods/check
@@ -14,7 +14,5 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# Strip build error debug messages, as they are environment-specific.
-sed -e '/^Failed to build/d' -e '/^Non-canonical tmpdir/d' -e '/^Args:/d' -e '/^Max filename/d' -e '/^Max pathlength/d' "$2" > "$2.tmp"
-
-diff --strip-trailing-cr -q "$1" "$2.tmp" >/dev/null \ No newline at end of file
+grep Error "$2" > "$2.tmp"
+diff --strip-trailing-cr -q "$1" "$2.tmp" >/dev/null
diff --git a/test/089-many-methods/expected.txt b/test/089-many-methods/expected.txt
index 786df7c76d..bb6ba3c7bc 100644
--- a/test/089-many-methods/expected.txt
+++ b/test/089-many-methods/expected.txt
@@ -1,6 +1 @@
-
-trouble writing output: Too many field references to fit in one dex file: 131000; max is 65536.
-You may try using multi-dex. If multi-dex is enabled then the list of classes for the main dex list is too large.
-References by package:
-131000 default
-build exit status: 2
+Error: Cannot fit requested classes in a single dex file (# fields: 131000 > 65536)
diff --git a/test/091-override-package-private-method/build b/test/091-override-package-private-method/build
index ea12b3a540..8257d92156 100755
--- a/test/091-override-package-private-method/build
+++ b/test/091-override-package-private-method/build
@@ -17,15 +17,20 @@
# Stop if something fails.
set -e
-mkdir classes
-${JAVAC} -d classes `find src -name '*.java'`
+export ORIGINAL_JAVAC="$JAVAC"
-mkdir classes-ex
-mv classes/OverridePackagePrivateMethodSuper.class classes-ex
+# Wrapper function for javac which invokes the compiler and applies
+# additional setup steps for the test.
+function javac_wrapper {
+ set -e # Stop on error - the caller script may not have this set.
+ $ORIGINAL_JAVAC "$@"
+ mkdir -p classes-ex
+ mv classes/OverridePackagePrivateMethodSuper.class classes-ex
+}
-if [ ${NEED_DEX} = "true" ]; then
- ${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex --dump-width=1000 classes
- zip $TEST_NAME.jar classes.dex
- ${DX} -JXmx256m --debug --dex --dump-to=classes-ex.lst --output=classes.dex --dump-width=1000 classes-ex
- zip ${TEST_NAME}-ex.jar classes.dex
-fi
+export -f javac_wrapper
+export JAVAC=javac_wrapper
+
+######################################################################
+
+./default-build "$@"
diff --git a/test/111-unresolvable-exception/build b/test/111-unresolvable-exception/build
index 6fe73af8d8..1c275aa2fe 100644
--- a/test/111-unresolvable-exception/build
+++ b/test/111-unresolvable-exception/build
@@ -17,11 +17,22 @@
# Stop if something fails.
set -e
-mkdir classes
-${JAVAC} -d classes `find src -name '*.java'`
-rm classes/TestException.class
-
-if [ ${NEED_DEX} = "true" ]; then
- ${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex classes
- zip $TEST_NAME.jar classes.dex
-fi
+export ORIGINAL_JAVAC="$JAVAC"
+
+# Wrapper function for javac which invokes the compiler and applies
+# additional setup steps for the test.
+function javac_wrapper {
+ set -e # Stop on error - the caller script may not have this set.
+
+ $ORIGINAL_JAVAC "$@"
+
+ # Remove class available at compile time but not at run time.
+ rm classes/TestException.class
+}
+
+export -f javac_wrapper
+export JAVAC=javac_wrapper
+
+######################################################################
+
+./default-build "$@"
diff --git a/test/113-multidex/build b/test/113-multidex/build
deleted file mode 100644
index f945563939..0000000000
--- a/test/113-multidex/build
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2014 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Stop if something fails.
-set -e
-
-# All except Main
-mkdir classes
-${JAVAC} -d classes `find src -name '*.java'`
-rm classes/Main.class
-
-# Only Main
-mkdir classes2
-${JAVAC} -d classes2 `find src -name '*.java'`
-rm classes2/Second.class classes2/FillerA.class classes2/FillerB.class classes2/Inf*.class
-
-if [ ${NEED_DEX} = "true" ]; then
- # All except Main
- ${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex classes
-
- # Only Main
- ${DX} -JXmx256m --debug --dex --dump-to=classes2.lst --output=classes2.dex classes2
- zip $TEST_NAME.jar classes.dex classes2.dex
-fi
diff --git a/test/113-multidex/src/Main.java b/test/113-multidex/src-multidex/Main.java
index 1c74220525..1c74220525 100644
--- a/test/113-multidex/src/Main.java
+++ b/test/113-multidex/src-multidex/Main.java
diff --git a/test/117-nopatchoat/nopatchoat.cc b/test/117-nopatchoat/nopatchoat.cc
index 7c382588a4..a8a895a6b3 100644
--- a/test/117-nopatchoat/nopatchoat.cc
+++ b/test/117-nopatchoat/nopatchoat.cc
@@ -28,7 +28,7 @@ namespace art {
class NoPatchoatTest {
public:
- static const OatFile::OatDexFile* getOatDexFile(jclass cls) {
+ static const OatDexFile* getOatDexFile(jclass cls) {
ScopedObjectAccess soa(Thread::Current());
ObjPtr<mirror::Class> klass = soa.Decode<mirror::Class>(cls);
const DexFile& dex_file = klass->GetDexFile();
@@ -42,13 +42,13 @@ class NoPatchoatTest {
}
static bool hasExecutableOat(jclass cls) {
- const OatFile::OatDexFile* oat_dex_file = getOatDexFile(cls);
+ const OatDexFile* oat_dex_file = getOatDexFile(cls);
return oat_dex_file != nullptr && oat_dex_file->GetOatFile()->IsExecutable();
}
static bool needsRelocation(jclass cls) {
- const OatFile::OatDexFile* oat_dex_file = getOatDexFile(cls);
+ const OatDexFile* oat_dex_file = getOatDexFile(cls);
if (oat_dex_file == nullptr) {
return false;
diff --git a/test/124-missing-classes/build b/test/124-missing-classes/build
index b13aa6e851..ec4ec84e09 100644
--- a/test/124-missing-classes/build
+++ b/test/124-missing-classes/build
@@ -17,16 +17,24 @@
# Stop if something fails.
set -e
-mkdir classes
+export ORIGINAL_JAVAC="$JAVAC"
-# Some classes are available at compile time...
-${JAVAC} -d classes `find src -name '*.java'`
+# Wrapper function for javac which invokes the compiler and applies
+# additional setup steps for the test.
-# ...but not at run time.
-rm 'classes/MissingClass.class'
-rm 'classes/Main$MissingInnerClass.class'
+function javac_wrapper {
+ set -e # Stop on error - the caller script may not have this set.
-if [ ${NEED_DEX} = "true" ]; then
- ${DX} -JXmx256m --debug --dex --output=classes.dex classes
- zip $TEST_NAME.jar classes.dex
-fi
+ # Some classes are available at compile time...
+ $ORIGINAL_JAVAC "$@"
+
+ # ...but not at run time.
+ rm 'classes/MissingClass.class' 'classes/Main$MissingInnerClass.class'
+}
+
+export -f javac_wrapper
+export JAVAC=javac_wrapper
+
+######################################################################
+
+./default-build "$@"
diff --git a/test/126-miranda-multidex/build b/test/126-miranda-multidex/build
index cf19855316..7b44863fa9 100644
--- a/test/126-miranda-multidex/build
+++ b/test/126-miranda-multidex/build
@@ -17,21 +17,30 @@
# Stop if something fails.
set -e
-# All except MirandaInterface
-mkdir classes
-${JAVAC} -d classes `find src -name '*.java'`
-rm classes/MirandaInterface.class
+export ORIGINAL_JAVAC="$JAVAC"
-# Only MirandaInterface
-mkdir classes2
-${JAVAC} -d classes2 `find src -name '*.java'`
-rm classes2/Main.class classes2/MirandaAbstract.class classes2/MirandaClass*.class classes2/MirandaInterface2*.class
+# Wrapper function for javac which invokes the compiler and applies
+# additional setup steps for the test.
+function javac_wrapper {
+ set -e # Stop on error - the caller script may not have this set.
-if [ ${NEED_DEX} = "true" ]; then
- # All except Main
- ${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex classes
+ if [[ "$*" != *"classes2"* ]]; then
+ # First invocation: compile src/ files.
+ $ORIGINAL_JAVAC "$@"
+ else
+ # Second invocation: move MirandaInterface.class for placement in
+ # a secondary dex file. There are no other source files for the
+ # secondary DEX so no compilation required.
+ mv classes/MirandaInterface.class classes2
+ fi
+ return $?
+}
- # Only Main
- ${DX} -JXmx256m --debug --dex --dump-to=classes2.lst --output=classes2.dex classes2
- zip $TEST_NAME.jar classes.dex classes2.dex
-fi
+export -f javac_wrapper
+export JAVAC=javac_wrapper
+
+######################################################################
+
+# Signal to default-build that this is a multidex test.
+mkdir src-multidex
+./default-build "$@"
diff --git a/test/127-checker-secondarydex/build b/test/127-checker-secondarydex/build
index 712774f7ef..3135681eec 100755
--- a/test/127-checker-secondarydex/build
+++ b/test/127-checker-secondarydex/build
@@ -17,15 +17,22 @@
# Stop if something fails.
set -e
-mkdir classes
-${JAVAC} -d classes `find src -name '*.java'`
-
-mkdir classes-ex
-mv classes/Super.class classes-ex
-
-if [ ${NEED_DEX} = "true" ]; then
- ${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex --dump-width=1000 classes
- zip $TEST_NAME.jar classes.dex
- ${DX} -JXmx256m --debug --dex --dump-to=classes-ex.lst --output=classes.dex --dump-width=1000 classes-ex
- zip ${TEST_NAME}-ex.jar classes.dex
-fi
+export ORIGINAL_JAVAC="$JAVAC"
+
+# Wrapper function for javac which invokes the compiler and applies
+# additional setup steps for the test.
+function javac_wrapper {
+ set -e # Stop on error - the caller script may not have this set.
+
+ $ORIGINAL_JAVAC "$@"
+
+ mkdir classes-ex
+ mv classes/Super.class classes-ex
+}
+
+export -f javac_wrapper
+export JAVAC=javac_wrapper
+
+######################################################################
+
+./default-build "$@"
diff --git a/test/137-cfi/cfi.cc b/test/137-cfi/cfi.cc
index 7ada47d304..985d27309e 100644
--- a/test/137-cfi/cfi.cc
+++ b/test/137-cfi/cfi.cc
@@ -54,15 +54,38 @@ static void CauseSegfault() {
#endif
}
-extern "C" JNIEXPORT jboolean JNICALL Java_Main_sleep(JNIEnv*, jobject, jint, jboolean, jdouble) {
- // Keep pausing.
- struct timespec ts = { .tv_sec = 100, .tv_nsec = 0 };
- printf("Going to sleep\n");
- for (;;) {
- // Use nanosleep since it gets to the system call quickly and doesn't
- // have any points at which an unwind will fail.
- nanosleep(&ts, nullptr);
+extern "C" JNIEXPORT jint JNICALL Java_Main_startSecondaryProcess(JNIEnv*, jclass) {
+#if __linux__
+ // Get our command line so that we can use it to start identical process.
+ std::string cmdline; // null-separated and null-terminated arguments.
+ ReadFileToString("/proc/self/cmdline", &cmdline);
+ cmdline = cmdline + "--secondary" + '\0'; // Let the child know it is a helper.
+
+ // Split the string into individual arguments suitable for execv.
+ std::vector<char*> argv;
+ for (size_t i = 0; i < cmdline.size(); i += strlen(&cmdline[i]) + 1) {
+ argv.push_back(&cmdline[i]);
}
+ argv.push_back(nullptr); // Terminate the list.
+
+ pid_t pid = fork();
+ if (pid < 0) {
+ LOG(FATAL) << "Fork failed";
+ } else if (pid == 0) {
+ execv(argv[0], argv.data());
+ exit(1);
+ }
+ return pid;
+#else
+ return 0;
+#endif
+}
+
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_sigstop(JNIEnv*, jclass) {
+#if __linux__
+ raise(SIGSTOP);
+#endif
+ return true; // Prevent the compiler from tail-call optimizing this method away.
}
// Helper to look for a sequence in the stack trace.
@@ -107,15 +130,8 @@ static void MoreErrorInfo(pid_t pid, bool sig_quit_on_fail) {
}
#endif
-extern "C" JNIEXPORT jboolean JNICALL Java_Main_unwindInProcess(
- JNIEnv*,
- jobject,
- jboolean,
- jint,
- jboolean) {
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_unwindInProcess(JNIEnv*, jclass) {
#if __linux__
- // TODO: What to do on Valgrind?
-
std::unique_ptr<Backtrace> bt(Backtrace::Create(BACKTRACE_CURRENT_PROCESS, GetTid()));
if (!bt->Unwind(0, nullptr)) {
printf("Cannot unwind in process.\n");
@@ -130,10 +146,10 @@ extern "C" JNIEXPORT jboolean JNICALL Java_Main_unwindInProcess(
// only unique functions are being expected.
// "mini-debug-info" does not include parameters to save space.
std::vector<std::string> seq = {
- "Java_Main_unwindInProcess", // This function.
- "java.util.Arrays.binarySearch0", // Framework method.
- "Base.runBase", // Method in other dex file.
- "Main.main" // The Java entry method.
+ "Java_Main_unwindInProcess", // This function.
+ "java.util.Arrays.binarySearch0", // Framework method.
+ "Base.runTest", // Method in other dex file.
+ "Main.main" // The Java entry method.
};
bool result = CheckStack(bt.get(), seq);
@@ -152,8 +168,8 @@ extern "C" JNIEXPORT jboolean JNICALL Java_Main_unwindInProcess(
}
#if __linux__
-static constexpr int kSleepTimeMicroseconds = 50000; // 0.05 seconds
-static constexpr int kMaxTotalSleepTimeMicroseconds = 1000000; // 1 second
+static constexpr int kSleepTimeMicroseconds = 50000; // 0.05 seconds
+static constexpr int kMaxTotalSleepTimeMicroseconds = 10000000; // 10 seconds
// Wait for a sigstop. This code is copied from libbacktrace.
int wait_for_sigstop(pid_t tid, int* total_sleep_time_usec, bool* detach_failed ATTRIBUTE_UNUSED) {
@@ -185,18 +201,12 @@ int wait_for_sigstop(pid_t tid, int* total_sleep_time_usec, bool* detach_failed
}
#endif
-extern "C" JNIEXPORT jboolean JNICALL Java_Main_unwindOtherProcess(
- JNIEnv*,
- jobject,
- jboolean,
- jint pid_int) {
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_unwindOtherProcess(JNIEnv*, jclass, jint pid_int) {
#if __linux__
- // TODO: What to do on Valgrind?
pid_t pid = static_cast<pid_t>(pid_int);
- // OK, this is painful. debuggerd uses ptrace to unwind other processes.
-
- if (ptrace(PTRACE_ATTACH, pid, 0, 0)) {
+ // SEIZE is like ATTACH, but it does not stop the process (we let it stop itself).
+ if (ptrace(PTRACE_SEIZE, pid, 0, 0)) {
// Were not able to attach, bad.
printf("Failed to attach to other process.\n");
PLOG(ERROR) << "Failed to attach.";
@@ -204,13 +214,12 @@ extern "C" JNIEXPORT jboolean JNICALL Java_Main_unwindOtherProcess(
return JNI_FALSE;
}
- kill(pid, SIGSTOP);
-
bool detach_failed = false;
int total_sleep_time_usec = 0;
int signal = wait_for_sigstop(pid, &total_sleep_time_usec, &detach_failed);
- if (signal == -1) {
+ if (signal != SIGSTOP) {
LOG(WARNING) << "wait_for_sigstop failed.";
+ return JNI_FALSE;
}
std::unique_ptr<Backtrace> bt(Backtrace::Create(pid, BACKTRACE_CURRENT_THREAD));
@@ -227,10 +236,10 @@ extern "C" JNIEXPORT jboolean JNICALL Java_Main_unwindOtherProcess(
// See comment in unwindInProcess for non-exact stack matching.
// "mini-debug-info" does not include parameters to save space.
std::vector<std::string> seq = {
- "Java_Main_sleep", // The sleep function in the other process.
- "java.util.Arrays.binarySearch0", // Framework method.
- "Base.runBase", // Method in other dex file.
- "Main.main" // The Java entry method.
+ "Java_Main_sigstop", // The stop function in the other process.
+ "java.util.Arrays.binarySearch0", // Framework method.
+ "Base.runTest", // Method in other dex file.
+ "Main.main" // The Java entry method.
};
result = CheckStack(bt.get(), seq);
diff --git a/test/137-cfi/expected.txt b/test/137-cfi/expected.txt
index 8db7853696..eedae8f51f 100644
--- a/test/137-cfi/expected.txt
+++ b/test/137-cfi/expected.txt
@@ -1,2 +1,7 @@
JNI_OnLoad called
+Unwind in process: PASS
JNI_OnLoad called
+Unwind other process: PASS
+JNI_OnLoad called
+JNI_OnLoad called
+Unwind other process: PASS
diff --git a/test/137-cfi/run b/test/137-cfi/run
index 9190b1cf10..4096b895ee 100755
--- a/test/137-cfi/run
+++ b/test/137-cfi/run
@@ -20,7 +20,7 @@
# there will be JITed frames on the callstack (it synchronously JITs on first use).
${RUN} "$@" -Xcompiler-option --generate-debug-info \
--runtime-option -Xjitthreshold:0 \
- --args --full-signatures --args --test-local --args --test-remote
+ --args --test-local --args --test-remote
return_status1=$?
# Test with minimal compressed debugging information.
diff --git a/test/137-cfi/src-multidex/Base.java b/test/137-cfi/src-multidex/Base.java
index d3f8a5681d..986a3c2226 100644
--- a/test/137-cfi/src-multidex/Base.java
+++ b/test/137-cfi/src-multidex/Base.java
@@ -15,8 +15,12 @@
*/
public abstract class Base {
- abstract public void runImpl();
- public void runBase() {
- runImpl();
+ public void runTest() throws Exception {
+ // Conditionally throw exception to prevent the compiler from inlining the code.
+ if (!this.getClass().getName().equals("Main")) {
+ throw new Exception("Who is calling?");
+ }
+ test();
}
+ abstract public void test();
}
diff --git a/test/137-cfi/src/Main.java b/test/137-cfi/src/Main.java
index 9a2e352b8c..5b32d8e1fe 100644
--- a/test/137-cfi/src/Main.java
+++ b/test/137-cfi/src/Main.java
@@ -22,181 +22,68 @@ import java.util.Comparator;
public class Main extends Base implements Comparator<Main> {
// Whether to test local unwinding.
- private boolean testLocal;
+ private static boolean testLocal;
// Unwinding another process, modelling debuggerd.
- private boolean testRemote;
+ private static boolean testRemote;
// We fork ourself to create the secondary process for remote unwinding.
- private boolean secondary;
+ private static boolean secondary;
- // Expect the symbols to contain full method signatures including parameters.
- private boolean fullSignatures;
-
- private boolean passed;
-
- public Main(String[] args) throws Exception {
+ public static void main(String[] args) throws Exception {
System.loadLibrary(args[0]);
- for (String arg : args) {
- if (arg.equals("--test-local")) {
+ for (int i = 1; i < args.length; i++) {
+ if (args[i].equals("--test-local")) {
testLocal = true;
- }
- if (arg.equals("--test-remote")) {
+ } else if (args[i].equals("--test-remote")) {
testRemote = true;
- }
- if (arg.equals("--secondary")) {
+ } else if (args[i].equals("--secondary")) {
secondary = true;
+ } else {
+ System.out.println("Unknown argument: " + args[i]);
+ System.exit(1);
}
- if (arg.equals("--full-signatures")) {
- fullSignatures = true;
- }
- }
- if (!testLocal && !testRemote) {
- System.out.println("No test selected.");
- }
- }
-
- public static void main(String[] args) throws Exception {
- new Main(args).runBase();
- }
-
- public void runImpl() {
- if (secondary) {
- if (!testRemote) {
- throw new RuntimeException("Should not be running secondary!");
- }
- runSecondary();
- } else {
- runPrimary();
- }
- }
-
- private void runSecondary() {
- foo();
- throw new RuntimeException("Didn't expect to get back...");
- }
-
- private void runPrimary() {
- // First do the in-process unwinding.
- if (testLocal && !foo()) {
- System.out.println("Unwinding self failed.");
- }
-
- if (!testRemote) {
- // Skip the remote step.
- return;
- }
-
- // Fork the secondary.
- String[] cmdline = getCmdLine();
- String[] secCmdLine = new String[cmdline.length + 1];
- System.arraycopy(cmdline, 0, secCmdLine, 0, cmdline.length);
- secCmdLine[secCmdLine.length - 1] = "--secondary";
- Process p = exec(secCmdLine);
-
- try {
- int pid = getPid(p);
- if (pid <= 0) {
- throw new RuntimeException("Couldn't parse process");
- }
-
- // Wait until the forked process had time to run until its sleep phase.
- BufferedReader lineReader;
- try {
- InputStreamReader stdout = new InputStreamReader(p.getInputStream(), "UTF-8");
- lineReader = new BufferedReader(stdout);
- while (!lineReader.readLine().contains("Going to sleep")) {
- }
- } catch (Exception e) {
- throw new RuntimeException(e);
- }
-
- if (!unwindOtherProcess(fullSignatures, pid)) {
- System.out.println("Unwinding other process failed.");
-
- // In this case, log all the output.
- // Note: this is potentially non-terminating code, if the secondary is totally stuck.
- // We rely on the run-test timeout infrastructure to terminate the primary in
- // such a case.
- try {
- String tmp;
- System.out.println("Output from the secondary:");
- while ((tmp = lineReader.readLine()) != null) {
- System.out.println("Secondary: " + tmp);
- }
- } catch (Exception e) {
- e.printStackTrace(System.out);
- }
- }
-
- try {
- lineReader.close();
- } catch (Exception e) {
- e.printStackTrace(System.out);
- }
- } finally {
- // Kill the forked process if it is not already dead.
- p.destroy();
}
- }
- private static Process exec(String[] args) {
- try {
- return Runtime.getRuntime().exec(args);
- } catch (Exception exc) {
- throw new RuntimeException(exc);
- }
+ // Call test() via base class to test unwinding through multidex.
+ new Main().runTest();
}
- private static int getPid(Process p) {
- // Could do reflection for the private pid field, but String parsing is easier.
- String s = p.toString();
- if (s.startsWith("Process[pid=")) {
- return Integer.parseInt(s.substring("Process[pid=".length(), s.indexOf(",")));
- } else {
- return -1;
- }
- }
-
- // Read /proc/self/cmdline to find the invocation command line (so we can fork another runtime).
- private static String[] getCmdLine() {
- try {
- BufferedReader in = new BufferedReader(new FileReader("/proc/self/cmdline"));
- String s = in.readLine();
- in.close();
- return s.split("\0");
- } catch (Exception exc) {
- throw new RuntimeException(exc);
- }
- }
-
- public boolean foo() {
- // Call bar via Arrays.binarySearch.
- // This tests that we can unwind from framework code.
+ public void test() {
+ // Call unwind() via Arrays.binarySearch to test unwinding through framework.
Main[] array = { this, this, this };
Arrays.binarySearch(array, 0, 3, this /* value */, this /* comparator */);
- return passed;
}
public int compare(Main lhs, Main rhs) {
- passed = bar(secondary);
+ unwind();
// Returning "equal" ensures that we terminate search
- // after first item and thus call bar() only once.
+ // after first item and thus call unwind() only once.
return 0;
}
- public boolean bar(boolean b) {
- if (b) {
- return sleep(2, b, 1.0);
- } else {
- return unwindInProcess(fullSignatures, 1, b);
+ public void unwind() {
+ if (secondary) {
+ sigstop(); // This is helper child process. Stop and wait for unwinding.
+ return; // Don't run the tests again in the secondary helper process.
}
- }
- // Native functions. Note: to avoid deduping, they must all have different signatures.
+ if (testLocal) {
+ String result = unwindInProcess() ? "PASS" : "FAIL";
+ System.out.println("Unwind in process: " + result);
+ }
- public native boolean sleep(int i, boolean b, double dummy);
+ if (testRemote) {
+ // Start a secondary helper process. It will stop itself when it is ready.
+ int pid = startSecondaryProcess();
+ // Wait for the secondary process to stop and then unwind it remotely.
+ String result = unwindOtherProcess(pid) ? "PASS" : "FAIL";
+ System.out.println("Unwind other process: " + result);
+ }
+ }
- public native boolean unwindInProcess(boolean fullSignatures, int i, boolean b);
- public native boolean unwindOtherProcess(boolean fullSignatures, int pid);
+ public static native int startSecondaryProcess();
+ public static native boolean sigstop();
+ public static native boolean unwindInProcess();
+ public static native boolean unwindOtherProcess(int pid);
}
diff --git a/test/138-duplicate-classes-check2/build b/test/138-duplicate-classes-check2/build
index 76d535abf1..4ab7320699 100755
--- a/test/138-duplicate-classes-check2/build
+++ b/test/138-duplicate-classes-check2/build
@@ -17,16 +17,22 @@
# Stop if something fails.
set -e
-mkdir classes
-${JAVAC} -d classes `find src -name '*.java'`
-
-mkdir classes-ex
-${JAVAC} -d classes-ex `find src-ex -name '*.java'`
-rm classes-ex/A.class
-
-if [ ${NEED_DEX} = "true" ]; then
- ${DX} -JXmx256m --debug --dex --dump-to=classes.lst --output=classes.dex --dump-width=1000 classes
- zip ${TEST_NAME}.jar classes.dex
- ${DX} -JXmx256m --debug --dex --dump-to=classes-ex.lst --output=classes.dex --dump-width=1000 classes-ex
- zip ${TEST_NAME}-ex.jar classes.dex
-fi
+export ORIGINAL_JAVAC="$JAVAC"
+
+# Wrapper function for javac which invokes the compiler and applies
+# additional setup steps for the test.
+function javac_wrapper {
+ set -e # Stop on error - the caller script may not have this set.
+
+ $ORIGINAL_JAVAC "$@"
+
+ # Remove one A.class from classes-ex
+ rm -f classes-ex/A.class
+}
+
+export -f javac_wrapper
+export JAVAC=javac_wrapper
+
+######################################################################
+
+./default-build "$@"
diff --git a/test/1948-obsolete-const-method-handle/build b/test/1948-obsolete-const-method-handle/build
index ac0dcd97b8..d0e7a8c0d8 100644
--- a/test/1948-obsolete-const-method-handle/build
+++ b/test/1948-obsolete-const-method-handle/build
@@ -20,6 +20,4 @@ set -e
mkdir classes
./util-src/build-classes $PWD/classes
-${DX} --dex --min-sdk-version=28 --output=classes.dex classes
-
-zip $TEST_NAME.jar classes.dex
+./default-build --api-level 28 "$@"
diff --git a/test/303-verification-stress/build b/test/303-verification-stress/build
index ba79541478..87a4a851d7 100644
--- a/test/303-verification-stress/build
+++ b/test/303-verification-stress/build
@@ -21,11 +21,4 @@ set -e
gcc -Wall -Werror -o classes-gen classes-gen.c
./classes-gen
-mkdir classes
-${JAVAC} -d classes src/*.java
-
-# dx needs more memory for that test so do not pass Xmx option here.
-if [ ${NEED_DEX} = "true" ]; then
- ${DX} --debug --dex --output=classes.dex classes
- zip $TEST_NAME.jar classes.dex
-fi
+./default-build "$@"
diff --git a/test/411-checker-hdiv-hrem-pow2/expected.txt b/test/411-checker-hdiv-hrem-pow2/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/411-checker-hdiv-hrem-pow2/expected.txt
diff --git a/test/411-checker-hdiv-hrem-pow2/info.txt b/test/411-checker-hdiv-hrem-pow2/info.txt
new file mode 100644
index 0000000000..df1c988052
--- /dev/null
+++ b/test/411-checker-hdiv-hrem-pow2/info.txt
@@ -0,0 +1,2 @@
+Test the optimization of integer division and remainder instructions when
+the denominator is power of 2 on arm64.
diff --git a/test/411-checker-hdiv-hrem-pow2/src/DivTest.java b/test/411-checker-hdiv-hrem-pow2/src/DivTest.java
new file mode 100644
index 0000000000..a3882e7c15
--- /dev/null
+++ b/test/411-checker-hdiv-hrem-pow2/src/DivTest.java
@@ -0,0 +1,251 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class DivTest {
+
+ public static <T extends Number> void expectEquals(T expected, T result) {
+ if (!expected.equals(result)) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void main() {
+ divInt();
+ divLong();
+ }
+
+ private static void divInt() {
+ expectEquals(0, $noinline$IntDivBy2(0));
+ expectEquals(0, $noinline$IntDivBy2(1));
+ expectEquals(0, $noinline$IntDivBy2(-1));
+ expectEquals(1, $noinline$IntDivBy2(2));
+ expectEquals(-1, $noinline$IntDivBy2(-2));
+ expectEquals(1, $noinline$IntDivBy2(3));
+ expectEquals(-1, $noinline$IntDivBy2(-3));
+ expectEquals(3, $noinline$IntDivBy2(7));
+ expectEquals(-3, $noinline$IntDivBy2(-7));
+ expectEquals(4, $noinline$IntDivBy2(8));
+ expectEquals(-4, $noinline$IntDivBy2(-8));
+ expectEquals(7, $noinline$IntDivBy2(0x0f));
+ expectEquals(0x007f, $noinline$IntDivBy2(0x00ff));
+ expectEquals(0x07ff, $noinline$IntDivBy2(0x0fff));
+ expectEquals(0x007fff, $noinline$IntDivBy2(0x00ffff));
+ expectEquals(0x3fffffff, $noinline$IntDivBy2(Integer.MAX_VALUE));
+ expectEquals(0xc0000000, $noinline$IntDivBy2(Integer.MIN_VALUE));
+
+ expectEquals(0, $noinline$IntDivByMinus2(0));
+ expectEquals(0, $noinline$IntDivByMinus2(1));
+ expectEquals(0, $noinline$IntDivByMinus2(-1));
+ expectEquals(-1, $noinline$IntDivByMinus2(2));
+ expectEquals(1, $noinline$IntDivByMinus2(-2));
+ expectEquals(-1, $noinline$IntDivByMinus2(3));
+ expectEquals(1, $noinline$IntDivByMinus2(-3));
+ expectEquals(-3, $noinline$IntDivByMinus2(7));
+ expectEquals(3, $noinline$IntDivByMinus2(-7));
+ expectEquals(-4, $noinline$IntDivByMinus2(8));
+ expectEquals(4, $noinline$IntDivByMinus2(-8));
+ expectEquals(-7, $noinline$IntDivByMinus2(0x0f));
+ expectEquals(0xffffff81, $noinline$IntDivByMinus2(0x00ff));
+ expectEquals(0xfffff801, $noinline$IntDivByMinus2(0x0fff));
+ expectEquals(0xffff8001, $noinline$IntDivByMinus2(0x00ffff));
+ expectEquals(0xc0000001, $noinline$IntDivByMinus2(Integer.MAX_VALUE));
+ expectEquals(0x40000000, $noinline$IntDivByMinus2(Integer.MIN_VALUE));
+
+ expectEquals(0, $noinline$IntDivBy16(0));
+ expectEquals(1, $noinline$IntDivBy16(16));
+ expectEquals(-1, $noinline$IntDivBy16(-16));
+ expectEquals(2, $noinline$IntDivBy16(33));
+ expectEquals(0x000f, $noinline$IntDivBy16(0x00ff));
+ expectEquals(0x00ff, $noinline$IntDivBy16(0x0fff));
+ expectEquals(0x000fff, $noinline$IntDivBy16(0x00ffff));
+ expectEquals(0x07ffffff, $noinline$IntDivBy16(Integer.MAX_VALUE));
+ expectEquals(0xf8000000, $noinline$IntDivBy16(Integer.MIN_VALUE));
+
+ expectEquals(0, $noinline$IntDivByMinus16(0));
+ expectEquals(-1, $noinline$IntDivByMinus16(16));
+ expectEquals(1, $noinline$IntDivByMinus16(-16));
+ expectEquals(-2, $noinline$IntDivByMinus16(33));
+ expectEquals(0xfffffff1, $noinline$IntDivByMinus16(0x00ff));
+ expectEquals(0xffffff01, $noinline$IntDivByMinus16(0x0fff));
+ expectEquals(0xfffff001, $noinline$IntDivByMinus16(0x00ffff));
+ expectEquals(0xf8000001, $noinline$IntDivByMinus16(Integer.MAX_VALUE));
+ expectEquals(0x08000000, $noinline$IntDivByMinus16(Integer.MIN_VALUE));
+
+ expectEquals(0, $noinline$IntDivByIntMin(0));
+ expectEquals(0, $noinline$IntDivByIntMin(1));
+ expectEquals(0, $noinline$IntDivByIntMin(-1));
+ expectEquals(1, $noinline$IntDivByIntMin(Integer.MIN_VALUE));
+ expectEquals(0, $noinline$IntDivByIntMin(Integer.MAX_VALUE));
+ }
+
+ /// CHECK-START-ARM64: java.lang.Integer DivTest.$noinline$IntDivBy2(int) disassembly (after)
+ /// CHECK: add w{{\d+}}, w{{\d+}}, w{{\d+}}, lsr #31
+ /// CHECK: asr w{{\d+}}, w{{\d+}}, #1
+ private static Integer $noinline$IntDivBy2(int v) {
+ int r = v / 2;
+ return r;
+ }
+
+ /// CHECK-START-ARM64: java.lang.Integer DivTest.$noinline$IntDivByMinus2(int) disassembly (after)
+ /// CHECK: add w{{\d+}}, w{{\d+}}, w{{\d+}}, lsr #31
+ /// CHECK: neg w{{\d+}}, w{{\d+}}, asr #1
+ private static Integer $noinline$IntDivByMinus2(int v) {
+ int r = v / -2;
+ return r;
+ }
+
+ /// CHECK-START-ARM64: java.lang.Integer DivTest.$noinline$IntDivBy16(int) disassembly (after)
+ /// CHECK: add w{{\d+}}, w{{\d+}}, #0xf
+ /// CHECK: cmp w{{\d+}}, #0x0
+ /// CHECK: csel w{{\d+}}, w{{\d+}}, w{{\d+}}, lt
+ /// CHECK: asr w{{\d+}}, w{{\d+}}, #4
+ private static Integer $noinline$IntDivBy16(int v) {
+ int r = v / 16;
+ return r;
+ }
+
+ /// CHECK-START-ARM64: java.lang.Integer DivTest.$noinline$IntDivByMinus16(int) disassembly (after)
+ /// CHECK: add w{{\d+}}, w{{\d+}}, #0xf
+ /// CHECK: cmp w{{\d+}}, #0x0
+ /// CHECK: csel w{{\d+}}, w{{\d+}}, w{{\d+}}, lt
+ /// CHECK: neg w{{\d+}}, w{{\d+}}, asr #4
+ private static Integer $noinline$IntDivByMinus16(int v) {
+ int r = v / -16;
+ return r;
+ }
+
+ /// CHECK-START-ARM64: java.lang.Integer DivTest.$noinline$IntDivByIntMin(int) disassembly (after)
+ /// CHECK: mov w{{\d+}}, #0x7fffffff
+ /// CHECK: add w{{\d+}}, w{{\d+}}, w{{\d+}}
+ /// CHECK: cmp w{{\d+}}, #0x0
+ /// CHECK: csel w{{\d+}}, w{{\d+}}, w{{\d+}}, lt
+ /// CHECK: neg w{{\d+}}, w{{\d+}}, asr #31
+ private static Integer $noinline$IntDivByIntMin(int v) {
+ int r = v / Integer.MIN_VALUE;
+ return r;
+ }
+
+ private static void divLong() {
+ expectEquals(0L, $noinline$LongDivBy2(0L));
+ expectEquals(0L, $noinline$LongDivBy2(1L));
+ expectEquals(0L, $noinline$LongDivBy2(-1L));
+ expectEquals(1L, $noinline$LongDivBy2(2L));
+ expectEquals(-1L, $noinline$LongDivBy2(-2L));
+ expectEquals(1L, $noinline$LongDivBy2(3L));
+ expectEquals(-1L, $noinline$LongDivBy2(-3L));
+ expectEquals(3L, $noinline$LongDivBy2(7L));
+ expectEquals(-3L, $noinline$LongDivBy2(-7L));
+ expectEquals(4L, $noinline$LongDivBy2(8L));
+ expectEquals(-4L, $noinline$LongDivBy2(-8L));
+ expectEquals(7L, $noinline$LongDivBy2(0x0fL));
+ expectEquals(0x007fL, $noinline$LongDivBy2(0x00ffL));
+ expectEquals(0x07ffL, $noinline$LongDivBy2(0x0fffL));
+ expectEquals(0x007fffL, $noinline$LongDivBy2(0x00ffffL));
+ expectEquals(0x3fffffffffffffffL, $noinline$LongDivBy2(Long.MAX_VALUE));
+ expectEquals(0xc000000000000000L, $noinline$LongDivBy2(Long.MIN_VALUE));
+
+ expectEquals(0L, $noinline$LongDivByMinus2(0));
+ expectEquals(0L, $noinline$LongDivByMinus2(1L));
+ expectEquals(0L, $noinline$LongDivByMinus2(-1L));
+ expectEquals(-1L, $noinline$LongDivByMinus2(2L));
+ expectEquals(1L, $noinline$LongDivByMinus2(-2L));
+ expectEquals(-1L, $noinline$LongDivByMinus2(3L));
+ expectEquals(1L, $noinline$LongDivByMinus2(-3L));
+ expectEquals(-3L, $noinline$LongDivByMinus2(7L));
+ expectEquals(3L, $noinline$LongDivByMinus2(-7L));
+ expectEquals(-4L, $noinline$LongDivByMinus2(8L));
+ expectEquals(4L, $noinline$LongDivByMinus2(-8L));
+ expectEquals(-7L, $noinline$LongDivByMinus2(0x0fL));
+ expectEquals(0xffffffffffffff81L, $noinline$LongDivByMinus2(0x00ffL));
+ expectEquals(0xfffffffffffff801L, $noinline$LongDivByMinus2(0x0fffL));
+ expectEquals(0xffffffffffff8001L, $noinline$LongDivByMinus2(0x00ffffL));
+ expectEquals(0xc000000000000001L, $noinline$LongDivByMinus2(Long.MAX_VALUE));
+ expectEquals(0x4000000000000000L, $noinline$LongDivByMinus2(Long.MIN_VALUE));
+
+ expectEquals(0L, $noinline$LongDivBy16(0));
+ expectEquals(1L, $noinline$LongDivBy16(16L));
+ expectEquals(-1L, $noinline$LongDivBy16(-16L));
+ expectEquals(2L, $noinline$LongDivBy16(33L));
+ expectEquals(0x000fL, $noinline$LongDivBy16(0x00ffL));
+ expectEquals(0x00ffL, $noinline$LongDivBy16(0x0fffL));
+ expectEquals(0x000fffL, $noinline$LongDivBy16(0x00ffffL));
+ expectEquals(0x07ffffffffffffffL, $noinline$LongDivBy16(Long.MAX_VALUE));
+ expectEquals(0xf800000000000000L, $noinline$LongDivBy16(Long.MIN_VALUE));
+
+ expectEquals(0L, $noinline$LongDivByMinus16(0));
+ expectEquals(-1L, $noinline$LongDivByMinus16(16L));
+ expectEquals(1L, $noinline$LongDivByMinus16(-16L));
+ expectEquals(-2L, $noinline$LongDivByMinus16(33L));
+ expectEquals(0xfffffffffffffff1L, $noinline$LongDivByMinus16(0x00ffL));
+ expectEquals(0xffffffffffffff01L, $noinline$LongDivByMinus16(0x0fffL));
+ expectEquals(0xfffffffffffff001L, $noinline$LongDivByMinus16(0x00ffffL));
+ expectEquals(0xf800000000000001L, $noinline$LongDivByMinus16(Long.MAX_VALUE));
+ expectEquals(0x0800000000000000L, $noinline$LongDivByMinus16(Long.MIN_VALUE));
+
+ expectEquals(0L, $noinline$LongDivByLongMin(0));
+ expectEquals(0L, $noinline$LongDivByLongMin(1));
+ expectEquals(0L, $noinline$LongDivByLongMin(-1));
+ expectEquals(1L, $noinline$LongDivByLongMin(Long.MIN_VALUE));
+ expectEquals(0L, $noinline$LongDivByLongMin(Long.MAX_VALUE));
+ }
+
+ /// CHECK-START-ARM64: java.lang.Long DivTest.$noinline$LongDivBy2(long) disassembly (after)
+ /// CHECK: add x{{\d+}}, x{{\d+}}, x{{\d+}}, lsr #63
+ /// CHECK: asr x{{\d+}}, x{{\d+}}, #1
+ private static Long $noinline$LongDivBy2(long v) {
+ long r = v / 2;
+ return r;
+ }
+
+ /// CHECK-START-ARM64: java.lang.Long DivTest.$noinline$LongDivByMinus2(long) disassembly (after)
+ /// CHECK: add x{{\d+}}, x{{\d+}}, x{{\d+}}, lsr #63
+ /// CHECK: neg x{{\d+}}, x{{\d+}}, asr #1
+ private static Long $noinline$LongDivByMinus2(long v) {
+ long r = v / -2;
+ return r;
+ }
+
+ /// CHECK-START-ARM64: java.lang.Long DivTest.$noinline$LongDivBy16(long) disassembly (after)
+ /// CHECK: add x{{\d+}}, x{{\d+}}, #0xf
+ /// CHECK: cmp x{{\d+}}, #0x0
+ /// CHECK: csel x{{\d+}}, x{{\d+}}, x{{\d+}}, lt
+ /// CHECK: asr x{{\d+}}, x{{\d+}}, #4
+ private static Long $noinline$LongDivBy16(long v) {
+ long r = v / 16;
+ return r;
+ }
+
+ /// CHECK-START-ARM64: java.lang.Long DivTest.$noinline$LongDivByMinus16(long) disassembly (after)
+ /// CHECK: add x{{\d+}}, x{{\d+}}, #0xf
+ /// CHECK: cmp x{{\d+}}, #0x0
+ /// CHECK: csel x{{\d+}}, x{{\d+}}, x{{\d+}}, lt
+ /// CHECK: neg x{{\d+}}, x{{\d+}}, asr #4
+ private static Long $noinline$LongDivByMinus16(long v) {
+ long r = v / -16;
+ return r;
+ }
+
+ /// CHECK-START-ARM64: java.lang.Long DivTest.$noinline$LongDivByLongMin(long) disassembly (after)
+ /// CHECK: mov x{{\d+}}, #0x7fffffffffffffff
+ /// CHECK: add x{{\d+}}, x{{\d+}}, x{{\d+}}
+ /// CHECK: cmp x{{\d+}}, #0x0
+ /// CHECK: csel x{{\d+}}, x{{\d+}}, x{{\d+}}, lt
+ /// CHECK: neg x{{\d+}}, x{{\d+}}, asr #63
+ private static Long $noinline$LongDivByLongMin(long v) {
+ long r = v / Long.MIN_VALUE;
+ return r;
+ }
+}
diff --git a/test/411-checker-hdiv-hrem-pow2/src/Main.java b/test/411-checker-hdiv-hrem-pow2/src/Main.java
new file mode 100644
index 0000000000..4b34bf1af4
--- /dev/null
+++ b/test/411-checker-hdiv-hrem-pow2/src/Main.java
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String args[]) {
+ DivTest.main();
+ RemTest.main();
+ }
+}
diff --git a/test/411-checker-hdiv-hrem-pow2/src/RemTest.java b/test/411-checker-hdiv-hrem-pow2/src/RemTest.java
new file mode 100644
index 0000000000..72725c1cd4
--- /dev/null
+++ b/test/411-checker-hdiv-hrem-pow2/src/RemTest.java
@@ -0,0 +1,257 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class RemTest {
+
+ public static <T extends Number> void expectEquals(T expected, T result) {
+ if (!expected.equals(result)) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void main() {
+ remInt();
+ remLong();
+ }
+
+ private static void remInt() {
+ expectEquals(0, $noinline$IntMod2(0));
+ expectEquals(1, $noinline$IntMod2(1));
+ expectEquals(-1, $noinline$IntMod2(-1));
+ expectEquals(0, $noinline$IntMod2(2));
+ expectEquals(0, $noinline$IntMod2(-2));
+ expectEquals(1, $noinline$IntMod2(3));
+ expectEquals(-1, $noinline$IntMod2(-3));
+ expectEquals(1, $noinline$IntMod2(0x0f));
+ expectEquals(1, $noinline$IntMod2(0x00ff));
+ expectEquals(1, $noinline$IntMod2(0x00ffff));
+ expectEquals(1, $noinline$IntMod2(Integer.MAX_VALUE));
+ expectEquals(0, $noinline$IntMod2(Integer.MIN_VALUE));
+
+ expectEquals(0, $noinline$IntModMinus2(0));
+ expectEquals(1, $noinline$IntModMinus2(1));
+ expectEquals(-1, $noinline$IntModMinus2(-1));
+ expectEquals(0, $noinline$IntModMinus2(2));
+ expectEquals(0, $noinline$IntModMinus2(-2));
+ expectEquals(1, $noinline$IntModMinus2(3));
+ expectEquals(-1, $noinline$IntModMinus2(-3));
+ expectEquals(1, $noinline$IntModMinus2(0x0f));
+ expectEquals(1, $noinline$IntModMinus2(0x00ff));
+ expectEquals(1, $noinline$IntModMinus2(0x00ffff));
+ expectEquals(1, $noinline$IntModMinus2(Integer.MAX_VALUE));
+ expectEquals(0, $noinline$IntModMinus2(Integer.MIN_VALUE));
+
+ expectEquals(0, $noinline$IntMod16(0));
+ expectEquals(1, $noinline$IntMod16(1));
+ expectEquals(1, $noinline$IntMod16(17));
+ expectEquals(-1, $noinline$IntMod16(-1));
+ expectEquals(0, $noinline$IntMod16(32));
+ expectEquals(0, $noinline$IntMod16(-32));
+ expectEquals(0x0f, $noinline$IntMod16(0x0f));
+ expectEquals(0x0f, $noinline$IntMod16(0x00ff));
+ expectEquals(0x0f, $noinline$IntMod16(0x00ffff));
+ expectEquals(15, $noinline$IntMod16(Integer.MAX_VALUE));
+ expectEquals(0, $noinline$IntMod16(Integer.MIN_VALUE));
+
+ expectEquals(0, $noinline$IntModMinus16(0));
+ expectEquals(1, $noinline$IntModMinus16(1));
+ expectEquals(1, $noinline$IntModMinus16(17));
+ expectEquals(-1, $noinline$IntModMinus16(-1));
+ expectEquals(0, $noinline$IntModMinus16(32));
+ expectEquals(0, $noinline$IntModMinus16(-32));
+ expectEquals(0x0f, $noinline$IntModMinus16(0x0f));
+ expectEquals(0x0f, $noinline$IntModMinus16(0x00ff));
+ expectEquals(0x0f, $noinline$IntModMinus16(0x00ffff));
+ expectEquals(15, $noinline$IntModMinus16(Integer.MAX_VALUE));
+ expectEquals(0, $noinline$IntModMinus16(Integer.MIN_VALUE));
+
+ expectEquals(0, $noinline$IntModIntMin(0));
+ expectEquals(1, $noinline$IntModIntMin(1));
+ expectEquals(0, $noinline$IntModIntMin(Integer.MIN_VALUE));
+ expectEquals(-1, $noinline$IntModIntMin(-1));
+ expectEquals(0x0f, $noinline$IntModIntMin(0x0f));
+ expectEquals(0x00ff, $noinline$IntModIntMin(0x00ff));
+ expectEquals(0x00ffff, $noinline$IntModIntMin(0x00ffff));
+ expectEquals(Integer.MAX_VALUE, $noinline$IntModIntMin(Integer.MAX_VALUE));
+ }
+
+ /// CHECK-START-ARM64: java.lang.Integer RemTest.$noinline$IntMod2(int) disassembly (after)
+ /// CHECK: cmp w{{\d+}}, #0x0
+ /// CHECK: and w{{\d+}}, w{{\d+}}, #0x1
+ /// CHECK: cneg w{{\d+}}, w{{\d+}}, lt
+ private static Integer $noinline$IntMod2(int v) {
+ int r = v % 2;
+ return r;
+ }
+
+ /// CHECK-START-ARM64: java.lang.Integer RemTest.$noinline$IntModMinus2(int) disassembly (after)
+ /// CHECK: cmp w{{\d+}}, #0x0
+ /// CHECK: and w{{\d+}}, w{{\d+}}, #0x1
+ /// CHECK: cneg w{{\d+}}, w{{\d+}}, lt
+ private static Integer $noinline$IntModMinus2(int v) {
+ int r = v % -2;
+ return r;
+ }
+
+ /// CHECK-START-ARM64: java.lang.Integer RemTest.$noinline$IntMod16(int) disassembly (after)
+ /// CHECK: negs w{{\d+}}, w{{\d+}}
+ /// CHECK: and w{{\d+}}, w{{\d+}}, #0xf
+ /// CHECK: and w{{\d+}}, w{{\d+}}, #0xf
+ /// CHECK: csneg w{{\d+}}, w{{\d+}}, mi
+ private static Integer $noinline$IntMod16(int v) {
+ int r = v % 16;
+ return r;
+ }
+
+ /// CHECK-START-ARM64: java.lang.Integer RemTest.$noinline$IntModMinus16(int) disassembly (after)
+ /// CHECK: negs w{{\d+}}, w{{\d+}}
+ /// CHECK: and w{{\d+}}, w{{\d+}}, #0xf
+ /// CHECK: and w{{\d+}}, w{{\d+}}, #0xf
+ /// CHECK: csneg w{{\d+}}, w{{\d+}}, mi
+ private static Integer $noinline$IntModMinus16(int v) {
+ int r = v % -16;
+ return r;
+ }
+
+ /// CHECK-START-ARM64: java.lang.Integer RemTest.$noinline$IntModIntMin(int) disassembly (after)
+ /// CHECK: negs w{{\d+}}, w{{\d+}}
+ /// CHECK: and w{{\d+}}, w{{\d+}}, #0x7fffffff
+ /// CHECK: and w{{\d+}}, w{{\d+}}, #0x7fffffff
+ /// CHECK: csneg w{{\d+}}, w{{\d+}}, mi
+ private static Integer $noinline$IntModIntMin(int v) {
+ int r = v % Integer.MIN_VALUE;
+ return r;
+ }
+
+ private static void remLong() {
+ expectEquals(0L, $noinline$LongMod2(0));
+ expectEquals(1L, $noinline$LongMod2(1));
+ expectEquals(-1L, $noinline$LongMod2(-1));
+ expectEquals(0L, $noinline$LongMod2(2));
+ expectEquals(0L, $noinline$LongMod2(-2));
+ expectEquals(1L, $noinline$LongMod2(3));
+ expectEquals(-1L, $noinline$LongMod2(-3));
+ expectEquals(1L, $noinline$LongMod2(0x0f));
+ expectEquals(1L, $noinline$LongMod2(0x00ff));
+ expectEquals(1L, $noinline$LongMod2(0x00ffff));
+ expectEquals(1L, $noinline$LongMod2(0x00ffffff));
+ expectEquals(1L, $noinline$LongMod2(0x00ffffffffL));
+ expectEquals(1L, $noinline$LongMod2(Long.MAX_VALUE));
+ expectEquals(0L, $noinline$LongMod2(Long.MIN_VALUE));
+
+ expectEquals(0L, $noinline$LongModMinus2(0));
+ expectEquals(1L, $noinline$LongModMinus2(1));
+ expectEquals(-1L, $noinline$LongModMinus2(-1));
+ expectEquals(0L, $noinline$LongModMinus2(2));
+ expectEquals(0L, $noinline$LongModMinus2(-2));
+ expectEquals(1L, $noinline$LongModMinus2(3));
+ expectEquals(-1L, $noinline$LongModMinus2(-3));
+ expectEquals(1L, $noinline$LongModMinus2(0x0f));
+ expectEquals(1L, $noinline$LongModMinus2(0x00ff));
+ expectEquals(1L, $noinline$LongModMinus2(0x00ffff));
+ expectEquals(1L, $noinline$LongModMinus2(0x00ffffff));
+ expectEquals(1L, $noinline$LongModMinus2(0x00ffffffffL));
+ expectEquals(1L, $noinline$LongModMinus2(Long.MAX_VALUE));
+ expectEquals(0L, $noinline$LongModMinus2(Long.MIN_VALUE));
+
+ expectEquals(0L, $noinline$LongMod16(0));
+ expectEquals(1L, $noinline$LongMod16(1));
+ expectEquals(1L, $noinline$LongMod16(17));
+ expectEquals(-1L, $noinline$LongMod16(-1));
+ expectEquals(0L, $noinline$LongMod16(32));
+ expectEquals(0L, $noinline$LongMod16(-32));
+ expectEquals(0x0fL, $noinline$LongMod16(0x0f));
+ expectEquals(0x0fL, $noinline$LongMod16(0x00ff));
+ expectEquals(0x0fL, $noinline$LongMod16(0x00ffff));
+ expectEquals(0x0fL, $noinline$LongMod16(0x00ffffff));
+ expectEquals(0x0fL, $noinline$LongMod16(0x00ffffffffL));
+ expectEquals(15L, $noinline$LongMod16(Long.MAX_VALUE));
+ expectEquals(0L, $noinline$LongMod16(Long.MIN_VALUE));
+
+ expectEquals(0L, $noinline$LongModMinus16(0));
+ expectEquals(1L, $noinline$LongModMinus16(1));
+ expectEquals(1L, $noinline$LongModMinus16(17));
+ expectEquals(-1L, $noinline$LongModMinus16(-1));
+ expectEquals(0L, $noinline$LongModMinus16(32));
+ expectEquals(0L, $noinline$LongModMinus16(-32));
+ expectEquals(0x0fL, $noinline$LongModMinus16(0x0f));
+ expectEquals(0x0fL, $noinline$LongModMinus16(0x00ff));
+ expectEquals(0x0fL, $noinline$LongModMinus16(0x00ffff));
+ expectEquals(0x0fL, $noinline$LongModMinus16(0x00ffffff));
+ expectEquals(0x0fL, $noinline$LongModMinus16(0x00ffffffffL));
+ expectEquals(15L, $noinline$LongModMinus16(Long.MAX_VALUE));
+ expectEquals(0L, $noinline$LongModMinus16(Long.MIN_VALUE));
+
+ expectEquals(0L, $noinline$LongModLongMin(0));
+ expectEquals(1L, $noinline$LongModLongMin(1));
+ expectEquals(0L, $noinline$LongModLongMin(Long.MIN_VALUE));
+ expectEquals(-1L, $noinline$LongModLongMin(-1));
+ expectEquals(0x0fL, $noinline$LongModLongMin(0x0f));
+ expectEquals(0x00ffL, $noinline$LongModLongMin(0x00ff));
+ expectEquals(0x00ffffL, $noinline$LongModLongMin(0x00ffff));
+ expectEquals(0x00ffffffL, $noinline$LongModLongMin(0x00ffffff));
+ expectEquals(0x00ffffffffL, $noinline$LongModLongMin(0x00ffffffffL));
+ expectEquals(Long.MAX_VALUE, $noinline$LongModLongMin(Long.MAX_VALUE));
+ }
+
+ /// CHECK-START-ARM64: java.lang.Long RemTest.$noinline$LongMod2(long) disassembly (after)
+ /// CHECK: cmp x{{\d+}}, #0x0
+ /// CHECK: and x{{\d+}}, x{{\d+}}, #0x1
+ /// CHECK: cneg x{{\d+}}, x{{\d+}}, lt
+ private static Long $noinline$LongMod2(long v) {
+ long r = v % 2;
+ return r;
+ }
+
+ /// CHECK-START-ARM64: java.lang.Long RemTest.$noinline$LongModMinus2(long) disassembly (after)
+ /// CHECK: cmp x{{\d+}}, #0x0
+ /// CHECK: and x{{\d+}}, x{{\d+}}, #0x1
+ /// CHECK: cneg x{{\d+}}, x{{\d+}}, lt
+ private static Long $noinline$LongModMinus2(long v) {
+ long r = v % -2;
+ return r;
+ }
+
+ /// CHECK-START-ARM64: java.lang.Long RemTest.$noinline$LongMod16(long) disassembly (after)
+ /// CHECK: negs x{{\d+}}, x{{\d+}}
+ /// CHECK: and x{{\d+}}, x{{\d+}}, #0xf
+ /// CHECK: and x{{\d+}}, x{{\d+}}, #0xf
+ /// CHECK: csneg x{{\d+}}, x{{\d+}}, mi
+ private static Long $noinline$LongMod16(long v) {
+ long r = v % 16;
+ return r;
+ }
+
+ /// CHECK-START-ARM64: java.lang.Long RemTest.$noinline$LongModMinus16(long) disassembly (after)
+ /// CHECK: negs x{{\d+}}, x{{\d+}}
+ /// CHECK: and x{{\d+}}, x{{\d+}}, #0xf
+ /// CHECK: and x{{\d+}}, x{{\d+}}, #0xf
+ /// CHECK: csneg x{{\d+}}, x{{\d+}}, mi
+ private static Long $noinline$LongModMinus16(long v) {
+ long r = v % -16;
+ return r;
+ }
+
+ /// CHECK-START-ARM64: java.lang.Long RemTest.$noinline$LongModLongMin(long) disassembly (after)
+ /// CHECK: negs x{{\d+}}, x{{\d+}}
+ /// CHECK: and x{{\d+}}, x{{\d+}}, #0x7fffffffffffffff
+ /// CHECK: and x{{\d+}}, x{{\d+}}, #0x7fffffffffffffff
+ /// CHECK: csneg x{{\d+}}, x{{\d+}}, mi
+ private static Long $noinline$LongModLongMin(long v) {
+ long r = v % Long.MIN_VALUE;
+ return r;
+ }
+}
diff --git a/test/442-checker-constant-folding/build b/test/442-checker-constant-folding/build
deleted file mode 100755
index 42b99ad9f8..0000000000
--- a/test/442-checker-constant-folding/build
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2017 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# See b/65168732
-export DX=$ANDROID_HOST_OUT/bin/dx
-
-./default-build "$@"
diff --git a/test/458-checker-instruct-simplification/src/Main.java b/test/458-checker-instruct-simplification/src/Main.java
index 40e3778109..9e714f5111 100644
--- a/test/458-checker-instruct-simplification/src/Main.java
+++ b/test/458-checker-instruct-simplification/src/Main.java
@@ -2458,6 +2458,77 @@ public class Main {
return (byte)((int)(((long)(b & 0xff)) & 255L));
}
+ /// CHECK-START: int Main.$noinline$emptyStringIndexOf(int) instruction_simplifier (before)
+ /// CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Empty:l\d+>> LoadString
+ /// CHECK-DAG: <<Equals:i\d+>> InvokeVirtual [<<Empty>>,<<Arg>>] intrinsic:StringIndexOf
+ /// CHECK-DAG: Return [<<Equals>>]
+
+ /// CHECK-START: int Main.$noinline$emptyStringIndexOf(int) instruction_simplifier (after)
+ /// CHECK-NOT: InvokeVirtual
+
+ /// CHECK-START: int Main.$noinline$emptyStringIndexOf(int) instruction_simplifier (after)
+ /// CHECK-DAG: <<Minus1:i\d+>> IntConstant -1
+ /// CHECK-DAG: Return [<<Minus1>>]
+ public static int $noinline$emptyStringIndexOf(int ch) {
+ return "".indexOf(ch);
+ }
+
+ /// CHECK-START: int Main.$noinline$emptyStringIndexOfAfter(int, int) instruction_simplifier (before)
+ /// CHECK-DAG: <<Arg1:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Arg2:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Empty:l\d+>> LoadString
+ /// CHECK-DAG: <<Equals:i\d+>> InvokeVirtual [<<Empty>>,<<Arg1>>,<<Arg2>>] intrinsic:StringIndexOfAfter
+ /// CHECK-DAG: Return [<<Equals>>]
+
+ /// CHECK-START: int Main.$noinline$emptyStringIndexOfAfter(int, int) instruction_simplifier (after)
+ /// CHECK-NOT: InvokeVirtual
+
+ /// CHECK-START: int Main.$noinline$emptyStringIndexOfAfter(int, int) instruction_simplifier (after)
+ /// CHECK-DAG: <<Minus1:i\d+>> IntConstant -1
+ /// CHECK-DAG: Return [<<Minus1>>]
+ public static int $noinline$emptyStringIndexOfAfter(int ch, int fromIndex) {
+ return "".indexOf(ch, fromIndex);
+ }
+
+ /// CHECK-START: int Main.$noinline$singleCharStringIndexOf(int) instruction_simplifier (before)
+ /// CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Empty:l\d+>> LoadString
+ /// CHECK-DAG: <<Equals:i\d+>> InvokeVirtual [<<Empty>>,<<Arg>>] intrinsic:StringIndexOf
+ /// CHECK-DAG: Return [<<Equals>>]
+
+ /// CHECK-START: int Main.$noinline$singleCharStringIndexOf(int) instruction_simplifier (after)
+ /// CHECK-NOT: InvokeVirtual
+
+ /// CHECK-START: int Main.$noinline$singleCharStringIndexOf(int) instruction_simplifier (after)
+ /// CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ /// CHECK-DAG: <<x:i\d+>> IntConstant 120
+ /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0
+ /// CHECK-DAG: <<Minus1:i\d+>> IntConstant -1
+ /// CHECK-DAG: <<Eq:z\d+>> Equal [<<Arg>>,<<x>>]
+ /// CHECK-DAG: <<Select:i\d+>> Select [<<Minus1>>,<<Zero>>,<<Eq>>]
+ /// CHECK-DAG: Return [<<Select>>]
+ public static int $noinline$singleCharStringIndexOf(int ch) {
+ return "x".indexOf(ch);
+ }
+
+ /// CHECK-START: int Main.$noinline$singleCharStringIndexOfAfter(int, int) instruction_simplifier (before)
+ /// CHECK-DAG: <<Arg1:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Arg2:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Empty:l\d+>> LoadString
+ /// CHECK-DAG: <<Equals:i\d+>> InvokeVirtual [<<Empty>>,<<Arg1>>,<<Arg2>>] intrinsic:StringIndexOfAfter
+ /// CHECK-DAG: Return [<<Equals>>]
+
+ /// CHECK-START: int Main.$noinline$singleCharStringIndexOfAfter(int, int) instruction_simplifier (after)
+ /// CHECK-DAG: <<Arg1:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Arg2:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Empty:l\d+>> LoadString
+ /// CHECK-DAG: <<Equals:i\d+>> InvokeVirtual [<<Empty>>,<<Arg1>>,<<Arg2>>] intrinsic:StringIndexOfAfter
+ /// CHECK-DAG: Return [<<Equals>>]
+ public static int $noinline$singleCharStringIndexOfAfter(int ch, int fromIndex) {
+ return "x".indexOf(ch, fromIndex); // Not simplified.
+ }
+
public static void main(String[] args) throws Exception {
Class smaliTests2 = Class.forName("SmaliTests2");
Method $noinline$XorAllOnes = smaliTests2.getMethod("$noinline$XorAllOnes", int.class);
@@ -2709,6 +2780,19 @@ public class Main {
assertIntEquals(1, (int)$noinline$bug68142795Boolean.invoke(null, true));
assertIntEquals(0x7f, $noinline$bug68142795Elaborate((byte) 0x7f));
assertIntEquals((byte) 0x80, $noinline$bug68142795Elaborate((byte) 0x80));
+
+ assertIntEquals(-1, $noinline$emptyStringIndexOf('a'));
+ assertIntEquals(-1, $noinline$emptyStringIndexOf('Z'));
+ assertIntEquals(-1, $noinline$emptyStringIndexOfAfter('a', 0));
+ assertIntEquals(-1, $noinline$emptyStringIndexOfAfter('Z', -1));
+
+ assertIntEquals(-1, $noinline$singleCharStringIndexOf('a'));
+ assertIntEquals(0, $noinline$singleCharStringIndexOf('x'));
+ assertIntEquals(-1, $noinline$singleCharStringIndexOf('Z'));
+ assertIntEquals(-1, $noinline$singleCharStringIndexOfAfter('a', 0));
+ assertIntEquals(0, $noinline$singleCharStringIndexOfAfter('x', -1));
+ assertIntEquals(-1, $noinline$singleCharStringIndexOfAfter('x', 1));
+ assertIntEquals(-1, $noinline$singleCharStringIndexOfAfter('Z', -1));
}
private static boolean $inline$true() { return true; }
diff --git a/test/466-get-live-vreg/get_live_vreg_jni.cc b/test/466-get-live-vreg/get_live_vreg_jni.cc
index 44ea0c9877..58ffe04fee 100644
--- a/test/466-get-live-vreg/get_live_vreg_jni.cc
+++ b/test/466-get-live-vreg/get_live_vreg_jni.cc
@@ -36,32 +36,46 @@ class TestVisitor : public StackVisitor {
ArtMethod* m = GetMethod();
std::string m_name(m->GetName());
- if (m_name.compare("testLiveArgument") == 0) {
+ if (m_name.compare("$noinline$testLiveArgument") == 0) {
+ found_method_ = true;
+ CHECK_EQ(CodeItemDataAccessor(m->DexInstructionData()).RegistersSize(), 3u);
+ CheckOptimizedOutRegLiveness(m, 1, kIntVReg, true, 42);
+
+ uint32_t value;
+ CHECK(GetVReg(m, 2, kReferenceVReg, &value));
+ } else if (m_name.compare("$noinline$testIntervalHole") == 0) {
found_method_ = true;
- uint32_t value = 0;
- CHECK(GetVReg(m, 0, kIntVReg, &value));
- CHECK_EQ(value, 42u);
- } else if (m_name.compare("$opt$noinline$testIntervalHole") == 0) {
uint32_t number_of_dex_registers =
CodeItemDataAccessor(m->DexInstructionData()).RegistersSize();
uint32_t dex_register_of_first_parameter = number_of_dex_registers - 2;
+ CheckOptimizedOutRegLiveness(m, dex_register_of_first_parameter, kIntVReg, true, 1);
+ } else if (m_name.compare("$noinline$testCodeSinking") == 0) {
found_method_ = true;
- uint32_t value = 0;
- if (GetCurrentQuickFrame() != nullptr &&
- GetCurrentOatQuickMethodHeader()->IsOptimized() &&
- !Runtime::Current()->IsJavaDebuggable()) {
- CHECK_EQ(GetVReg(m, dex_register_of_first_parameter, kIntVReg, &value), false);
- } else {
- CHECK(GetVReg(m, dex_register_of_first_parameter, kIntVReg, &value));
- CHECK_EQ(value, 1u);
- }
+ CheckOptimizedOutRegLiveness(m, 0, kReferenceVReg);
}
return true;
}
- // Value returned to Java to ensure the methods testSimpleVReg and testPairVReg
- // have been found and tested.
+ void CheckOptimizedOutRegLiveness(ArtMethod* m,
+ uint32_t dex_reg,
+ VRegKind vreg_kind,
+ bool check_val = false,
+ uint32_t expected = 0) REQUIRES_SHARED(Locks::mutator_lock_) {
+ uint32_t value = 0;
+ if (GetCurrentQuickFrame() != nullptr &&
+ GetCurrentOatQuickMethodHeader()->IsOptimized() &&
+ !Runtime::Current()->IsJavaDebuggable()) {
+ CHECK_EQ(GetVReg(m, dex_reg, vreg_kind, &value), false);
+ } else {
+ CHECK(GetVReg(m, dex_reg, vreg_kind, &value));
+ if (check_val) {
+ CHECK_EQ(value, expected);
+ }
+ }
+ }
+
+ // Value returned to Java to ensure the required methods have been found and tested.
bool found_method_ = false;
};
diff --git a/test/466-get-live-vreg/src/Main.java b/test/466-get-live-vreg/src/Main.java
index 19032601fa..29a6901a70 100644
--- a/test/466-get-live-vreg/src/Main.java
+++ b/test/466-get-live-vreg/src/Main.java
@@ -18,9 +18,9 @@ public class Main {
public Main() {
}
- static int testLiveArgument(int arg) {
+ static int $noinline$testLiveArgument(int arg1, Integer arg2) {
doStaticNativeCallLiveVreg();
- return arg;
+ return arg1 + arg2.intValue();
}
static void moveArgToCalleeSave() {
@@ -31,7 +31,7 @@ public class Main {
}
}
- static void $opt$noinline$testIntervalHole(int arg, boolean test) {
+ static void $noinline$testIntervalHole(int arg, boolean test) {
// Move the argument to callee save to ensure it is in
// a readable register.
moveArgToCalleeSave();
@@ -53,16 +53,18 @@ public class Main {
public static void main(String[] args) {
System.loadLibrary(args[0]);
- if (testLiveArgument(staticField3) != staticField3) {
- throw new Error("Expected " + staticField3);
+ if ($noinline$testLiveArgument(staticField3, Integer.valueOf(1)) != staticField3 + 1) {
+ throw new Error("Expected " + staticField3 + 1);
}
- if (testLiveArgument(staticField3) != staticField3) {
- throw new Error("Expected " + staticField3);
+ if ($noinline$testLiveArgument(staticField3,Integer.valueOf(1)) != staticField3 + 1) {
+ throw new Error("Expected " + staticField3 + 1);
}
testWrapperIntervalHole(1, true);
testWrapperIntervalHole(1, false);
+
+ $noinline$testCodeSinking(1);
}
// Wrapper method to avoid inlining, which affects liveness
@@ -70,12 +72,25 @@ public class Main {
static void testWrapperIntervalHole(int arg, boolean test) {
try {
Thread.sleep(0);
- $opt$noinline$testIntervalHole(arg, test);
+ $noinline$testIntervalHole(arg, test);
} catch (Exception e) {
throw new Error(e);
}
}
+ // The value of dex register which originally holded "Object[] o = new Object[1];" will not be
+ // live at the call to doStaticNativeCallLiveVreg after code sinking optimizizaion.
+ static void $noinline$testCodeSinking(int x) {
+ Object[] o = new Object[1];
+ o[0] = o;
+ doStaticNativeCallLiveVreg();
+ if (doThrow) {
+ throw new Error(o.toString());
+ }
+ }
+
+ static boolean doThrow;
+
static int staticField1;
static int staticField2;
static int staticField3 = 42;
diff --git a/test/477-checker-bound-type/src/Main.java b/test/477-checker-bound-type/src/Main.java
index 2504ab2839..237e4dafb6 100644
--- a/test/477-checker-bound-type/src/Main.java
+++ b/test/477-checker-bound-type/src/Main.java
@@ -57,5 +57,79 @@ public class Main {
}
}
- public static void main(String[] args) { }
+ /// CHECK-START: void Main.boundTypeInLoop(int[]) licm (before)
+ /// CHECK-DAG: <<Param:l\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<BoundT:l\d+>> BoundType [<<Param>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayLength [<<BoundT>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+
+ /// CHECK-START: void Main.boundTypeInLoop(int[]) licm (after)
+ /// CHECK-DAG: <<Param:l\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<BoundT:l\d+>> BoundType [<<Param>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayLength [<<BoundT>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-NOT: BoundType
+
+ /// CHECK-START: void Main.boundTypeInLoop(int[]) loop_optimization (after)
+ /// CHECK-DAG: <<Param:l\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<BoundTA:l\d+>> BoundType [<<Param>>] loop:none
+ /// CHECK-DAG: ArrayLength [<<BoundTA>>] loop:none
+ /// CHECK-DAG: ArrayGet loop:none
+ /// CHECK-DAG: ArraySet loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<BoundT:l\d+>> BoundType [<<Param>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayLength [<<BoundT>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+
+ /// CHECK-START: void Main.boundTypeInLoop(int[]) GVN$after_arch (after)
+ /// CHECK-DAG: <<Param:l\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<BoundTA:l\d+>> BoundType [<<Param>>] loop:none
+ /// CHECK-DAG: ArrayLength [<<BoundTA>>] loop:none
+ /// CHECK-DAG: ArrayGet loop:none
+ /// CHECK-DAG: ArraySet loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-NOT: BoundType
+ /// CHECK-NOT: ArrayLength
+ private static void boundTypeInLoop(int[] a) {
+ for (int i = 0; a != null && i < a.length; i++) {
+ a[i] += 1;
+ }
+ }
+
+ // BoundType must not be hoisted by LICM, in this example it leads to ArrayLength being
+ // hoisted as well which is invalid.
+ //
+ /// CHECK-START: void Main.BoundTypeNoLICM(java.lang.Object) licm (before)
+ /// CHECK-DAG: <<Param:l\d+>> ParameterValue loop:none
+ /// CHECK-DAG: SuspendCheck loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Bound1:l\d+>> BoundType [<<Param>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Bound2:l\d+>> BoundType [<<Bound1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayLength [<<Bound2>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-START: void Main.BoundTypeNoLICM(java.lang.Object) licm (after)
+ /// CHECK-DAG: <<Param:l\d+>> ParameterValue loop:none
+ /// CHECK-DAG: SuspendCheck loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Bound1:l\d+>> BoundType [<<Param>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Bound2:l\d+>> BoundType [<<Bound1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayLength [<<Bound2>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-NOT: BoundType loop:none
+ private static void BoundTypeNoLICM(Object obj) {
+ int i = 0;
+ while (obj instanceof int[]) {
+ int[] a = (int[])obj;
+ a[0] = 1;
+ }
+ }
+
+ public static void main(String[] args) { }
}
diff --git a/test/530-checker-lse/smali/Main.smali b/test/530-checker-lse/smali/Main.smali
index 267801760f..4c18266c01 100644
--- a/test/530-checker-lse/smali/Main.smali
+++ b/test/530-checker-lse/smali/Main.smali
@@ -124,6 +124,38 @@
goto :goto_5
.end method
+## CHECK-START: int Main2.test10(TestClass) load_store_elimination (before)
+## CHECK: StaticFieldGet
+## CHECK: InstanceFieldGet
+## CHECK: StaticFieldSet
+## CHECK: InstanceFieldGet
+
+## CHECK-START: int Main2.test10(TestClass) load_store_elimination (after)
+## CHECK: StaticFieldGet
+## CHECK: InstanceFieldGet
+## CHECK: StaticFieldSet
+## CHECK-NOT: NullCheck
+## CHECK-NOT: InstanceFieldGet
+
+# Original java source:
+#
+# // Static fields shouldn't alias with instance fields.
+# static int test10(TestClass obj) {
+# TestClass.si += obj.i;
+# return obj.i;
+# }
+
+.method public static test10(LTestClass;)I
+ .registers 3
+ .param p0, "obj" # LTestClass;
+ sget v0, LTestClass;->si:I
+ iget v1, p0, LTestClass;->i:I
+ add-int/2addr v0, v1
+ sput v0, LTestClass;->si:I
+ iget p0, p0, LTestClass;->i:I
+ return p0
+.end method
+
## CHECK-START: int Main2.test23(boolean) load_store_elimination (before)
## CHECK: NewInstance
## CHECK: InstanceFieldSet
diff --git a/test/530-checker-lse/src/Main.java b/test/530-checker-lse/src/Main.java
index bd1744cc5f..22bff0aaf5 100644
--- a/test/530-checker-lse/src/Main.java
+++ b/test/530-checker-lse/src/Main.java
@@ -251,25 +251,6 @@ public class Main {
return obj2.i;
}
- /// CHECK-START: int Main.test10(TestClass) load_store_elimination (before)
- /// CHECK: StaticFieldGet
- /// CHECK: InstanceFieldGet
- /// CHECK: StaticFieldSet
- /// CHECK: InstanceFieldGet
-
- /// CHECK-START: int Main.test10(TestClass) load_store_elimination (after)
- /// CHECK: StaticFieldGet
- /// CHECK: InstanceFieldGet
- /// CHECK: StaticFieldSet
- /// CHECK-NOT: NullCheck
- /// CHECK-NOT: InstanceFieldGet
-
- // Static fields shouldn't alias with instance fields.
- static int test10(TestClass obj) {
- TestClass.si += obj.i;
- return obj.i;
- }
-
/// CHECK-START: int Main.test11(TestClass) load_store_elimination (before)
/// CHECK: InstanceFieldSet
/// CHECK: InstanceFieldGet
@@ -1177,6 +1158,7 @@ public class Main {
Class main2 = Class.forName("Main2");
Method test4 = main2.getMethod("test4", TestClass.class, boolean.class);
Method test5 = main2.getMethod("test5", TestClass.class, boolean.class);
+ Method test10 = main2.getMethod("test10", TestClass.class);
Method test23 = main2.getMethod("test23", boolean.class);
Method test24 = main2.getMethod("test24");
@@ -1199,7 +1181,7 @@ public class Main {
obj2 = new TestClass();
obj1.next = obj2;
assertIntEquals(test9(new TestClass()), 1);
- assertIntEquals(test10(new TestClass(3, 4)), 3);
+ assertIntEquals((int)test10.invoke(null, new TestClass(3, 4)), 3);
assertIntEquals(TestClass.si, 3);
assertIntEquals(test11(new TestClass()), 10);
assertIntEquals(test12(new TestClass(), new TestClass()), 10);
diff --git a/test/530-checker-lse2/build b/test/530-checker-lse2/build
deleted file mode 100644
index d85147f17b..0000000000
--- a/test/530-checker-lse2/build
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2018 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# See b/65168732
-export USE_D8=false
-
-./default-build "$@"
diff --git a/test/530-checker-peel-unroll/src/Main.java b/test/530-checker-peel-unroll/src/Main.java
index 804c9fe916..11c29649ff 100644
--- a/test/530-checker-peel-unroll/src/Main.java
+++ b/test/530-checker-peel-unroll/src/Main.java
@@ -53,11 +53,17 @@ public class Main {
}
private static final void initIntArray(int[] a) {
- for (int i = 0; i < LENGTH; i++) {
+ for (int i = 0; i < a.length; i++) {
a[i] = i % 4;
}
}
+ private static final void initDoubleArray(double[] a) {
+ for (int i = 0; i < a.length; i++) {
+ a[i] = (double)(i % 4);
+ }
+ }
+
/// CHECK-START: void Main.unrollingLoadStoreElimination(int[]) loop_optimization (before)
/// CHECK-DAG: <<Array:l\d+>> ParameterValue loop:none
/// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 loop:none
@@ -684,6 +690,96 @@ public class Main {
return s + t;
}
+ /// CHECK-START: void Main.unrollingInstanceOf(int[], java.lang.Object[]) loop_optimization (before)
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: InstanceOf loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-NOT: InstanceOf
+
+ /// CHECK-START: void Main.unrollingInstanceOf(int[], java.lang.Object[]) loop_optimization (after)
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: InstanceOf loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: InstanceOf loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-NOT: InstanceOf
+ public void unrollingInstanceOf(int[] a, Object[] obj_array) {
+ for (int i = 0; i < LENGTH_B; i++) {
+ if (obj_array[i] instanceof Integer) {
+ a[i] += 1;
+ }
+ }
+ }
+
+ /// CHECK-START: void Main.unrollingDivZeroCheck(int[], int) loop_optimization (before)
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: DivZeroCheck loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-NOT: DivZeroCheck
+
+ /// CHECK-START: void Main.unrollingDivZeroCheck(int[], int) loop_optimization (after)
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: DivZeroCheck loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: DivZeroCheck loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-NOT: DivZeroCheck
+ public void unrollingDivZeroCheck(int[] a, int r) {
+ for (int i = 0; i < LENGTH_B; i++) {
+ a[i] += a[i] / r;
+ }
+ }
+
+ /// CHECK-START: void Main.unrollingTypeConversion(int[], double[]) loop_optimization (before)
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: TypeConversion loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-NOT: TypeConversion
+
+ /// CHECK-START: void Main.unrollingTypeConversion(int[], double[]) loop_optimization (after)
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: TypeConversion loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: TypeConversion loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-NOT: TypeConversion
+ public void unrollingTypeConversion(int[] a, double[] b) {
+ for (int i = 0; i < LENGTH_B; i++) {
+ a[i] = (int) b[i];
+ }
+ }
+
+ interface Itf {
+ }
+
+ class SubMain extends Main implements Itf {
+ }
+
+ /// CHECK-START: void Main.unrollingCheckCast(int[], java.lang.Object) loop_optimization (before)
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: CheckCast loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-NOT: CheckCast
+
+ /// CHECK-START: void Main.unrollingCheckCast(int[], java.lang.Object) loop_optimization (after)
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: CheckCast loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: CheckCast loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-NOT: CheckCast
+ public void unrollingCheckCast(int[] a, Object o) {
+ for (int i = 0; i < LENGTH_B; i++) {
+ if (((SubMain)o) == o) {
+ a[i] = i;
+ }
+ }
+ }
+
/// CHECK-START: void Main.noUnrollingOddTripCount(int[]) loop_optimization (before)
/// CHECK-DAG: <<Array:l\d+>> ParameterValue loop:none
/// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 loop:none
@@ -985,9 +1081,17 @@ public class Main {
initMatrix(mB);
initMatrix(mC);
- int expected = 174291419;
+ int expected = 174291515;
int found = 0;
+ double[] doubleArray = new double[LENGTH_B];
+ initDoubleArray(doubleArray);
+
+ unrollingInstanceOf(a, new Integer[LENGTH_B]);
+ unrollingDivZeroCheck(a, 15);
+ unrollingTypeConversion(a, doubleArray);
+ unrollingCheckCast(a, new SubMain());
+
unrollingWhile(a);
unrollingLoadStoreElimination(a);
unrollingSwitch(a);
diff --git a/test/551-checker-shifter-operand/src/Main.java b/test/551-checker-shifter-operand/src/Main.java
index b3e4a60e9a..8311b60df7 100644
--- a/test/551-checker-shifter-operand/src/Main.java
+++ b/test/551-checker-shifter-operand/src/Main.java
@@ -896,7 +896,7 @@ public class Main {
}
// Each test line below should see one merge.
- /// CHECK-START-ARM: void Main.$opt$validateShiftLong(long, long) instruction_simplifier_arm (after)
+ /// CHECK-START-ARM: long[] Main.$opt$validateShiftLong(long, long) instruction_simplifier_arm (after)
/// CHECK: DataProcWithShifterOp
/// CHECK: DataProcWithShifterOp
/// CHECK: DataProcWithShifterOp
@@ -933,7 +933,7 @@ public class Main {
/// CHECK-NOT: DataProcWithShifterOp
// On ARM shifts by 1 are not merged.
- /// CHECK-START-ARM: void Main.$opt$validateShiftLong(long, long) instruction_simplifier_arm (after)
+ /// CHECK-START-ARM: long[] Main.$opt$validateShiftLong(long, long) instruction_simplifier_arm (after)
/// CHECK: Shl
/// CHECK-NOT: Shl
/// CHECK: Shr
@@ -941,7 +941,7 @@ public class Main {
/// CHECK: UShr
/// CHECK-NOT: UShr
- /// CHECK-START-ARM64: void Main.$opt$validateShiftLong(long, long) instruction_simplifier_arm64 (after)
+ /// CHECK-START-ARM64: long[] Main.$opt$validateShiftLong(long, long) instruction_simplifier_arm64 (after)
/// CHECK: DataProcWithShifterOp
/// CHECK: DataProcWithShifterOp
/// CHECK: DataProcWithShifterOp
@@ -980,50 +980,98 @@ public class Main {
/// CHECK: DataProcWithShifterOp
/// CHECK-NOT: DataProcWithShifterOp
- /// CHECK-START-ARM64: void Main.$opt$validateShiftLong(long, long) instruction_simplifier_arm64 (after)
+ /// CHECK-START-ARM64: long[] Main.$opt$validateShiftLong(long, long) instruction_simplifier_arm64 (after)
/// CHECK-NOT: Shl
/// CHECK-NOT: Shr
/// CHECK-NOT: UShr
- public static void $opt$validateShiftLong(long a, long b) {
- assertLongEquals(a + $noinline$LongShl(b, 1), a + (b << 1));
- assertLongEquals(a + $noinline$LongShl(b, 6), a + (b << 6));
- assertLongEquals(a + $noinline$LongShl(b, 7), a + (b << 7));
- assertLongEquals(a + $noinline$LongShl(b, 8), a + (b << 8));
- assertLongEquals(a + $noinline$LongShl(b, 14), a + (b << 14));
- assertLongEquals(a + $noinline$LongShl(b, 15), a + (b << 15));
- assertLongEquals(a + $noinline$LongShl(b, 16), a + (b << 16));
- assertLongEquals(a + $noinline$LongShl(b, 30), a + (b << 30));
- assertLongEquals(a + $noinline$LongShl(b, 31), a + (b << 31));
- assertLongEquals(a + $noinline$LongShl(b, 32), a + (b << 32));
- assertLongEquals(a + $noinline$LongShl(b, 62), a + (b << 62));
- assertLongEquals(a + $noinline$LongShl(b, 63), a + (b << 63));
-
- assertLongEquals(a - $noinline$LongShr(b, 1), a - (b >> 1));
- assertLongEquals(a - $noinline$LongShr(b, 6), a - (b >> 6));
- assertLongEquals(a - $noinline$LongShr(b, 7), a - (b >> 7));
- assertLongEquals(a - $noinline$LongShr(b, 8), a - (b >> 8));
- assertLongEquals(a - $noinline$LongShr(b, 14), a - (b >> 14));
- assertLongEquals(a - $noinline$LongShr(b, 15), a - (b >> 15));
- assertLongEquals(a - $noinline$LongShr(b, 16), a - (b >> 16));
- assertLongEquals(a - $noinline$LongShr(b, 30), a - (b >> 30));
- assertLongEquals(a - $noinline$LongShr(b, 31), a - (b >> 31));
- assertLongEquals(a - $noinline$LongShr(b, 32), a - (b >> 32));
- assertLongEquals(a - $noinline$LongShr(b, 62), a - (b >> 62));
- assertLongEquals(a - $noinline$LongShr(b, 63), a - (b >> 63));
-
- assertLongEquals(a ^ $noinline$LongUshr(b, 1), a ^ (b >>> 1));
- assertLongEquals(a ^ $noinline$LongUshr(b, 6), a ^ (b >>> 6));
- assertLongEquals(a ^ $noinline$LongUshr(b, 7), a ^ (b >>> 7));
- assertLongEquals(a ^ $noinline$LongUshr(b, 8), a ^ (b >>> 8));
- assertLongEquals(a ^ $noinline$LongUshr(b, 14), a ^ (b >>> 14));
- assertLongEquals(a ^ $noinline$LongUshr(b, 15), a ^ (b >>> 15));
- assertLongEquals(a ^ $noinline$LongUshr(b, 16), a ^ (b >>> 16));
- assertLongEquals(a ^ $noinline$LongUshr(b, 30), a ^ (b >>> 30));
- assertLongEquals(a ^ $noinline$LongUshr(b, 31), a ^ (b >>> 31));
- assertLongEquals(a ^ $noinline$LongUshr(b, 32), a ^ (b >>> 32));
- assertLongEquals(a ^ $noinline$LongUshr(b, 62), a ^ (b >>> 62));
- assertLongEquals(a ^ $noinline$LongUshr(b, 63), a ^ (b >>> 63));
+ public static long[] $opt$validateShiftLong(long a, long b) {
+ long[] results = new long[36];
+
+ results[0] = a + (b << 1);
+ results[1] = a + (b << 6);
+ results[2] = a + (b << 7);
+ results[3] = a + (b << 8);
+ results[4] = a + (b << 14);
+ results[5] = a + (b << 15);
+ results[6] = a + (b << 16);
+ results[7] = a + (b << 30);
+ results[8] = a + (b << 31);
+ results[9] = a + (b << 32);
+ results[10] = a + (b << 62);
+ results[11] = a + (b << 63);
+
+ results[12] = a - (b >> 1);
+ results[13] = a - (b >> 6);
+ results[14] = a - (b >> 7);
+ results[15] = a - (b >> 8);
+ results[16] = a - (b >> 14);
+ results[17] = a - (b >> 15);
+ results[18] = a - (b >> 16);
+ results[19] = a - (b >> 30);
+ results[20] = a - (b >> 31);
+ results[21] = a - (b >> 32);
+ results[22] = a - (b >> 62);
+ results[23] = a - (b >> 63);
+
+ results[24] = a ^ (b >>> 1);
+ results[25] = a ^ (b >>> 6);
+ results[26] = a ^ (b >>> 7);
+ results[27] = a ^ (b >>> 8);
+ results[28] = a ^ (b >>> 14);
+ results[29] = a ^ (b >>> 15);
+ results[30] = a ^ (b >>> 16);
+ results[31] = a ^ (b >>> 30);
+ results[32] = a ^ (b >>> 31);
+ results[33] = a ^ (b >>> 32);
+ results[34] = a ^ (b >>> 62);
+ results[35] = a ^ (b >>> 63);
+
+ return results;
+ }
+
+ public static void $opt$validateShiftLongAsserts(long a, long b) {
+ long[] results = $opt$validateShiftLong(a, b);
+ assertIntEquals(3 * 12, results.length);
+
+ assertLongEquals(a + $noinline$LongShl(b, 1), results[0]);
+ assertLongEquals(a + $noinline$LongShl(b, 6), results[1]);
+ assertLongEquals(a + $noinline$LongShl(b, 7), results[2]);
+ assertLongEquals(a + $noinline$LongShl(b, 8), results[3]);
+ assertLongEquals(a + $noinline$LongShl(b, 14), results[4]);
+ assertLongEquals(a + $noinline$LongShl(b, 15), results[5]);
+ assertLongEquals(a + $noinline$LongShl(b, 16), results[6]);
+ assertLongEquals(a + $noinline$LongShl(b, 30), results[7]);
+ assertLongEquals(a + $noinline$LongShl(b, 31), results[8]);
+ assertLongEquals(a + $noinline$LongShl(b, 32), results[9]);
+ assertLongEquals(a + $noinline$LongShl(b, 62), results[10]);
+ assertLongEquals(a + $noinline$LongShl(b, 63), results[11]);
+
+ assertLongEquals(a - $noinline$LongShr(b, 1), results[12]);
+ assertLongEquals(a - $noinline$LongShr(b, 6), results[13]);
+ assertLongEquals(a - $noinline$LongShr(b, 7), results[14]);
+ assertLongEquals(a - $noinline$LongShr(b, 8), results[15]);
+ assertLongEquals(a - $noinline$LongShr(b, 14), results[16]);
+ assertLongEquals(a - $noinline$LongShr(b, 15), results[17]);
+ assertLongEquals(a - $noinline$LongShr(b, 16), results[18]);
+ assertLongEquals(a - $noinline$LongShr(b, 30), results[19]);
+ assertLongEquals(a - $noinline$LongShr(b, 31), results[20]);
+ assertLongEquals(a - $noinline$LongShr(b, 32), results[21]);
+ assertLongEquals(a - $noinline$LongShr(b, 62), results[22]);
+ assertLongEquals(a - $noinline$LongShr(b, 63), results[23]);
+
+ assertLongEquals(a ^ $noinline$LongUshr(b, 1), results[24]);
+ assertLongEquals(a ^ $noinline$LongUshr(b, 6), results[25]);
+ assertLongEquals(a ^ $noinline$LongUshr(b, 7), results[26]);
+ assertLongEquals(a ^ $noinline$LongUshr(b, 8), results[27]);
+ assertLongEquals(a ^ $noinline$LongUshr(b, 14), results[28]);
+ assertLongEquals(a ^ $noinline$LongUshr(b, 15), results[29]);
+ assertLongEquals(a ^ $noinline$LongUshr(b, 16), results[30]);
+ assertLongEquals(a ^ $noinline$LongUshr(b, 30), results[31]);
+ assertLongEquals(a ^ $noinline$LongUshr(b, 31), results[32]);
+ assertLongEquals(a ^ $noinline$LongUshr(b, 32), results[33]);
+ assertLongEquals(a ^ $noinline$LongUshr(b, 62), results[34]);
+ assertLongEquals(a ^ $noinline$LongUshr(b, 63), results[35]);
}
@@ -1072,7 +1120,7 @@ public class Main {
$opt$validateExtendLong(inputs[i], inputs[j]);
$opt$validateShiftInt((int)inputs[i], (int)inputs[j]);
- $opt$validateShiftLong(inputs[i], inputs[j]);
+ $opt$validateShiftLongAsserts(inputs[i], inputs[j]);
}
}
diff --git a/test/563-checker-fakestring/smali/TestCase.smali b/test/563-checker-fakestring/smali/TestCase.smali
index 8898c48ea1..9d10bd77dd 100644
--- a/test/563-checker-fakestring/smali/TestCase.smali
+++ b/test/563-checker-fakestring/smali/TestCase.smali
@@ -142,8 +142,7 @@
# Irreducible loop
if-eqz p1, :loop_entry
:loop_header
- const v1, 0x1
- xor-int p1, p1, v1
+ xor-int/lit8 p1, p1, 0x1
:loop_entry
if-eqz p1, :string_init
goto :loop_header
@@ -166,8 +165,7 @@
:loop_header
if-eqz p1, :string_init
:loop_entry
- const v1, 0x1
- xor-int p1, p1, v1
+ xor-int/lit8 p1, p1, 0x1
goto :loop_header
:string_init
@@ -187,8 +185,7 @@
# Irreducible loop
if-eqz p1, :loop_entry
:loop_header
- const v1, 0x1
- xor-int p1, p1, v1
+ xor-int/lit8 p1, p1, 0x1
:loop_entry
if-eqz p1, :string_init
goto :loop_header
@@ -199,3 +196,112 @@
return-object v2
.end method
+
+# Test with a loop between allocation and String.<init>.
+.method public static loopAndStringInit([BZ)Ljava/lang/String;
+ .registers 5
+
+ new-instance v0, Ljava/lang/String;
+
+ # Loop
+ :loop_header
+ if-eqz p1, :loop_exit
+ xor-int/lit8 p1, p1, 0x1
+ goto :loop_header
+
+ :loop_exit
+ const-string v1, "UTF8"
+ invoke-direct {v0, p0, v1}, Ljava/lang/String;-><init>([BLjava/lang/String;)V
+ return-object v0
+
+.end method
+
+# Test with a loop and aliases between allocation and String.<init>.
+.method public static loopAndStringInitAlias([BZ)Ljava/lang/String;
+ .registers 5
+
+ new-instance v0, Ljava/lang/String;
+ move-object v2, v0
+
+ # Loop
+ :loop_header
+ if-eqz p1, :loop_exit
+ xor-int/lit8 p1, p1, 0x1
+ goto :loop_header
+
+ :loop_exit
+ const-string v1, "UTF8"
+ invoke-direct {v0, p0, v1}, Ljava/lang/String;-><init>([BLjava/lang/String;)V
+ return-object v2
+
+.end method
+
+# Test deoptimization after String initialization of a phi.
+## CHECK-START: int TestCase.deoptimizeNewInstanceAfterLoop(int[], byte[], int) register (after)
+## CHECK: <<Invoke:l\d+>> InvokeStaticOrDirect method_name:java.lang.String.<init>
+## CHECK: Deoptimize env:[[<<Invoke>>,{{.*]]}}
+
+.method public static deoptimizeNewInstanceAfterLoop([I[BI)I
+ .registers 8
+
+ const v2, 0x0
+ const v1, 0x1
+
+ new-instance v0, Ljava/lang/String; # HNewInstance(String)
+ move-object v4, v0
+ # Loop
+ :loop_header
+ if-eqz p2, :loop_exit
+ xor-int/lit8 p2, p2, 0x1
+ goto :loop_header
+
+ :loop_exit
+ const-string v3, "UTF8"
+ invoke-direct {v0, p1, v3}, Ljava/lang/String;-><init>([BLjava/lang/String;)V
+
+ # Deoptimize here if the array is too short.
+ aget v1, p0, v1 # v1 = int_array[0x1]
+ add-int/2addr v2, v1 # v2 = 0x0 + v1
+
+ # Check that we're being executed by the interpreter.
+ invoke-static {}, LMain;->assertIsInterpreted()V
+
+ # Check that the environments contain the right string.
+ invoke-static {p1, v0}, LMain;->assertEqual([BLjava/lang/String;)V
+ invoke-static {p1, v4}, LMain;->assertEqual([BLjava/lang/String;)V
+
+ # This ArrayGet will throw ArrayIndexOutOfBoundsException.
+ const v1, 0x4
+ aget v1, p0, v1
+ add-int/2addr v2, v1
+
+ return v2
+
+.end method
+
+# Test with a loop between allocation and String.<init> and a null check.
+## CHECK-START: java.lang.String TestCase.loopAndStringInitAndTest(byte[], boolean) builder (after)
+## CHECK-DAG: <<Null:l\d+>> NullConstant
+## CHECK-DAG: <<String:l\d+>> NewInstance
+## CHECK-DAG: <<Cond:z\d+>> NotEqual [<<String>>,<<Null>>]
+
+## CHECK-START: java.lang.String TestCase.loopAndStringInitAndTest(byte[], boolean) register (after)
+## CHECK-DAG: <<String:l\d+>> NewInstance
+.method public static loopAndStringInitAndTest([BZ)Ljava/lang/String;
+ .registers 5
+
+ new-instance v0, Ljava/lang/String;
+
+ # Loop
+ :loop_header
+ # Use the new-instance in the only way it can be used.
+ if-nez v0, :loop_exit
+ xor-int/lit8 p1, p1, 0x1
+ goto :loop_header
+
+ :loop_exit
+ const-string v1, "UTF8"
+ invoke-direct {v0, p0, v1}, Ljava/lang/String;-><init>([BLjava/lang/String;)V
+ return-object v0
+
+.end method
diff --git a/test/563-checker-fakestring/src/Main.java b/test/563-checker-fakestring/src/Main.java
index d38b7f4f23..3639d59878 100644
--- a/test/563-checker-fakestring/src/Main.java
+++ b/test/563-checker-fakestring/src/Main.java
@@ -23,12 +23,19 @@ public class Main {
public static native void assertIsInterpreted();
- private static void assertEqual(String expected, String actual) {
+ public static void assertEqual(String expected, String actual) {
if (!expected.equals(actual)) {
throw new Error("Assertion failed: " + expected + " != " + actual);
}
}
+ public static void assertEqual(byte[] expected, String actual) throws Exception {
+ String str = new String(expected, "UTF8");
+ if (!str.equals(actual)) {
+ throw new Error("Assertion failed: " + str + " != " + actual);
+ }
+ }
+
public static void main(String[] args) throws Throwable {
System.loadLibrary(args[0]);
Class<?> c = Class.forName("TestCase");
@@ -85,6 +92,41 @@ public class Main {
result = (String) m.invoke(null, new Object[] { testData, false });
assertEqual(testString, result);
}
+ {
+ Method m = c.getMethod("loopAndStringInit", byte[].class, boolean.class);
+ String result = (String) m.invoke(null, new Object[] { testData, true });
+ assertEqual(testString, result);
+ result = (String) m.invoke(null, new Object[] { testData, false });
+ assertEqual(testString, result);
+ }
+ {
+ Method m = c.getMethod("loopAndStringInitAlias", byte[].class, boolean.class);
+ String result = (String) m.invoke(null, new Object[] { testData, true });
+ assertEqual(testString, result);
+ result = (String) m.invoke(null, new Object[] { testData, false });
+ assertEqual(testString, result);
+ }
+ {
+ Method m = c.getMethod("loopAndStringInitAndTest", byte[].class, boolean.class);
+ String result = (String) m.invoke(null, new Object[] { testData, true });
+ assertEqual(testString, result);
+ result = (String) m.invoke(null, new Object[] { testData, false });
+ assertEqual(testString, result);
+ }
+
+ {
+ Method m = c.getMethod(
+ "deoptimizeNewInstanceAfterLoop", int[].class, byte[].class, int.class);
+ try {
+ m.invoke(null, new Object[] { new int[] { 1, 2, 3 }, testData, 0 });
+ } catch (InvocationTargetException ex) {
+ if (ex.getCause() instanceof ArrayIndexOutOfBoundsException) {
+ // Expected.
+ } else {
+ throw ex.getCause();
+ }
+ }
+ }
}
public static boolean doThrow = false;
diff --git a/test/565-checker-condition-liveness/info.txt b/test/565-checker-condition-liveness/info.txt
index 67b6ceb53f..e716c04491 100644
--- a/test/565-checker-condition-liveness/info.txt
+++ b/test/565-checker-condition-liveness/info.txt
@@ -1 +1 @@
-Test the use positions of inputs of non-materialized conditions. \ No newline at end of file
+Test the results of liveness analysis e.g. use positions of inputs of non-materialized conditions. \ No newline at end of file
diff --git a/test/565-checker-condition-liveness/src/Main.java b/test/565-checker-condition-liveness/src/Main.java
index acfcecdba8..6b6619fa43 100644
--- a/test/565-checker-condition-liveness/src/Main.java
+++ b/test/565-checker-condition-liveness/src/Main.java
@@ -31,6 +31,82 @@ public class Main {
return (arg > 5.0f) ? 0 : -1;
}
+ /// CHECK-START: void Main.testThrowIntoCatchBlock(int, java.lang.Object, int[]) liveness (after)
+ /// CHECK-DAG: <<IntArg:i\d+>> ParameterValue env_uses:[21,25]
+ /// CHECK-DAG: <<RefArg:l\d+>> ParameterValue env_uses:[11,21,25]
+ /// CHECK-DAG: <<Array:l\d+>> ParameterValue env_uses:[11,21,25]
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 env_uses:[21,25]
+ /// CHECK-DAG: SuspendCheck env:[[_,<<IntArg>>,<<RefArg>>,<<Array>>]] liveness:10
+ /// CHECK-DAG: NullCheck env:[[<<Const1>>,<<IntArg>>,<<RefArg>>,<<Array>>]] liveness:20
+ /// CHECK-DAG: BoundsCheck env:[[<<Const1>>,<<IntArg>>,<<RefArg>>,<<Array>>]] liveness:24
+ /// CHECK-DAG: TryBoundary
+
+ /// CHECK-START-DEBUGGABLE: void Main.testThrowIntoCatchBlock(int, java.lang.Object, int[]) liveness (after)
+ /// CHECK-DAG: <<IntArg:i\d+>> ParameterValue env_uses:[11,21,25]
+ /// CHECK-DAG: <<RefArg:l\d+>> ParameterValue env_uses:[11,21,25]
+ /// CHECK-DAG: <<Array:l\d+>> ParameterValue env_uses:[11,21,25]
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 env_uses:[21,25]
+ /// CHECK-DAG: SuspendCheck env:[[_,<<IntArg>>,<<RefArg>>,<<Array>>]] liveness:10
+ /// CHECK-DAG: NullCheck env:[[<<Const1>>,<<IntArg>>,<<RefArg>>,<<Array>>]] liveness:20
+ /// CHECK-DAG: BoundsCheck env:[[<<Const1>>,<<IntArg>>,<<RefArg>>,<<Array>>]] liveness:24
+ /// CHECK-DAG: TryBoundary
+ //
+ // A value live at a throwing instruction in a try block may be copied by
+ // the exception handler to its location at the top of the catch block.
+ public static void testThrowIntoCatchBlock(int x, Object y, int[] a) {
+ try {
+ a[1] = x;
+ } catch (ArrayIndexOutOfBoundsException exception) {
+ }
+ }
+
+ /// CHECK-START: void Main.testBoundsCheck(int, java.lang.Object, int[]) liveness (after)
+ /// CHECK-DAG: <<IntArg:i\d+>> ParameterValue env_uses:[]
+ /// CHECK-DAG: <<RefArg:l\d+>> ParameterValue env_uses:[11,17,21]
+ /// CHECK-DAG: <<Array:l\d+>> ParameterValue env_uses:[11,17,21]
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 env_uses:[]
+ /// CHECK-DAG: SuspendCheck env:[[_,<<IntArg>>,<<RefArg>>,<<Array>>]] liveness:10
+ /// CHECK-DAG: NullCheck env:[[<<Const1>>,<<IntArg>>,<<RefArg>>,<<Array>>]] liveness:16
+ /// CHECK-DAG: BoundsCheck env:[[<<Const1>>,<<IntArg>>,<<RefArg>>,<<Array>>]] liveness:20
+
+ /// CHECK-START-DEBUGGABLE: void Main.testBoundsCheck(int, java.lang.Object, int[]) liveness (after)
+ /// CHECK-DAG: <<IntArg:i\d+>> ParameterValue env_uses:[11,17,21]
+ /// CHECK-DAG: <<RefArg:l\d+>> ParameterValue env_uses:[11,17,21]
+ /// CHECK-DAG: <<Array:l\d+>> ParameterValue env_uses:[11,17,21]
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 env_uses:[17,21]
+ /// CHECK-DAG: SuspendCheck env:[[_,<<IntArg>>,<<RefArg>>,<<Array>>]] liveness:10
+ /// CHECK-DAG: NullCheck env:[[<<Const1>>,<<IntArg>>,<<RefArg>>,<<Array>>]] liveness:16
+ /// CHECK-DAG: BoundsCheck env:[[<<Const1>>,<<IntArg>>,<<RefArg>>,<<Array>>]] liveness:20
+ public static void testBoundsCheck(int x, Object y, int[] a) {
+ a[1] = x;
+ }
+
+ /// CHECK-START: void Main.testDeoptimize(int, java.lang.Object, int[]) liveness (after)
+ /// CHECK-DAG: <<IntArg:i\d+>> ParameterValue env_uses:[25]
+ /// CHECK-DAG: <<RefArg:l\d+>> ParameterValue env_uses:[13,19,25]
+ /// CHECK-DAG: <<Array:l\d+>> ParameterValue env_uses:[13,19,25]
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 env_uses:[25]
+ /// CHECK-DAG: SuspendCheck env:[[_,<<IntArg>>,<<RefArg>>,<<Array>>]] liveness:12
+ /// CHECK-DAG: NullCheck env:[[<<Const0>>,<<IntArg>>,<<RefArg>>,<<Array>>]] liveness:18
+ /// CHECK-DAG: Deoptimize env:[[<<Const0>>,<<IntArg>>,<<RefArg>>,<<Array>>]] liveness:24
+
+ /// CHECK-START-DEBUGGABLE: void Main.testDeoptimize(int, java.lang.Object, int[]) liveness (after)
+ /// CHECK-DAG: <<IntArg:i\d+>> ParameterValue env_uses:[13,19,25]
+ /// CHECK-DAG: <<RefArg:l\d+>> ParameterValue env_uses:[13,19,25]
+ /// CHECK-DAG: <<Array:l\d+>> ParameterValue env_uses:[13,19,25]
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 env_uses:[19,25]
+ /// CHECK-DAG: SuspendCheck env:[[_,<<IntArg>>,<<RefArg>>,<<Array>>]] liveness:12
+ /// CHECK-DAG: NullCheck env:[[<<Const0>>,<<IntArg>>,<<RefArg>>,<<Array>>]] liveness:18
+ /// CHECK-DAG: Deoptimize env:[[<<Const0>>,<<IntArg>>,<<RefArg>>,<<Array>>]] liveness:24
+ //
+ // A value that's not live in compiled code may still be needed in interpreter,
+ // due to code motion, etc.
+ public static void testDeoptimize(int x, Object y, int[] a) {
+ a[0] = x;
+ a[1] = x;
+ }
+
+
/// CHECK-START: void Main.main(java.lang.String[]) liveness (after)
/// CHECK: <<X:i\d+>> ArrayLength uses:[<<UseInput:\d+>>]
/// CHECK: <<Y:i\d+>> StaticFieldGet uses:[<<UseInput>>]
@@ -44,7 +120,15 @@ public class Main {
if (x > y) {
System.nanoTime();
}
+
+ int val = 14;
+ int[] array = new int[2];
+ Integer intObj = Integer.valueOf(0);
+ testThrowIntoCatchBlock(val, intObj, array);
+ testBoundsCheck(val, intObj, array);
+ testDeoptimize(val, intObj, array);
}
+
public static int field = 42;
}
diff --git a/test/565-checker-doublenegbitwise/build b/test/565-checker-doublenegbitwise/build
deleted file mode 100755
index 10ffcc537d..0000000000
--- a/test/565-checker-doublenegbitwise/build
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2017 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# See b/65168732
-export USE_D8=false
-
-./default-build "$@"
diff --git a/test/565-checker-doublenegbitwise/smali/SmaliTests.smali b/test/565-checker-doublenegbitwise/smali/SmaliTests.smali
index 2e0802276e..ce691549ce 100644
--- a/test/565-checker-doublenegbitwise/smali/SmaliTests.smali
+++ b/test/565-checker-doublenegbitwise/smali/SmaliTests.smali
@@ -403,3 +403,591 @@
sput-boolean v0, LSmaliTests;->doThrow:Z
return-void
.end method
+
+
+# Test transformation of Not/Not/And into Or/Not.
+
+# Note: before the instruction_simplifier pass, Xor's are used instead of
+# Not's (the simplification happens during the same pass).
+## CHECK-START: int SmaliTests.$opt$noinline$andToOrV2(int, int) instruction_simplifier (before)
+## CHECK-DAG: <<P1:i\d+>> ParameterValue
+## CHECK-DAG: <<P2:i\d+>> ParameterValue
+## CHECK-DAG: <<CstM1:i\d+>> IntConstant -1
+## CHECK-DAG: <<Not1:i\d+>> Xor [<<P1>>,<<CstM1>>]
+## CHECK-DAG: <<Not2:i\d+>> Xor [<<P2>>,<<CstM1>>]
+## CHECK-DAG: <<And:i\d+>> And [<<Not1>>,<<Not2>>]
+## CHECK-DAG: Return [<<And>>]
+
+## CHECK-START: int SmaliTests.$opt$noinline$andToOrV2(int, int) instruction_simplifier (after)
+## CHECK-DAG: <<P1:i\d+>> ParameterValue
+## CHECK-DAG: <<P2:i\d+>> ParameterValue
+## CHECK-DAG: <<Or:i\d+>> Or [<<P1>>,<<P2>>]
+## CHECK-DAG: <<Not:i\d+>> Not [<<Or>>]
+## CHECK-DAG: Return [<<Not>>]
+
+## CHECK-START: int SmaliTests.$opt$noinline$andToOrV2(int, int) instruction_simplifier (after)
+## CHECK-DAG: Not
+## CHECK-NOT: Not
+
+## CHECK-START: int SmaliTests.$opt$noinline$andToOrV2(int, int) instruction_simplifier (after)
+## CHECK-NOT: And
+
+# Original java source:
+#
+# public static int $opt$noinline$andToOr(int a, int b) {
+# if (doThrow) throw new Error();
+# return ~a & ~b;
+# }
+
+.method public static $opt$noinline$andToOrV2(II)I
+ .registers 4
+ .param p0, "a" # I
+ .param p1, "b" # I
+
+ .prologue
+ .line 85
+ sget-boolean v0, LMain;->doThrow:Z
+
+ if-eqz v0, :cond_a
+
+ new-instance v0, Ljava/lang/Error;
+
+ invoke-direct {v0}, Ljava/lang/Error;-><init>()V
+
+ throw v0
+
+ .line 86
+ :cond_a
+ xor-int/lit8 v0, p0, -0x1
+
+ xor-int/lit8 v1, p1, -0x1
+
+ and-int/2addr v0, v1
+
+ return v0
+.end method
+
+
+# Test transformation of Not/Not/And into Or/Not for boolean negations.
+# Note that the graph before this instruction simplification pass does not
+# contain `HBooleanNot` instructions. This is because this transformation
+# follows the optimization of `HSelect` to `HBooleanNot` occurring in the
+# same pass.
+
+## CHECK-START: boolean SmaliTests.$opt$noinline$booleanAndToOrV2(boolean, boolean) instruction_simplifier$after_gvn (before)
+## CHECK-DAG: <<P1:z\d+>> ParameterValue
+## CHECK-DAG: <<P2:z\d+>> ParameterValue
+## CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+## CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+## CHECK-DAG: <<Select1:i\d+>> Select [<<Const1>>,<<Const0>>,<<P1>>]
+## CHECK-DAG: <<Select2:i\d+>> Select [<<Const1>>,<<Const0>>,<<P2>>]
+## CHECK-DAG: <<And:i\d+>> And [<<Select1>>,<<Select2>>]
+## CHECK-DAG: Return [<<And>>]
+
+## CHECK-START: boolean SmaliTests.$opt$noinline$booleanAndToOrV2(boolean, boolean) instruction_simplifier$after_gvn (after)
+## CHECK-DAG: <<Cond1:z\d+>> ParameterValue
+## CHECK-DAG: <<Cond2:z\d+>> ParameterValue
+## CHECK-DAG: <<Or:i\d+>> Or [<<Cond1>>,<<Cond2>>]
+## CHECK-DAG: <<BooleanNot:z\d+>> BooleanNot [<<Or>>]
+## CHECK-DAG: Return [<<BooleanNot>>]
+
+## CHECK-START: boolean SmaliTests.$opt$noinline$booleanAndToOrV2(boolean, boolean) instruction_simplifier$after_bce (after)
+## CHECK-DAG: BooleanNot
+## CHECK-NOT: BooleanNot
+
+## CHECK-START: boolean SmaliTests.$opt$noinline$booleanAndToOrV2(boolean, boolean) instruction_simplifier$after_bce (after)
+## CHECK-NOT: And
+
+# Original java source:
+#
+# public static boolean $opt$noinline$booleanAndToOr(boolean a, boolean b) {
+# if (doThrow) throw new Error();
+# return !a & !b;
+# }
+
+.method public static $opt$noinline$booleanAndToOrV2(ZZ)Z
+ .registers 5
+ .param p0, "a" # Z
+ .param p1, "b" # Z
+
+ .prologue
+ const/4 v0, 0x1
+
+ const/4 v1, 0x0
+
+ .line 122
+ sget-boolean v2, LMain;->doThrow:Z
+
+ if-eqz v2, :cond_c
+
+ new-instance v0, Ljava/lang/Error;
+
+ invoke-direct {v0}, Ljava/lang/Error;-><init>()V
+
+ throw v0
+
+ .line 123
+ :cond_c
+ if-nez p0, :cond_13
+
+ move v2, v0
+
+ :goto_f
+ if-nez p1, :cond_15
+
+ :goto_11
+ and-int/2addr v0, v2
+
+ return v0
+
+ :cond_13
+ move v2, v1
+
+ goto :goto_f
+
+ :cond_15
+ move v0, v1
+
+ goto :goto_11
+.end method
+
+
+# Test transformation of Not/Not/Or into And/Not.
+
+# See note above.
+# The second Xor has its arguments reversed for no obvious reason.
+## CHECK-START: long SmaliTests.$opt$noinline$orToAndV2(long, long) instruction_simplifier (before)
+## CHECK-DAG: <<P1:j\d+>> ParameterValue
+## CHECK-DAG: <<P2:j\d+>> ParameterValue
+## CHECK-DAG: <<CstM1:j\d+>> LongConstant -1
+## CHECK-DAG: <<Not1:j\d+>> Xor [<<P1>>,<<CstM1>>]
+## CHECK-DAG: <<Not2:j\d+>> Xor [<<CstM1>>,<<P2>>]
+## CHECK-DAG: <<Or:j\d+>> Or [<<Not1>>,<<Not2>>]
+## CHECK-DAG: Return [<<Or>>]
+
+## CHECK-START: long SmaliTests.$opt$noinline$orToAndV2(long, long) instruction_simplifier (after)
+## CHECK-DAG: <<P1:j\d+>> ParameterValue
+## CHECK-DAG: <<P2:j\d+>> ParameterValue
+## CHECK-DAG: <<And:j\d+>> And [<<P1>>,<<P2>>]
+## CHECK-DAG: <<Not:j\d+>> Not [<<And>>]
+## CHECK-DAG: Return [<<Not>>]
+
+## CHECK-START: long SmaliTests.$opt$noinline$orToAndV2(long, long) instruction_simplifier (after)
+## CHECK-DAG: Not
+## CHECK-NOT: Not
+
+## CHECK-START: long SmaliTests.$opt$noinline$orToAndV2(long, long) instruction_simplifier (after)
+## CHECK-NOT: Or
+
+# Original java source:
+#
+# public static long $opt$noinline$orToAnd(long a, long b) {
+# if (doThrow) throw new Error();
+# return ~a | ~b;
+# }
+
+.method public static $opt$noinline$orToAndV2(JJ)J
+ .registers 8
+ .param p0, "a" # J
+ .param p2, "b" # J
+
+ .prologue
+ const-wide/16 v2, -0x1
+
+ .line 156
+ sget-boolean v0, LMain;->doThrow:Z
+
+ if-eqz v0, :cond_c
+
+ new-instance v0, Ljava/lang/Error;
+
+ invoke-direct {v0}, Ljava/lang/Error;-><init>()V
+
+ throw v0
+
+ .line 157
+ :cond_c
+ xor-long v0, p0, v2
+
+ xor-long/2addr v2, p2
+
+ or-long/2addr v0, v2
+
+ return-wide v0
+.end method
+
+# Test transformation of Not/Not/Or into Or/And for boolean negations.
+# Note that the graph before this instruction simplification pass does not
+# contain `HBooleanNot` instructions. This is because this transformation
+# follows the optimization of `HSelect` to `HBooleanNot` occurring in the
+# same pass.
+
+## CHECK-START: boolean SmaliTests.$opt$noinline$booleanOrToAndV2(boolean, boolean) instruction_simplifier$after_gvn (before)
+## CHECK-DAG: <<P1:z\d+>> ParameterValue
+## CHECK-DAG: <<P2:z\d+>> ParameterValue
+## CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+## CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+## CHECK-DAG: <<Select1:i\d+>> Select [<<Const1>>,<<Const0>>,<<P1>>]
+## CHECK-DAG: <<Select2:i\d+>> Select [<<Const1>>,<<Const0>>,<<P2>>]
+## CHECK-DAG: <<Or:i\d+>> Or [<<Select1>>,<<Select2>>]
+## CHECK-DAG: Return [<<Or>>]
+
+## CHECK-START: boolean SmaliTests.$opt$noinline$booleanOrToAndV2(boolean, boolean) instruction_simplifier$after_gvn (after)
+## CHECK-DAG: <<Cond1:z\d+>> ParameterValue
+## CHECK-DAG: <<Cond2:z\d+>> ParameterValue
+## CHECK-DAG: <<And:i\d+>> And [<<Cond1>>,<<Cond2>>]
+## CHECK-DAG: <<BooleanNot:z\d+>> BooleanNot [<<And>>]
+## CHECK-DAG: Return [<<BooleanNot>>]
+
+## CHECK-START: boolean SmaliTests.$opt$noinline$booleanOrToAndV2(boolean, boolean) instruction_simplifier$after_bce (after)
+## CHECK-DAG: BooleanNot
+## CHECK-NOT: BooleanNot
+
+## CHECK-START: boolean SmaliTests.$opt$noinline$booleanOrToAndV2(boolean, boolean) instruction_simplifier$after_bce (after)
+## CHECK-NOT: Or
+
+# Original java source:
+#
+# public static boolean $opt$noinline$booleanOrToAnd(boolean a, boolean b) {
+# if (doThrow) throw new Error();
+# return !a | !b;
+# }
+
+.method public static $opt$noinline$booleanOrToAndV2(ZZ)Z
+ .registers 5
+ .param p0, "a" # Z
+ .param p1, "b" # Z
+
+ .prologue
+ const/4 v0, 0x1
+
+ const/4 v1, 0x0
+
+ .line 193
+ sget-boolean v2, LMain;->doThrow:Z
+
+ if-eqz v2, :cond_c
+
+ new-instance v0, Ljava/lang/Error;
+
+ invoke-direct {v0}, Ljava/lang/Error;-><init>()V
+
+ throw v0
+
+ .line 194
+ :cond_c
+ if-nez p0, :cond_13
+
+ move v2, v0
+
+ :goto_f
+ if-nez p1, :cond_15
+
+ :goto_11
+ or-int/2addr v0, v2
+
+ return v0
+
+ :cond_13
+ move v2, v1
+
+ goto :goto_f
+
+ :cond_15
+ move v0, v1
+
+ goto :goto_11
+.end method
+
+
+# Test that the transformation copes with inputs being separated from the
+# bitwise operations.
+# This is a regression test. The initial logic was inserting the new bitwise
+# operation incorrectly.
+
+## CHECK-START: int SmaliTests.$opt$noinline$regressInputsAwayV2(int, int) instruction_simplifier (before)
+## CHECK-DAG: <<P1:i\d+>> ParameterValue
+## CHECK-DAG: <<P2:i\d+>> ParameterValue
+## CHECK-DAG: <<Cst1:i\d+>> IntConstant 1
+## CHECK-DAG: <<CstM1:i\d+>> IntConstant -1
+## CHECK-DAG: <<AddP1:i\d+>> Add [<<P1>>,<<Cst1>>]
+## CHECK-DAG: <<Not1:i\d+>> Xor [<<AddP1>>,<<CstM1>>]
+## CHECK-DAG: <<AddP2:i\d+>> Add [<<P2>>,<<Cst1>>]
+## CHECK-DAG: <<Not2:i\d+>> Xor [<<AddP2>>,<<CstM1>>]
+## CHECK-DAG: <<Or:i\d+>> Or [<<Not1>>,<<Not2>>]
+## CHECK-DAG: Return [<<Or>>]
+
+## CHECK-START: int SmaliTests.$opt$noinline$regressInputsAwayV2(int, int) instruction_simplifier (after)
+## CHECK-DAG: <<P1:i\d+>> ParameterValue
+## CHECK-DAG: <<P2:i\d+>> ParameterValue
+## CHECK-DAG: <<Cst1:i\d+>> IntConstant 1
+## CHECK-DAG: <<AddP1:i\d+>> Add [<<P1>>,<<Cst1>>]
+## CHECK-DAG: <<AddP2:i\d+>> Add [<<P2>>,<<Cst1>>]
+## CHECK-DAG: <<And:i\d+>> And [<<AddP1>>,<<AddP2>>]
+## CHECK-DAG: <<Not:i\d+>> Not [<<And>>]
+## CHECK-DAG: Return [<<Not>>]
+
+## CHECK-START: int SmaliTests.$opt$noinline$regressInputsAwayV2(int, int) instruction_simplifier (after)
+## CHECK-DAG: Not
+## CHECK-NOT: Not
+
+## CHECK-START: int SmaliTests.$opt$noinline$regressInputsAwayV2(int, int) instruction_simplifier (after)
+## CHECK-NOT: Or
+
+# Original java source:
+#
+# public static int $opt$noinline$regressInputsAway(int a, int b) {
+# if (doThrow) throw new Error();
+# int a1 = a + 1;
+# int not_a1 = ~a1;
+# int b1 = b + 1;
+# int not_b1 = ~b1;
+# return not_a1 | not_b1;
+# }
+
+.method public static $opt$noinline$regressInputsAwayV2(II)I
+ .registers 7
+ .param p0, "a" # I
+ .param p1, "b" # I
+
+ .prologue
+ .line 234
+ sget-boolean v4, LMain;->doThrow:Z
+
+ if-eqz v4, :cond_a
+
+ new-instance v4, Ljava/lang/Error;
+
+ invoke-direct {v4}, Ljava/lang/Error;-><init>()V
+
+ throw v4
+
+ .line 235
+ :cond_a
+ add-int/lit8 v0, p0, 0x1
+
+ .line 236
+ .local v0, "a1":I
+ xor-int/lit8 v2, v0, -0x1
+
+ .line 237
+ .local v2, "not_a1":I
+ add-int/lit8 v1, p1, 0x1
+
+ .line 238
+ .local v1, "b1":I
+ xor-int/lit8 v3, v1, -0x1
+
+ .line 239
+ .local v3, "not_b1":I
+ or-int v4, v2, v3
+
+ return v4
+.end method
+
+
+# Test transformation of Not/Not/Xor into Xor.
+
+# See first note above.
+## CHECK-START: int SmaliTests.$opt$noinline$notXorToXorV2(int, int) instruction_simplifier (before)
+## CHECK-DAG: <<P1:i\d+>> ParameterValue
+## CHECK-DAG: <<P2:i\d+>> ParameterValue
+## CHECK-DAG: <<CstM1:i\d+>> IntConstant -1
+## CHECK-DAG: <<Not1:i\d+>> Xor [<<P1>>,<<CstM1>>]
+## CHECK-DAG: <<Not2:i\d+>> Xor [<<P2>>,<<CstM1>>]
+## CHECK-DAG: <<Xor:i\d+>> Xor [<<Not1>>,<<Not2>>]
+## CHECK-DAG: Return [<<Xor>>]
+
+## CHECK-START: int SmaliTests.$opt$noinline$notXorToXorV2(int, int) instruction_simplifier (after)
+## CHECK-DAG: <<P1:i\d+>> ParameterValue
+## CHECK-DAG: <<P2:i\d+>> ParameterValue
+## CHECK-DAG: <<Xor:i\d+>> Xor [<<P1>>,<<P2>>]
+## CHECK-DAG: Return [<<Xor>>]
+
+## CHECK-START: int SmaliTests.$opt$noinline$notXorToXorV2(int, int) instruction_simplifier (after)
+## CHECK-NOT: Not
+
+# Original java source:
+#
+# public static int $opt$noinline$notXorToXor(int a, int b) {
+# if (doThrow) throw new Error();
+# return ~a ^ ~b;
+# }
+
+.method public static $opt$noinline$notXorToXorV2(II)I
+ .registers 4
+ .param p0, "a" # I
+ .param p1, "b" # I
+
+ .prologue
+ .line 266
+ sget-boolean v0, LMain;->doThrow:Z
+
+ if-eqz v0, :cond_a
+
+ new-instance v0, Ljava/lang/Error;
+
+ invoke-direct {v0}, Ljava/lang/Error;-><init>()V
+
+ throw v0
+
+ .line 267
+ :cond_a
+ xor-int/lit8 v0, p0, -0x1
+
+ xor-int/lit8 v1, p1, -0x1
+
+ xor-int/2addr v0, v1
+
+ return v0
+.end method
+
+
+# Test transformation of Not/Not/Xor into Xor for boolean negations.
+# Note that the graph before this instruction simplification pass does not
+# contain `HBooleanNot` instructions. This is because this transformation
+# follows the optimization of `HSelect` to `HBooleanNot` occurring in the
+# same pass.
+
+## CHECK-START: boolean SmaliTests.$opt$noinline$booleanNotXorToXorV2(boolean, boolean) instruction_simplifier$after_gvn (before)
+## CHECK-DAG: <<P1:z\d+>> ParameterValue
+## CHECK-DAG: <<P2:z\d+>> ParameterValue
+## CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+## CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+## CHECK-DAG: <<Select1:i\d+>> Select [<<Const1>>,<<Const0>>,<<P1>>]
+## CHECK-DAG: <<Select2:i\d+>> Select [<<Const1>>,<<Const0>>,<<P2>>]
+## CHECK-DAG: <<Xor:i\d+>> Xor [<<Select1>>,<<Select2>>]
+## CHECK-DAG: Return [<<Xor>>]
+
+## CHECK-START: boolean SmaliTests.$opt$noinline$booleanNotXorToXorV2(boolean, boolean) instruction_simplifier$after_gvn (after)
+## CHECK-DAG: <<Cond1:z\d+>> ParameterValue
+## CHECK-DAG: <<Cond2:z\d+>> ParameterValue
+## CHECK-DAG: <<Xor:i\d+>> Xor [<<Cond1>>,<<Cond2>>]
+## CHECK-DAG: Return [<<Xor>>]
+
+## CHECK-START: boolean SmaliTests.$opt$noinline$booleanNotXorToXorV2(boolean, boolean) instruction_simplifier$after_bce (after)
+## CHECK-NOT: BooleanNot
+
+# Original java source:
+#
+# public static boolean $opt$noinline$booleanNotXorToXor(boolean a, boolean b) {
+# if (doThrow) throw new Error();
+# return !a ^ !b;
+# }
+
+.method public static $opt$noinline$booleanNotXorToXorV2(ZZ)Z
+ .registers 5
+ .param p0, "a" # Z
+ .param p1, "b" # Z
+
+ .prologue
+ const/4 v0, 0x1
+
+ const/4 v1, 0x0
+
+ .line 298
+ sget-boolean v2, LMain;->doThrow:Z
+
+ if-eqz v2, :cond_c
+
+ new-instance v0, Ljava/lang/Error;
+
+ invoke-direct {v0}, Ljava/lang/Error;-><init>()V
+
+ throw v0
+
+ .line 299
+ :cond_c
+ if-nez p0, :cond_13
+
+ move v2, v0
+
+ :goto_f
+ if-nez p1, :cond_15
+
+ :goto_11
+ xor-int/2addr v0, v2
+
+ return v0
+
+ :cond_13
+ move v2, v1
+
+ goto :goto_f
+
+ :cond_15
+ move v0, v1
+
+ goto :goto_11
+.end method
+
+
+# Check that no transformation is done when one Not has multiple uses.
+
+## CHECK-START: int SmaliTests.$opt$noinline$notMultipleUsesV2(int, int) instruction_simplifier (before)
+## CHECK-DAG: <<P1:i\d+>> ParameterValue
+## CHECK-DAG: <<P2:i\d+>> ParameterValue
+## CHECK-DAG: <<CstM1:i\d+>> IntConstant -1
+## CHECK-DAG: <<One:i\d+>> IntConstant 1
+## CHECK-DAG: <<Not2:i\d+>> Xor [<<P2>>,<<CstM1>>]
+## CHECK-DAG: <<And2:i\d+>> And [<<Not2>>,<<One>>]
+## CHECK-DAG: <<Not1:i\d+>> Xor [<<P1>>,<<CstM1>>]
+## CHECK-DAG: <<And1:i\d+>> And [<<Not1>>,<<Not2>>]
+## CHECK-DAG: <<Add:i\d+>> Add [<<And2>>,<<And1>>]
+## CHECK-DAG: Return [<<Add>>]
+
+## CHECK-START: int SmaliTests.$opt$noinline$notMultipleUsesV2(int, int) instruction_simplifier (after)
+## CHECK-DAG: <<P1:i\d+>> ParameterValue
+## CHECK-DAG: <<P2:i\d+>> ParameterValue
+## CHECK-DAG: <<One:i\d+>> IntConstant 1
+## CHECK-DAG: <<Not2:i\d+>> Not [<<P2>>]
+## CHECK-DAG: <<And2:i\d+>> And [<<Not2>>,<<One>>]
+## CHECK-DAG: <<Not1:i\d+>> Not [<<P1>>]
+## CHECK-DAG: <<And1:i\d+>> And [<<Not1>>,<<Not2>>]
+## CHECK-DAG: <<Add:i\d+>> Add [<<And2>>,<<And1>>]
+## CHECK-DAG: Return [<<Add>>]
+
+## CHECK-START: int SmaliTests.$opt$noinline$notMultipleUsesV2(int, int) instruction_simplifier (after)
+## CHECK-NOT: Or
+
+# Original java source:
+#
+# public static int $opt$noinline$notMultipleUses(int a, int b) {
+# if (doThrow) throw new Error();
+# int tmp = ~b;
+# return (tmp & 0x1) + (~a & tmp);
+# }
+
+.method public static $opt$noinline$notMultipleUsesV2(II)I
+ .registers 5
+ .param p0, "a" # I
+ .param p1, "b" # I
+
+ .prologue
+ .line 333
+ sget-boolean v1, LMain;->doThrow:Z
+
+ if-eqz v1, :cond_a
+
+ new-instance v1, Ljava/lang/Error;
+
+ invoke-direct {v1}, Ljava/lang/Error;-><init>()V
+
+ throw v1
+
+ .line 334
+ :cond_a
+ xor-int/lit8 v0, p1, -0x1
+
+ .line 335
+ .local v0, "tmp":I
+ and-int/lit8 v1, v0, 0x1
+
+ xor-int/lit8 v2, p0, -0x1
+
+ and-int/2addr v2, v0
+
+ add-int/2addr v1, v2
+
+ return v1
+.end method
diff --git a/test/565-checker-doublenegbitwise/src/Main.java b/test/565-checker-doublenegbitwise/src/Main.java
index e36a2bab40..5121569632 100644
--- a/test/565-checker-doublenegbitwise/src/Main.java
+++ b/test/565-checker-doublenegbitwise/src/Main.java
@@ -52,305 +52,22 @@ public class Main {
}
}
- /**
- * Test transformation of Not/Not/And into Or/Not.
- */
-
- // Note: before the instruction_simplifier pass, Xor's are used instead of
- // Not's (the simplification happens during the same pass).
- /// CHECK-START: int Main.$opt$noinline$andToOr(int, int) instruction_simplifier (before)
- /// CHECK-DAG: <<P1:i\d+>> ParameterValue
- /// CHECK-DAG: <<P2:i\d+>> ParameterValue
- /// CHECK-DAG: <<CstM1:i\d+>> IntConstant -1
- /// CHECK-DAG: <<Not1:i\d+>> Xor [<<P1>>,<<CstM1>>]
- /// CHECK-DAG: <<Not2:i\d+>> Xor [<<P2>>,<<CstM1>>]
- /// CHECK-DAG: <<And:i\d+>> And [<<Not1>>,<<Not2>>]
- /// CHECK-DAG: Return [<<And>>]
-
- /// CHECK-START: int Main.$opt$noinline$andToOr(int, int) instruction_simplifier (after)
- /// CHECK-DAG: <<P1:i\d+>> ParameterValue
- /// CHECK-DAG: <<P2:i\d+>> ParameterValue
- /// CHECK-DAG: <<Or:i\d+>> Or [<<P1>>,<<P2>>]
- /// CHECK-DAG: <<Not:i\d+>> Not [<<Or>>]
- /// CHECK-DAG: Return [<<Not>>]
-
- /// CHECK-START: int Main.$opt$noinline$andToOr(int, int) instruction_simplifier (after)
- /// CHECK-DAG: Not
- /// CHECK-NOT: Not
-
- /// CHECK-START: int Main.$opt$noinline$andToOr(int, int) instruction_simplifier (after)
- /// CHECK-NOT: And
-
- public static int $opt$noinline$andToOr(int a, int b) {
- if (doThrow) throw new Error();
- return ~a & ~b;
- }
-
- /**
- * Test transformation of Not/Not/And into Or/Not for boolean negations.
- * Note that the graph before this instruction simplification pass does not
- * contain `HBooleanNot` instructions. This is because this transformation
- * follows the optimization of `HSelect` to `HBooleanNot` occurring in the
- * same pass.
- */
-
- /// CHECK-START: boolean Main.$opt$noinline$booleanAndToOr(boolean, boolean) instruction_simplifier$after_gvn (before)
- /// CHECK-DAG: <<P1:z\d+>> ParameterValue
- /// CHECK-DAG: <<P2:z\d+>> ParameterValue
- /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
- /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
- /// CHECK-DAG: <<Select1:i\d+>> Select [<<Const1>>,<<Const0>>,<<P1>>]
- /// CHECK-DAG: <<Select2:i\d+>> Select [<<Const1>>,<<Const0>>,<<P2>>]
- /// CHECK-DAG: <<And:i\d+>> And [<<Select1>>,<<Select2>>]
- /// CHECK-DAG: Return [<<And>>]
-
- /// CHECK-START: boolean Main.$opt$noinline$booleanAndToOr(boolean, boolean) instruction_simplifier$after_gvn (after)
- /// CHECK-DAG: <<Cond1:z\d+>> ParameterValue
- /// CHECK-DAG: <<Cond2:z\d+>> ParameterValue
- /// CHECK-DAG: <<Or:i\d+>> Or [<<Cond1>>,<<Cond2>>]
- /// CHECK-DAG: <<BooleanNot:z\d+>> BooleanNot [<<Or>>]
- /// CHECK-DAG: Return [<<BooleanNot>>]
-
- /// CHECK-START: boolean Main.$opt$noinline$booleanAndToOr(boolean, boolean) instruction_simplifier$after_bce (after)
- /// CHECK-DAG: BooleanNot
- /// CHECK-NOT: BooleanNot
-
- /// CHECK-START: boolean Main.$opt$noinline$booleanAndToOr(boolean, boolean) instruction_simplifier$after_bce (after)
- /// CHECK-NOT: And
-
- public static boolean $opt$noinline$booleanAndToOr(boolean a, boolean b) {
- if (doThrow) throw new Error();
- return !a & !b;
- }
-
- /**
- * Test transformation of Not/Not/Or into And/Not.
- */
-
- // See note above.
- // The second Xor has its arguments reversed for no obvious reason.
- /// CHECK-START: long Main.$opt$noinline$orToAnd(long, long) instruction_simplifier (before)
- /// CHECK-DAG: <<P1:j\d+>> ParameterValue
- /// CHECK-DAG: <<P2:j\d+>> ParameterValue
- /// CHECK-DAG: <<CstM1:j\d+>> LongConstant -1
- /// CHECK-DAG: <<Not1:j\d+>> Xor [<<P1>>,<<CstM1>>]
- /// CHECK-DAG: <<Not2:j\d+>> Xor [<<CstM1>>,<<P2>>]
- /// CHECK-DAG: <<Or:j\d+>> Or [<<Not1>>,<<Not2>>]
- /// CHECK-DAG: Return [<<Or>>]
-
- /// CHECK-START: long Main.$opt$noinline$orToAnd(long, long) instruction_simplifier (after)
- /// CHECK-DAG: <<P1:j\d+>> ParameterValue
- /// CHECK-DAG: <<P2:j\d+>> ParameterValue
- /// CHECK-DAG: <<And:j\d+>> And [<<P1>>,<<P2>>]
- /// CHECK-DAG: <<Not:j\d+>> Not [<<And>>]
- /// CHECK-DAG: Return [<<Not>>]
-
- /// CHECK-START: long Main.$opt$noinline$orToAnd(long, long) instruction_simplifier (after)
- /// CHECK-DAG: Not
- /// CHECK-NOT: Not
-
- /// CHECK-START: long Main.$opt$noinline$orToAnd(long, long) instruction_simplifier (after)
- /// CHECK-NOT: Or
-
- public static long $opt$noinline$orToAnd(long a, long b) {
- if (doThrow) throw new Error();
- return ~a | ~b;
- }
-
- /**
- * Test transformation of Not/Not/Or into Or/And for boolean negations.
- * Note that the graph before this instruction simplification pass does not
- * contain `HBooleanNot` instructions. This is because this transformation
- * follows the optimization of `HSelect` to `HBooleanNot` occurring in the
- * same pass.
- */
-
- /// CHECK-START: boolean Main.$opt$noinline$booleanOrToAnd(boolean, boolean) instruction_simplifier$after_gvn (before)
- /// CHECK-DAG: <<P1:z\d+>> ParameterValue
- /// CHECK-DAG: <<P2:z\d+>> ParameterValue
- /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
- /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
- /// CHECK-DAG: <<Select1:i\d+>> Select [<<Const1>>,<<Const0>>,<<P1>>]
- /// CHECK-DAG: <<Select2:i\d+>> Select [<<Const1>>,<<Const0>>,<<P2>>]
- /// CHECK-DAG: <<Or:i\d+>> Or [<<Select1>>,<<Select2>>]
- /// CHECK-DAG: Return [<<Or>>]
-
- /// CHECK-START: boolean Main.$opt$noinline$booleanOrToAnd(boolean, boolean) instruction_simplifier$after_gvn (after)
- /// CHECK-DAG: <<Cond1:z\d+>> ParameterValue
- /// CHECK-DAG: <<Cond2:z\d+>> ParameterValue
- /// CHECK-DAG: <<And:i\d+>> And [<<Cond1>>,<<Cond2>>]
- /// CHECK-DAG: <<BooleanNot:z\d+>> BooleanNot [<<And>>]
- /// CHECK-DAG: Return [<<BooleanNot>>]
-
- /// CHECK-START: boolean Main.$opt$noinline$booleanOrToAnd(boolean, boolean) instruction_simplifier$after_bce (after)
- /// CHECK-DAG: BooleanNot
- /// CHECK-NOT: BooleanNot
-
- /// CHECK-START: boolean Main.$opt$noinline$booleanOrToAnd(boolean, boolean) instruction_simplifier$after_bce (after)
- /// CHECK-NOT: Or
-
- public static boolean $opt$noinline$booleanOrToAnd(boolean a, boolean b) {
- if (doThrow) throw new Error();
- return !a | !b;
- }
-
- /**
- * Test that the transformation copes with inputs being separated from the
- * bitwise operations.
- * This is a regression test. The initial logic was inserting the new bitwise
- * operation incorrectly.
- */
-
- /// CHECK-START: int Main.$opt$noinline$regressInputsAway(int, int) instruction_simplifier (before)
- /// CHECK-DAG: <<P1:i\d+>> ParameterValue
- /// CHECK-DAG: <<P2:i\d+>> ParameterValue
- /// CHECK-DAG: <<Cst1:i\d+>> IntConstant 1
- /// CHECK-DAG: <<CstM1:i\d+>> IntConstant -1
- /// CHECK-DAG: <<AddP1:i\d+>> Add [<<P1>>,<<Cst1>>]
- /// CHECK-DAG: <<Not1:i\d+>> Xor [<<AddP1>>,<<CstM1>>]
- /// CHECK-DAG: <<AddP2:i\d+>> Add [<<P2>>,<<Cst1>>]
- /// CHECK-DAG: <<Not2:i\d+>> Xor [<<AddP2>>,<<CstM1>>]
- /// CHECK-DAG: <<Or:i\d+>> Or [<<Not1>>,<<Not2>>]
- /// CHECK-DAG: Return [<<Or>>]
-
- /// CHECK-START: int Main.$opt$noinline$regressInputsAway(int, int) instruction_simplifier (after)
- /// CHECK-DAG: <<P1:i\d+>> ParameterValue
- /// CHECK-DAG: <<P2:i\d+>> ParameterValue
- /// CHECK-DAG: <<Cst1:i\d+>> IntConstant 1
- /// CHECK-DAG: <<AddP1:i\d+>> Add [<<P1>>,<<Cst1>>]
- /// CHECK-DAG: <<AddP2:i\d+>> Add [<<P2>>,<<Cst1>>]
- /// CHECK-DAG: <<And:i\d+>> And [<<AddP1>>,<<AddP2>>]
- /// CHECK-DAG: <<Not:i\d+>> Not [<<And>>]
- /// CHECK-DAG: Return [<<Not>>]
-
- /// CHECK-START: int Main.$opt$noinline$regressInputsAway(int, int) instruction_simplifier (after)
- /// CHECK-DAG: Not
- /// CHECK-NOT: Not
-
- /// CHECK-START: int Main.$opt$noinline$regressInputsAway(int, int) instruction_simplifier (after)
- /// CHECK-NOT: Or
-
- public static int $opt$noinline$regressInputsAway(int a, int b) {
- if (doThrow) throw new Error();
- int a1 = a + 1;
- int not_a1 = ~a1;
- int b1 = b + 1;
- int not_b1 = ~b1;
- return not_a1 | not_b1;
- }
-
- /**
- * Test transformation of Not/Not/Xor into Xor.
- */
-
- // See first note above.
- /// CHECK-START: int Main.$opt$noinline$notXorToXor(int, int) instruction_simplifier (before)
- /// CHECK-DAG: <<P1:i\d+>> ParameterValue
- /// CHECK-DAG: <<P2:i\d+>> ParameterValue
- /// CHECK-DAG: <<CstM1:i\d+>> IntConstant -1
- /// CHECK-DAG: <<Not1:i\d+>> Xor [<<P1>>,<<CstM1>>]
- /// CHECK-DAG: <<Not2:i\d+>> Xor [<<P2>>,<<CstM1>>]
- /// CHECK-DAG: <<Xor:i\d+>> Xor [<<Not1>>,<<Not2>>]
- /// CHECK-DAG: Return [<<Xor>>]
-
- /// CHECK-START: int Main.$opt$noinline$notXorToXor(int, int) instruction_simplifier (after)
- /// CHECK-DAG: <<P1:i\d+>> ParameterValue
- /// CHECK-DAG: <<P2:i\d+>> ParameterValue
- /// CHECK-DAG: <<Xor:i\d+>> Xor [<<P1>>,<<P2>>]
- /// CHECK-DAG: Return [<<Xor>>]
-
- /// CHECK-START: int Main.$opt$noinline$notXorToXor(int, int) instruction_simplifier (after)
- /// CHECK-NOT: Not
-
- public static int $opt$noinline$notXorToXor(int a, int b) {
- if (doThrow) throw new Error();
- return ~a ^ ~b;
- }
-
- /**
- * Test transformation of Not/Not/Xor into Xor for boolean negations.
- * Note that the graph before this instruction simplification pass does not
- * contain `HBooleanNot` instructions. This is because this transformation
- * follows the optimization of `HSelect` to `HBooleanNot` occurring in the
- * same pass.
- */
-
- /// CHECK-START: boolean Main.$opt$noinline$booleanNotXorToXor(boolean, boolean) instruction_simplifier$after_gvn (before)
- /// CHECK-DAG: <<P1:z\d+>> ParameterValue
- /// CHECK-DAG: <<P2:z\d+>> ParameterValue
- /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
- /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1
- /// CHECK-DAG: <<Select1:i\d+>> Select [<<Const1>>,<<Const0>>,<<P1>>]
- /// CHECK-DAG: <<Select2:i\d+>> Select [<<Const1>>,<<Const0>>,<<P2>>]
- /// CHECK-DAG: <<Xor:i\d+>> Xor [<<Select1>>,<<Select2>>]
- /// CHECK-DAG: Return [<<Xor>>]
-
- /// CHECK-START: boolean Main.$opt$noinline$booleanNotXorToXor(boolean, boolean) instruction_simplifier$after_gvn (after)
- /// CHECK-DAG: <<Cond1:z\d+>> ParameterValue
- /// CHECK-DAG: <<Cond2:z\d+>> ParameterValue
- /// CHECK-DAG: <<Xor:i\d+>> Xor [<<Cond1>>,<<Cond2>>]
- /// CHECK-DAG: Return [<<Xor>>]
-
- /// CHECK-START: boolean Main.$opt$noinline$booleanNotXorToXor(boolean, boolean) instruction_simplifier$after_bce (after)
- /// CHECK-NOT: BooleanNot
-
- public static boolean $opt$noinline$booleanNotXorToXor(boolean a, boolean b) {
- if (doThrow) throw new Error();
- return !a ^ !b;
- }
-
- /**
- * Check that no transformation is done when one Not has multiple uses.
- */
-
- /// CHECK-START: int Main.$opt$noinline$notMultipleUses(int, int) instruction_simplifier (before)
- /// CHECK-DAG: <<P1:i\d+>> ParameterValue
- /// CHECK-DAG: <<P2:i\d+>> ParameterValue
- /// CHECK-DAG: <<CstM1:i\d+>> IntConstant -1
- /// CHECK-DAG: <<One:i\d+>> IntConstant 1
- /// CHECK-DAG: <<Not2:i\d+>> Xor [<<P2>>,<<CstM1>>]
- /// CHECK-DAG: <<And2:i\d+>> And [<<Not2>>,<<One>>]
- /// CHECK-DAG: <<Not1:i\d+>> Xor [<<P1>>,<<CstM1>>]
- /// CHECK-DAG: <<And1:i\d+>> And [<<Not1>>,<<Not2>>]
- /// CHECK-DAG: <<Add:i\d+>> Add [<<And2>>,<<And1>>]
- /// CHECK-DAG: Return [<<Add>>]
-
- /// CHECK-START: int Main.$opt$noinline$notMultipleUses(int, int) instruction_simplifier (after)
- /// CHECK-DAG: <<P1:i\d+>> ParameterValue
- /// CHECK-DAG: <<P2:i\d+>> ParameterValue
- /// CHECK-DAG: <<One:i\d+>> IntConstant 1
- /// CHECK-DAG: <<Not2:i\d+>> Not [<<P2>>]
- /// CHECK-DAG: <<And2:i\d+>> And [<<Not2>>,<<One>>]
- /// CHECK-DAG: <<Not1:i\d+>> Not [<<P1>>]
- /// CHECK-DAG: <<And1:i\d+>> And [<<Not1>>,<<Not2>>]
- /// CHECK-DAG: <<Add:i\d+>> Add [<<And2>>,<<And1>>]
- /// CHECK-DAG: Return [<<Add>>]
-
- /// CHECK-START: int Main.$opt$noinline$notMultipleUses(int, int) instruction_simplifier (after)
- /// CHECK-NOT: Or
-
- public static int $opt$noinline$notMultipleUses(int a, int b) {
- if (doThrow) throw new Error();
- int tmp = ~b;
- return (tmp & 0x1) + (~a & tmp);
- }
-
- public static void main(String[] args) {
- assertIntEquals(~0xff, $opt$noinline$andToOr(0xf, 0xff));
+ public static void main(String[] args) throws Exception {
+ assertIntEquals(~0xff, $noinline$runSmaliTest("$opt$noinline$andToOrV2", int.class, 0xf, 0xff));
assertIntEquals(~0xff, $noinline$runSmaliTest("$opt$noinline$andToOr", int.class, 0xf, 0xff));
- assertEquals(true, $opt$noinline$booleanAndToOr(false, false));
+ assertEquals(true, $noinline$runSmaliTest("$opt$noinline$booleanAndToOrV2", boolean.class, false, false));
assertEquals(true, $noinline$runSmaliTest("$opt$noinline$booleanAndToOr", boolean.class, false, false));
- assertLongEquals(~0xf, $opt$noinline$orToAnd(0xf, 0xff));
+ assertLongEquals(~0xf, $noinline$runSmaliTest("$opt$noinline$orToAndV2", long.class, 0xfL, 0xffL));
assertLongEquals(~0xf, $noinline$runSmaliTest("$opt$noinline$orToAnd", long.class, 0xfL, 0xffL));
- assertEquals(false, $opt$noinline$booleanOrToAnd(true, true));
+ assertEquals(false, $noinline$runSmaliTest("$opt$noinline$booleanOrToAndV2", boolean.class, true, true));
assertEquals(false, $noinline$runSmaliTest("$opt$noinline$booleanOrToAnd", boolean.class, true, true));
- assertIntEquals(-1, $opt$noinline$regressInputsAway(0xf, 0xff));
+ assertIntEquals(-1, $noinline$runSmaliTest("$opt$noinline$regressInputsAwayV2", int.class, 0xf, 0xff));
assertIntEquals(-1, $noinline$runSmaliTest("$opt$noinline$regressInputsAway", int.class, 0xf, 0xff));
- assertIntEquals(0xf0, $opt$noinline$notXorToXor(0xf, 0xff));
+ assertIntEquals(0xf0, $noinline$runSmaliTest("$opt$noinline$notXorToXorV2", int.class, 0xf, 0xff));
assertIntEquals(0xf0, $noinline$runSmaliTest("$opt$noinline$notXorToXor", int.class, 0xf, 0xff));
- assertEquals(true, $opt$noinline$booleanNotXorToXor(true, false));
+ assertEquals(true, $noinline$runSmaliTest("$opt$noinline$booleanNotXorToXorV2", boolean.class, true, false));
assertEquals(true, $noinline$runSmaliTest("$opt$noinline$booleanNotXorToXor", boolean.class, true, false));
- assertIntEquals(~0xff, $opt$noinline$notMultipleUses(0xf, 0xff));
+ assertIntEquals(~0xff, $noinline$runSmaliTest("$opt$noinline$notMultipleUsesV2", int.class, 0xf, 0xff));
assertIntEquals(~0xff, $noinline$runSmaliTest("$opt$noinline$notMultipleUses", int.class, 0xf, 0xff));
}
}
diff --git a/test/565-checker-rotate/build b/test/565-checker-rotate/build
deleted file mode 100644
index 10ffcc537d..0000000000
--- a/test/565-checker-rotate/build
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2017 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# See b/65168732
-export USE_D8=false
-
-./default-build "$@"
diff --git a/test/565-checker-rotate/smali/Main2.smali b/test/565-checker-rotate/smali/Main2.smali
new file mode 100644
index 0000000000..ca5027e971
--- /dev/null
+++ b/test/565-checker-rotate/smali/Main2.smali
@@ -0,0 +1,165 @@
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LMain2;
+.super Ljava/lang/Object;
+
+## CHECK-START: int Main2.rotateLeftBoolean(boolean, int) intrinsics_recognition (after)
+## CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
+## CHECK: <<ArgVal:z\d+>> ParameterValue
+## CHECK: <<ArgDist:i\d+>> ParameterValue
+## CHECK-DAG: <<Zero:i\d+>> IntConstant 0
+## CHECK-DAG: <<One:i\d+>> IntConstant 1
+## CHECK-DAG: <<Val:i\d+>> Phi [<<One>>,<<Zero>>]
+## CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<Val>>,<<ArgDist>>,<<Method>>] intrinsic:IntegerRotateLeft
+## CHECK-DAG: Return [<<Result>>]
+
+## CHECK-START: int Main2.rotateLeftBoolean(boolean, int) instruction_simplifier (after)
+## CHECK: <<ArgVal:z\d+>> ParameterValue
+## CHECK: <<ArgDist:i\d+>> ParameterValue
+## CHECK-DAG: <<Zero:i\d+>> IntConstant 0
+## CHECK-DAG: <<One:i\d+>> IntConstant 1
+## CHECK-DAG: <<Val:i\d+>> Phi [<<One>>,<<Zero>>]
+## CHECK-DAG: <<NegDist:i\d+>> Neg [<<ArgDist>>]
+## CHECK-DAG: <<Result:i\d+>> Ror [<<Val>>,<<NegDist>>]
+## CHECK-DAG: Return [<<Result>>]
+
+## CHECK-START: int Main2.rotateLeftBoolean(boolean, int) instruction_simplifier (after)
+## CHECK-NOT: InvokeStaticOrDirect
+
+## CHECK-START: int Main2.rotateLeftBoolean(boolean, int) select_generator (after)
+## CHECK: <<ArgVal:z\d+>> ParameterValue
+## CHECK: <<ArgDist:i\d+>> ParameterValue
+## CHECK-DAG: <<Zero:i\d+>> IntConstant 0
+## CHECK-DAG: <<One:i\d+>> IntConstant 1
+## CHECK-DAG: <<SelVal:i\d+>> Select [<<Zero>>,<<One>>,<<ArgVal>>]
+## CHECK-DAG: <<NegDist:i\d+>> Neg [<<ArgDist>>]
+## CHECK-DAG: <<Result:i\d+>> Ror [<<SelVal>>,<<NegDist>>]
+## CHECK-DAG: Return [<<Result>>]
+
+## CHECK-START: int Main2.rotateLeftBoolean(boolean, int) select_generator (after)
+## CHECK-NOT: Phi
+
+## CHECK-START: int Main2.rotateLeftBoolean(boolean, int) instruction_simplifier$after_bce (after)
+## CHECK: <<ArgVal:z\d+>> ParameterValue
+## CHECK: <<ArgDist:i\d+>> ParameterValue
+## CHECK-DAG: <<NegDist:i\d+>> Neg [<<ArgDist>>]
+## CHECK-DAG: <<Result:i\d+>> Ror [<<ArgVal>>,<<NegDist>>]
+## CHECK-DAG: Return [<<Result>>]
+
+## CHECK-START: int Main2.rotateLeftBoolean(boolean, int) instruction_simplifier$after_bce (after)
+## CHECK-NOT: Select
+
+# Original java source
+#
+# private static int rotateLeftBoolean(boolean value, int distance) {
+# return Integer.rotateLeft(value ? 1 : 0, distance);
+# }
+
+.method public static rotateLeftBoolean(ZI)I
+ .registers 3
+ .param p0, "value" # Z
+ .param p1, "distance" # I
+
+ .prologue
+ .line 66
+ if-eqz p0, :cond_8
+
+ const/4 v0, 0x1
+
+ :goto_3
+ invoke-static {v0, p1}, Ljava/lang/Integer;->rotateLeft(II)I
+
+ move-result v0
+
+ return v0
+
+ :cond_8
+ const/4 v0, 0x0
+
+ goto :goto_3
+.end method
+
+## CHECK-START: int Main2.rotateRightBoolean(boolean, int) intrinsics_recognition (after)
+## CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
+## CHECK: <<ArgVal:z\d+>> ParameterValue
+## CHECK: <<ArgDist:i\d+>> ParameterValue
+## CHECK-DAG: <<Zero:i\d+>> IntConstant 0
+## CHECK-DAG: <<One:i\d+>> IntConstant 1
+## CHECK-DAG: <<Val:i\d+>> Phi [<<One>>,<<Zero>>]
+## CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<Val>>,<<ArgDist>>,<<Method>>] intrinsic:IntegerRotateRight
+## CHECK-DAG: Return [<<Result>>]
+
+## CHECK-START: int Main2.rotateRightBoolean(boolean, int) instruction_simplifier (after)
+## CHECK: <<ArgVal:z\d+>> ParameterValue
+## CHECK: <<ArgDist:i\d+>> ParameterValue
+## CHECK-DAG: <<Zero:i\d+>> IntConstant 0
+## CHECK-DAG: <<One:i\d+>> IntConstant 1
+## CHECK-DAG: <<Val:i\d+>> Phi [<<One>>,<<Zero>>]
+## CHECK-DAG: <<Result:i\d+>> Ror [<<Val>>,<<ArgDist>>]
+## CHECK-DAG: Return [<<Result>>]
+
+## CHECK-START: int Main2.rotateRightBoolean(boolean, int) instruction_simplifier (after)
+## CHECK-NOT: InvokeStaticOrDirect
+
+## CHECK-START: int Main2.rotateRightBoolean(boolean, int) select_generator (after)
+## CHECK: <<ArgVal:z\d+>> ParameterValue
+## CHECK: <<ArgDist:i\d+>> ParameterValue
+## CHECK-DAG: <<Zero:i\d+>> IntConstant 0
+## CHECK-DAG: <<One:i\d+>> IntConstant 1
+## CHECK-DAG: <<SelVal:i\d+>> Select [<<Zero>>,<<One>>,<<ArgVal>>]
+## CHECK-DAG: <<Result:i\d+>> Ror [<<SelVal>>,<<ArgDist>>]
+## CHECK-DAG: Return [<<Result>>]
+
+## CHECK-START: int Main2.rotateRightBoolean(boolean, int) select_generator (after)
+## CHECK-NOT: Phi
+
+## CHECK-START: int Main2.rotateRightBoolean(boolean, int) instruction_simplifier$after_bce (after)
+## CHECK: <<ArgVal:z\d+>> ParameterValue
+## CHECK: <<ArgDist:i\d+>> ParameterValue
+## CHECK-DAG: <<Result:i\d+>> Ror [<<ArgVal>>,<<ArgDist>>]
+## CHECK-DAG: Return [<<Result>>]
+
+## CHECK-START: int Main2.rotateRightBoolean(boolean, int) instruction_simplifier$after_bce (after)
+## CHECK-NOT: Select
+
+# Original java source:
+#
+# private static int rotateRightBoolean(boolean value, int distance) {
+# return Integer.rotateRight(value ? 1 : 0, distance);
+# }
+
+.method public static rotateRightBoolean(ZI)I
+ .registers 3
+ .param p0, "value" # Z
+ .param p1, "distance" # I
+
+ .prologue
+ .line 219
+ if-eqz p0, :cond_8
+
+ const/4 v0, 0x1
+
+ :goto_3
+ invoke-static {v0, p1}, Ljava/lang/Integer;->rotateRight(II)I
+
+ move-result v0
+
+ return v0
+
+ :cond_8
+ const/4 v0, 0x0
+
+ goto :goto_3
+.end method
diff --git a/test/565-checker-rotate/src-art/Main.java b/test/565-checker-rotate/src-art/Main.java
new file mode 100644
index 0000000000..b9e1315bd4
--- /dev/null
+++ b/test/565-checker-rotate/src-art/Main.java
@@ -0,0 +1,546 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+
+public class Main {
+
+ private static Class main2;
+
+ /// CHECK-START: int Main.rotateLeftByte(byte, int) intrinsics_recognition (after)
+ /// CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
+ /// CHECK: <<ArgVal:b\d+>> ParameterValue
+ /// CHECK: <<ArgDist:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>,<<Method>>] intrinsic:IntegerRotateLeft
+ /// CHECK-DAG: Return [<<Result>>]
+
+ /// CHECK-START: int Main.rotateLeftByte(byte, int) instruction_simplifier (after)
+ /// CHECK: <<ArgVal:b\d+>> ParameterValue
+ /// CHECK: <<ArgDist:i\d+>> ParameterValue
+ /// CHECK-DAG: <<NegDist:i\d+>> Neg [<<ArgDist>>]
+ /// CHECK-DAG: <<Result:i\d+>> Ror [<<ArgVal>>,<<NegDist>>]
+ /// CHECK-DAG: Return [<<Result>>]
+
+ /// CHECK-START: int Main.rotateLeftByte(byte, int) instruction_simplifier (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+
+ private static int rotateLeftByte(byte value, int distance) {
+ return Integer.rotateLeft(value, distance);
+ }
+
+ /// CHECK-START: int Main.rotateLeftShort(short, int) intrinsics_recognition (after)
+ /// CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
+ /// CHECK: <<ArgVal:s\d+>> ParameterValue
+ /// CHECK: <<ArgDist:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>,<<Method>>] intrinsic:IntegerRotateLeft
+ /// CHECK-DAG: Return [<<Result>>]
+
+ /// CHECK-START: int Main.rotateLeftShort(short, int) instruction_simplifier (after)
+ /// CHECK: <<ArgVal:s\d+>> ParameterValue
+ /// CHECK: <<ArgDist:i\d+>> ParameterValue
+ /// CHECK-DAG: <<NegDist:i\d+>> Neg [<<ArgDist>>]
+ /// CHECK-DAG: <<Result:i\d+>> Ror [<<ArgVal>>,<<NegDist>>]
+ /// CHECK-DAG: Return [<<Result>>]
+
+ /// CHECK-START: int Main.rotateLeftShort(short, int) instruction_simplifier (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+
+ private static int rotateLeftShort(short value, int distance) {
+ return Integer.rotateLeft(value, distance);
+ }
+
+ /// CHECK-START: int Main.rotateLeftChar(char, int) intrinsics_recognition (after)
+ /// CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
+ /// CHECK: <<ArgVal:c\d+>> ParameterValue
+ /// CHECK: <<ArgDist:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>,<<Method>>] intrinsic:IntegerRotateLeft
+ /// CHECK-DAG: Return [<<Result>>]
+
+ /// CHECK-START: int Main.rotateLeftChar(char, int) instruction_simplifier (after)
+ /// CHECK: <<ArgVal:c\d+>> ParameterValue
+ /// CHECK: <<ArgDist:i\d+>> ParameterValue
+ /// CHECK-DAG: <<NegDist:i\d+>> Neg [<<ArgDist>>]
+ /// CHECK-DAG: <<Result:i\d+>> Ror [<<ArgVal>>,<<NegDist>>]
+ /// CHECK-DAG: Return [<<Result>>]
+
+ /// CHECK-START: int Main.rotateLeftChar(char, int) instruction_simplifier (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+
+ private static int rotateLeftChar(char value, int distance) {
+ return Integer.rotateLeft(value, distance);
+ }
+
+ /// CHECK-START: int Main.rotateLeftInt(int, int) intrinsics_recognition (after)
+ /// CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
+ /// CHECK: <<ArgVal:i\d+>> ParameterValue
+ /// CHECK: <<ArgDist:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>,<<Method>>] intrinsic:IntegerRotateLeft
+ /// CHECK-DAG: Return [<<Result>>]
+
+ /// CHECK-START: int Main.rotateLeftInt(int, int) instruction_simplifier (after)
+ /// CHECK: <<ArgVal:i\d+>> ParameterValue
+ /// CHECK: <<ArgDist:i\d+>> ParameterValue
+ /// CHECK-DAG: <<NegDist:i\d+>> Neg [<<ArgDist>>]
+ /// CHECK-DAG: <<Result:i\d+>> Ror [<<ArgVal>>,<<NegDist>>]
+ /// CHECK-DAG: Return [<<Result>>]
+
+ /// CHECK-START: int Main.rotateLeftInt(int, int) instruction_simplifier (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+
+ private static int rotateLeftInt(int value, int distance) {
+ return Integer.rotateLeft(value, distance);
+ }
+
+ /// CHECK-START: long Main.rotateLeftLong(long, int) intrinsics_recognition (after)
+ /// CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
+ /// CHECK: <<ArgVal:j\d+>> ParameterValue
+ /// CHECK: <<ArgDist:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Result:j\d+>> InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>,<<Method>>] intrinsic:LongRotateLeft
+ /// CHECK-DAG: Return [<<Result>>]
+
+ /// CHECK-START: long Main.rotateLeftLong(long, int) instruction_simplifier (after)
+ /// CHECK: <<ArgVal:j\d+>> ParameterValue
+ /// CHECK: <<ArgDist:i\d+>> ParameterValue
+ /// CHECK-DAG: <<NegDist:i\d+>> Neg [<<ArgDist>>]
+ /// CHECK-DAG: <<Result:j\d+>> Ror [<<ArgVal>>,<<NegDist>>]
+ /// CHECK-DAG: Return [<<Result>>]
+
+ /// CHECK-START: long Main.rotateLeftLong(long, int) instruction_simplifier (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+
+ private static long rotateLeftLong(long value, int distance) {
+ return Long.rotateLeft(value, distance);
+ }
+
+ /// CHECK-START: int Main.rotateRightByte(byte, int) intrinsics_recognition (after)
+ /// CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
+ /// CHECK: <<ArgVal:b\d+>> ParameterValue
+ /// CHECK: <<ArgDist:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>,<<Method>>] intrinsic:IntegerRotateRight
+ /// CHECK-DAG: Return [<<Result>>]
+
+ /// CHECK-START: int Main.rotateRightByte(byte, int) instruction_simplifier (after)
+ /// CHECK: <<ArgVal:b\d+>> ParameterValue
+ /// CHECK: <<ArgDist:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Result:i\d+>> Ror [<<ArgVal>>,<<ArgDist>>]
+ /// CHECK-DAG: Return [<<Result>>]
+
+ /// CHECK-START: int Main.rotateRightByte(byte, int) instruction_simplifier (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+
+ private static int rotateRightByte(byte value, int distance) {
+ return Integer.rotateRight(value, distance);
+ }
+
+ /// CHECK-START: int Main.rotateRightShort(short, int) intrinsics_recognition (after)
+ /// CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
+ /// CHECK: <<ArgVal:s\d+>> ParameterValue
+ /// CHECK: <<ArgDist:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>,<<Method>>] intrinsic:IntegerRotateRight
+ /// CHECK-DAG: Return [<<Result>>]
+
+ /// CHECK-START: int Main.rotateRightShort(short, int) instruction_simplifier (after)
+ /// CHECK: <<ArgVal:s\d+>> ParameterValue
+ /// CHECK: <<ArgDist:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Result:i\d+>> Ror [<<ArgVal>>,<<ArgDist>>]
+ /// CHECK-DAG: Return [<<Result>>]
+
+ /// CHECK-START: int Main.rotateRightShort(short, int) instruction_simplifier (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+
+ private static int rotateRightShort(short value, int distance) {
+ return Integer.rotateRight(value, distance);
+ }
+
+ /// CHECK-START: int Main.rotateRightChar(char, int) intrinsics_recognition (after)
+ /// CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
+ /// CHECK: <<ArgVal:c\d+>> ParameterValue
+ /// CHECK: <<ArgDist:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>,<<Method>>] intrinsic:IntegerRotateRight
+ /// CHECK-DAG: Return [<<Result>>]
+
+ /// CHECK-START: int Main.rotateRightChar(char, int) instruction_simplifier (after)
+ /// CHECK: <<ArgVal:c\d+>> ParameterValue
+ /// CHECK: <<ArgDist:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Result:i\d+>> Ror [<<ArgVal>>,<<ArgDist>>]
+ /// CHECK-DAG: Return [<<Result>>]
+
+ /// CHECK-START: int Main.rotateRightChar(char, int) instruction_simplifier (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+
+ private static int rotateRightChar(char value, int distance) {
+ return Integer.rotateRight(value, distance);
+ }
+
+ /// CHECK-START: int Main.rotateRightInt(int, int) intrinsics_recognition (after)
+ /// CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
+ /// CHECK: <<ArgVal:i\d+>> ParameterValue
+ /// CHECK: <<ArgDist:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>,<<Method>>] intrinsic:IntegerRotateRight
+ /// CHECK-DAG: Return [<<Result>>]
+
+ /// CHECK-START: int Main.rotateRightInt(int, int) instruction_simplifier (after)
+ /// CHECK: <<ArgVal:i\d+>> ParameterValue
+ /// CHECK: <<ArgDist:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Result:i\d+>> Ror [<<ArgVal>>,<<ArgDist>>]
+ /// CHECK-DAG: Return [<<Result>>]
+
+ /// CHECK-START: int Main.rotateRightInt(int, int) instruction_simplifier (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+
+ private static int rotateRightInt(int value, int distance) {
+ return Integer.rotateRight(value, distance);
+ }
+
+ /// CHECK-START: long Main.rotateRightLong(long, int) intrinsics_recognition (after)
+ /// CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
+ /// CHECK: <<ArgVal:j\d+>> ParameterValue
+ /// CHECK: <<ArgDist:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Result:j\d+>> InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>,<<Method>>] intrinsic:LongRotateRight
+ /// CHECK-DAG: Return [<<Result>>]
+
+ /// CHECK-START: long Main.rotateRightLong(long, int) instruction_simplifier (after)
+ /// CHECK: <<ArgVal:j\d+>> ParameterValue
+ /// CHECK: <<ArgDist:i\d+>> ParameterValue
+ /// CHECK-DAG: <<Result:j\d+>> Ror [<<ArgVal>>,<<ArgDist>>]
+ /// CHECK-DAG: Return [<<Result>>]
+
+ /// CHECK-START: long Main.rotateRightLong(long, int) instruction_simplifier (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+
+ private static long rotateRightLong(long value, int distance) {
+ return Long.rotateRight(value, distance);
+ }
+
+
+ /// CHECK-START: int Main.rotateLeftIntWithByteDistance(int, byte) intrinsics_recognition (after)
+ /// CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
+ /// CHECK: <<ArgVal:i\d+>> ParameterValue
+ /// CHECK: <<ArgDist:b\d+>> ParameterValue
+ /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>,<<Method>>] intrinsic:IntegerRotateLeft
+ /// CHECK-DAG: Return [<<Result>>]
+
+ /// CHECK-START: int Main.rotateLeftIntWithByteDistance(int, byte) instruction_simplifier (after)
+ /// CHECK: <<ArgVal:i\d+>> ParameterValue
+ /// CHECK: <<ArgDist:b\d+>> ParameterValue
+ /// CHECK-DAG: <<NegDist:i\d+>> Neg [<<ArgDist>>]
+ /// CHECK-DAG: <<Result:i\d+>> Ror [<<ArgVal>>,<<NegDist>>]
+ /// CHECK-DAG: Return [<<Result>>]
+
+ /// CHECK-START: int Main.rotateLeftIntWithByteDistance(int, byte) instruction_simplifier (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+
+ private static int rotateLeftIntWithByteDistance(int value, byte distance) {
+ return Integer.rotateLeft(value, distance);
+ }
+
+ /// CHECK-START: int Main.rotateRightIntWithByteDistance(int, byte) intrinsics_recognition (after)
+ /// CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
+ /// CHECK: <<ArgVal:i\d+>> ParameterValue
+ /// CHECK: <<ArgDist:b\d+>> ParameterValue
+ /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>,<<Method>>] intrinsic:IntegerRotateRight
+ /// CHECK-DAG: Return [<<Result>>]
+
+ /// CHECK-START: int Main.rotateRightIntWithByteDistance(int, byte) instruction_simplifier (after)
+ /// CHECK: <<ArgVal:i\d+>> ParameterValue
+ /// CHECK: <<ArgDist:b\d+>> ParameterValue
+ /// CHECK-DAG: <<Result:i\d+>> Ror [<<ArgVal>>,<<ArgDist>>]
+ /// CHECK-DAG: Return [<<Result>>]
+
+ /// CHECK-START: int Main.rotateRightIntWithByteDistance(int, byte) instruction_simplifier (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+
+ private static int rotateRightIntWithByteDistance(int value, byte distance) {
+ return Integer.rotateRight(value, distance);
+ }
+
+
+ public static void testRotateLeftBoolean() throws Exception {
+ Method rotateLeftBoolean = main2.getMethod("rotateLeftBoolean", boolean.class, int.class);
+ for (int i = 0; i < 40; i++) { // overshoot a bit
+ int j = i & 31;
+ expectEqualsInt(0, (int)rotateLeftBoolean.invoke(null, false, i));
+ expectEqualsInt(1 << i, (int)rotateLeftBoolean.invoke(null, true, i));
+ }
+ }
+
+ public static void testRotateLeftByte() {
+ expectEqualsInt(0x00000001, rotateLeftByte((byte)0x01, 0));
+ expectEqualsInt(0x00000002, rotateLeftByte((byte)0x01, 1));
+ expectEqualsInt(0x80000000, rotateLeftByte((byte)0x01, 31));
+ expectEqualsInt(0x00000001, rotateLeftByte((byte)0x01, 32)); // overshoot
+ expectEqualsInt(0xFFFFFF03, rotateLeftByte((byte)0x81, 1));
+ expectEqualsInt(0xFFFFFE07, rotateLeftByte((byte)0x81, 2));
+ expectEqualsInt(0x00000120, rotateLeftByte((byte)0x12, 4));
+ expectEqualsInt(0xFFFF9AFF, rotateLeftByte((byte)0x9A, 8));
+ for (int i = 0; i < 40; i++) { // overshoot a bit
+ int j = i & 31;
+ expectEqualsInt(0x00000000, rotateLeftByte((byte)0x0000, i));
+ expectEqualsInt(0xFFFFFFFF, rotateLeftByte((byte)0xFFFF, i));
+ expectEqualsInt((1 << j), rotateLeftByte((byte)0x0001, i));
+ expectEqualsInt((0x12 << j) | (0x12 >>> -j), rotateLeftByte((byte)0x12, i));
+ }
+ }
+
+ public static void testRotateLeftShort() {
+ expectEqualsInt(0x00000001, rotateLeftShort((short)0x0001, 0));
+ expectEqualsInt(0x00000002, rotateLeftShort((short)0x0001, 1));
+ expectEqualsInt(0x80000000, rotateLeftShort((short)0x0001, 31));
+ expectEqualsInt(0x00000001, rotateLeftShort((short)0x0001, 32)); // overshoot
+ expectEqualsInt(0xFFFF0003, rotateLeftShort((short)0x8001, 1));
+ expectEqualsInt(0xFFFE0007, rotateLeftShort((short)0x8001, 2));
+ expectEqualsInt(0x00012340, rotateLeftShort((short)0x1234, 4));
+ expectEqualsInt(0xFF9ABCFF, rotateLeftShort((short)0x9ABC, 8));
+ for (int i = 0; i < 40; i++) { // overshoot a bit
+ int j = i & 31;
+ expectEqualsInt(0x00000000, rotateLeftShort((short)0x0000, i));
+ expectEqualsInt(0xFFFFFFFF, rotateLeftShort((short)0xFFFF, i));
+ expectEqualsInt((1 << j), rotateLeftShort((short)0x0001, i));
+ expectEqualsInt((0x1234 << j) | (0x1234 >>> -j), rotateLeftShort((short)0x1234, i));
+ }
+ }
+
+ public static void testRotateLeftChar() {
+ expectEqualsInt(0x00000001, rotateLeftChar((char)0x0001, 0));
+ expectEqualsInt(0x00000002, rotateLeftChar((char)0x0001, 1));
+ expectEqualsInt(0x80000000, rotateLeftChar((char)0x0001, 31));
+ expectEqualsInt(0x00000001, rotateLeftChar((char)0x0001, 32)); // overshoot
+ expectEqualsInt(0x00010002, rotateLeftChar((char)0x8001, 1));
+ expectEqualsInt(0x00020004, rotateLeftChar((char)0x8001, 2));
+ expectEqualsInt(0x00012340, rotateLeftChar((char)0x1234, 4));
+ expectEqualsInt(0x009ABC00, rotateLeftChar((char)0x9ABC, 8));
+ expectEqualsInt(0x00FF0000, rotateLeftChar((char)0xFF00, 8));
+ for (int i = 0; i < 40; i++) { // overshoot a bit
+ int j = i & 31;
+ expectEqualsInt(0x00000000, rotateLeftChar((char)0x0000, i));
+ expectEqualsInt((1 << j), rotateLeftChar((char)0x0001, i));
+ expectEqualsInt((0x1234 << j) | (0x1234 >>> -j), rotateLeftChar((char)0x1234, i));
+ }
+ }
+
+ public static void testRotateLeftInt() {
+ expectEqualsInt(0x00000001, rotateLeftInt(0x00000001, 0));
+ expectEqualsInt(0x00000002, rotateLeftInt(0x00000001, 1));
+ expectEqualsInt(0x80000000, rotateLeftInt(0x00000001, 31));
+ expectEqualsInt(0x00000001, rotateLeftInt(0x00000001, 32)); // overshoot
+ expectEqualsInt(0x00000003, rotateLeftInt(0x80000001, 1));
+ expectEqualsInt(0x00000006, rotateLeftInt(0x80000001, 2));
+ expectEqualsInt(0x23456781, rotateLeftInt(0x12345678, 4));
+ expectEqualsInt(0xBCDEF09A, rotateLeftInt(0x9ABCDEF0, 8));
+ for (int i = 0; i < 40; i++) { // overshoot a bit
+ int j = i & 31;
+ expectEqualsInt(0x00000000, rotateLeftInt(0x00000000, i));
+ expectEqualsInt(0xFFFFFFFF, rotateLeftInt(0xFFFFFFFF, i));
+ expectEqualsInt(1 << j, rotateLeftInt(0x00000001, i));
+ expectEqualsInt((0x12345678 << j) | (0x12345678 >>> -j), rotateLeftInt(0x12345678, i));
+ }
+ }
+
+ public static void testRotateLeftLong() {
+ expectEqualsLong(0x0000000000000001L, rotateLeftLong(0x0000000000000001L, 0));
+ expectEqualsLong(0x0000000000000002L, rotateLeftLong(0x0000000000000001L, 1));
+ expectEqualsLong(0x8000000000000000L, rotateLeftLong(0x0000000000000001L, 63));
+ expectEqualsLong(0x0000000000000001L, rotateLeftLong(0x0000000000000001L, 64)); // overshoot
+ expectEqualsLong(0x0000000000000003L, rotateLeftLong(0x8000000000000001L, 1));
+ expectEqualsLong(0x0000000000000006L, rotateLeftLong(0x8000000000000001L, 2));
+ expectEqualsLong(0x23456789ABCDEF01L, rotateLeftLong(0x123456789ABCDEF0L, 4));
+ expectEqualsLong(0x3456789ABCDEF012L, rotateLeftLong(0x123456789ABCDEF0L, 8));
+ for (int i = 0; i < 70; i++) { // overshoot a bit
+ int j = i & 63;
+ expectEqualsLong(0x0000000000000000L, rotateLeftLong(0x0000000000000000L, i));
+ expectEqualsLong(0xFFFFFFFFFFFFFFFFL, rotateLeftLong(0xFFFFFFFFFFFFFFFFL, i));
+ expectEqualsLong(1L << j, rotateLeftLong(0x0000000000000001, i));
+ expectEqualsLong((0x123456789ABCDEF0L << j) | (0x123456789ABCDEF0L >>> -j),
+ rotateLeftLong(0x123456789ABCDEF0L, i));
+ }
+ }
+
+ public static void testRotateRightBoolean() throws Exception {
+ Method rotateRightBoolean = main2.getMethod("rotateRightBoolean", boolean.class, int.class);
+ for (int i = 0; i < 40; i++) { // overshoot a bit
+ int j = i & 31;
+ expectEqualsInt(0, (int)rotateRightBoolean.invoke(null, false, i));
+ expectEqualsInt(1 << (32 - i), (int)rotateRightBoolean.invoke(null, true, i));
+ }
+ }
+
+ public static void testRotateRightByte() {
+ expectEqualsInt(0xFFFFFF80, rotateRightByte((byte)0x80, 0));
+ expectEqualsInt(0x7FFFFFC0, rotateRightByte((byte)0x80, 1));
+ expectEqualsInt(0xFFFFFF01, rotateRightByte((byte)0x80, 31));
+ expectEqualsInt(0xFFFFFF80, rotateRightByte((byte)0x80, 32)); // overshoot
+ expectEqualsInt(0xFFFFFFC0, rotateRightByte((byte)0x81, 1));
+ expectEqualsInt(0x7FFFFFE0, rotateRightByte((byte)0x81, 2));
+ expectEqualsInt(0x20000001, rotateRightByte((byte)0x12, 4));
+ expectEqualsInt(0x9AFFFFFF, rotateRightByte((byte)0x9A, 8));
+ for (int i = 0; i < 40; i++) { // overshoot a bit
+ int j = i & 31;
+ expectEqualsInt(0x00000000, rotateRightByte((byte)0x00, i));
+ expectEqualsInt(0xFFFFFFFF, rotateRightByte((byte)0xFF, i));
+ expectEqualsInt(1 << (32 - j), rotateRightByte((byte)0x01, i));
+ expectEqualsInt((0x12 >>> j) | (0x12 << -j), rotateRightByte((byte)0x12, i));
+ }
+ }
+
+ public static void testRotateRightShort() {
+ expectEqualsInt(0xFFFF8000, rotateRightShort((short)0x8000, 0));
+ expectEqualsInt(0x7FFFC000, rotateRightShort((short)0x8000, 1));
+ expectEqualsInt(0xFFFF0001, rotateRightShort((short)0x8000, 31));
+ expectEqualsInt(0xFFFF8000, rotateRightShort((short)0x8000, 32)); // overshoot
+ expectEqualsInt(0xFFFFC000, rotateRightShort((short)0x8001, 1));
+ expectEqualsInt(0x7FFFE000, rotateRightShort((short)0x8001, 2));
+ expectEqualsInt(0x40000123, rotateRightShort((short)0x1234, 4));
+ expectEqualsInt(0xBCFFFF9A, rotateRightShort((short)0x9ABC, 8));
+ for (int i = 0; i < 40; i++) { // overshoot a bit
+ int j = i & 31;
+ expectEqualsInt(0x00000000, rotateRightShort((short)0x0000, i));
+ expectEqualsInt(0xFFFFFFFF, rotateRightShort((short)0xFFFF, i));
+ expectEqualsInt(1 << (32 - j), rotateRightShort((short)0x0001, i));
+ expectEqualsInt((0x1234 >>> j) | (0x1234 << -j), rotateRightShort((short)0x1234, i));
+ }
+ }
+
+ public static void testRotateRightChar() {
+ expectEqualsInt(0x00008000, rotateRightChar((char)0x8000, 0));
+ expectEqualsInt(0x00004000, rotateRightChar((char)0x8000, 1));
+ expectEqualsInt(0x00010000, rotateRightChar((char)0x8000, 31));
+ expectEqualsInt(0x00008000, rotateRightChar((char)0x8000, 32)); // overshoot
+ expectEqualsInt(0x80004000, rotateRightChar((char)0x8001, 1));
+ expectEqualsInt(0x40002000, rotateRightChar((char)0x8001, 2));
+ expectEqualsInt(0x40000123, rotateRightChar((char)0x1234, 4));
+ expectEqualsInt(0xBC00009A, rotateRightChar((char)0x9ABC, 8));
+ for (int i = 0; i < 40; i++) { // overshoot a bit
+ int j = i & 31;
+ expectEqualsInt(0x00000000, rotateRightChar((char)0x0000, i));
+ expectEqualsInt(1 << (32 - j), rotateRightChar((char)0x0001, i));
+ expectEqualsInt((0x1234 >>> j) | (0x1234 << -j), rotateRightChar((char)0x1234, i));
+ }
+ }
+
+ public static void testRotateRightInt() {
+ expectEqualsInt(0x80000000, rotateRightInt(0x80000000, 0));
+ expectEqualsInt(0x40000000, rotateRightInt(0x80000000, 1));
+ expectEqualsInt(0x00000001, rotateRightInt(0x80000000, 31));
+ expectEqualsInt(0x80000000, rotateRightInt(0x80000000, 32)); // overshoot
+ expectEqualsInt(0xC0000000, rotateRightInt(0x80000001, 1));
+ expectEqualsInt(0x60000000, rotateRightInt(0x80000001, 2));
+ expectEqualsInt(0x81234567, rotateRightInt(0x12345678, 4));
+ expectEqualsInt(0xF09ABCDE, rotateRightInt(0x9ABCDEF0, 8));
+ for (int i = 0; i < 40; i++) { // overshoot a bit
+ int j = i & 31;
+ expectEqualsInt(0x00000000, rotateRightInt(0x00000000, i));
+ expectEqualsInt(0xFFFFFFFF, rotateRightInt(0xFFFFFFFF, i));
+ expectEqualsInt(0x80000000 >>> j, rotateRightInt(0x80000000, i));
+ expectEqualsInt((0x12345678 >>> j) | (0x12345678 << -j), rotateRightInt(0x12345678, i));
+ }
+ }
+
+ public static void testRotateRightLong() {
+ expectEqualsLong(0x8000000000000000L, rotateRightLong(0x8000000000000000L, 0));
+ expectEqualsLong(0x4000000000000000L, rotateRightLong(0x8000000000000000L, 1));
+ expectEqualsLong(0x0000000000000001L, rotateRightLong(0x8000000000000000L, 63));
+ expectEqualsLong(0x8000000000000000L, rotateRightLong(0x8000000000000000L, 64)); // overshoot
+ expectEqualsLong(0xC000000000000000L, rotateRightLong(0x8000000000000001L, 1));
+ expectEqualsLong(0x6000000000000000L, rotateRightLong(0x8000000000000001L, 2));
+ expectEqualsLong(0x0123456789ABCDEFL, rotateRightLong(0x123456789ABCDEF0L, 4));
+ expectEqualsLong(0xF0123456789ABCDEL, rotateRightLong(0x123456789ABCDEF0L, 8));
+ for (int i = 0; i < 70; i++) { // overshoot a bit
+ int j = i & 63;
+ expectEqualsLong(0x0000000000000000L, rotateRightLong(0x0000000000000000L, i));
+ expectEqualsLong(0xFFFFFFFFFFFFFFFFL, rotateRightLong(0xFFFFFFFFFFFFFFFFL, i));
+ expectEqualsLong(0x8000000000000000L >>> j, rotateRightLong(0x8000000000000000L, i));
+ expectEqualsLong((0x123456789ABCDEF0L >>> j) | (0x123456789ABCDEF0L << -j),
+ rotateRightLong(0x123456789ABCDEF0L, i));
+ }
+ }
+
+
+ public static void testRotateLeftIntWithByteDistance() {
+ expectEqualsInt(0x00000001, rotateLeftIntWithByteDistance(0x00000001, (byte)0));
+ expectEqualsInt(0x00000002, rotateLeftIntWithByteDistance(0x00000001, (byte)1));
+ expectEqualsInt(0x80000000, rotateLeftIntWithByteDistance(0x00000001, (byte)31));
+ expectEqualsInt(0x00000001, rotateLeftIntWithByteDistance(0x00000001, (byte)32)); // overshoot
+ expectEqualsInt(0x00000003, rotateLeftIntWithByteDistance(0x80000001, (byte)1));
+ expectEqualsInt(0x00000006, rotateLeftIntWithByteDistance(0x80000001, (byte)2));
+ expectEqualsInt(0x23456781, rotateLeftIntWithByteDistance(0x12345678, (byte)4));
+ expectEqualsInt(0xBCDEF09A, rotateLeftIntWithByteDistance(0x9ABCDEF0, (byte)8));
+ for (byte i = 0; i < 40; i++) { // overshoot a bit
+ byte j = (byte)(i & 31);
+ expectEqualsInt(0x00000000, rotateLeftIntWithByteDistance(0x00000000, i));
+ expectEqualsInt(0xFFFFFFFF, rotateLeftIntWithByteDistance(0xFFFFFFFF, i));
+ expectEqualsInt(1 << j, rotateLeftIntWithByteDistance(0x00000001, i));
+ expectEqualsInt((0x12345678 << j) | (0x12345678 >>> -j),
+ rotateLeftIntWithByteDistance(0x12345678, i));
+ }
+ }
+
+ public static void testRotateRightIntWithByteDistance() {
+ expectEqualsInt(0x80000000, rotateRightIntWithByteDistance(0x80000000, (byte)0));
+ expectEqualsInt(0x40000000, rotateRightIntWithByteDistance(0x80000000, (byte)1));
+ expectEqualsInt(0x00000001, rotateRightIntWithByteDistance(0x80000000, (byte)31));
+ expectEqualsInt(0x80000000, rotateRightIntWithByteDistance(0x80000000, (byte)32)); // overshoot
+ expectEqualsInt(0xC0000000, rotateRightIntWithByteDistance(0x80000001, (byte)1));
+ expectEqualsInt(0x60000000, rotateRightIntWithByteDistance(0x80000001, (byte)2));
+ expectEqualsInt(0x81234567, rotateRightIntWithByteDistance(0x12345678, (byte)4));
+ expectEqualsInt(0xF09ABCDE, rotateRightIntWithByteDistance(0x9ABCDEF0, (byte)8));
+ for (byte i = 0; i < 40; i++) { // overshoot a bit
+ byte j = (byte)(i & 31);
+ expectEqualsInt(0x00000000, rotateRightIntWithByteDistance(0x00000000, i));
+ expectEqualsInt(0xFFFFFFFF, rotateRightIntWithByteDistance(0xFFFFFFFF, i));
+ expectEqualsInt(0x80000000 >>> j, rotateRightIntWithByteDistance(0x80000000, i));
+ expectEqualsInt((0x12345678 >>> j) | (0x12345678 << -j),
+ rotateRightIntWithByteDistance(0x12345678, i));
+ }
+ }
+
+
+ public static void main(String args[]) throws Exception {
+ main2 = Class.forName("Main2");
+
+ testRotateLeftBoolean();
+ testRotateLeftByte();
+ testRotateLeftShort();
+ testRotateLeftChar();
+ testRotateLeftInt();
+ testRotateLeftLong();
+
+ testRotateRightBoolean();
+ testRotateRightByte();
+ testRotateRightShort();
+ testRotateRightChar();
+ testRotateRightInt();
+ testRotateRightLong();
+
+ // Also exercise distance values with types other than int.
+ testRotateLeftIntWithByteDistance();
+ testRotateRightIntWithByteDistance();
+
+ System.out.println("passed");
+ }
+
+
+ private static void expectEqualsInt(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ private static void expectEqualsLong(long expected, long result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+}
diff --git a/test/565-checker-rotate/src/Main.java b/test/565-checker-rotate/src/Main.java
index eb0e8688c0..79b8555878 100644
--- a/test/565-checker-rotate/src/Main.java
+++ b/test/565-checker-rotate/src/Main.java
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2016 The Android Open Source Project
+ * Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,623 +14,9 @@
* limitations under the License.
*/
+// This file is just for running on the RI as the test is ART specific.
public class Main {
-
- /// CHECK-START: int Main.rotateLeftBoolean(boolean, int) intrinsics_recognition (after)
- /// CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
- /// CHECK: <<ArgVal:z\d+>> ParameterValue
- /// CHECK: <<ArgDist:i\d+>> ParameterValue
- /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0
- /// CHECK-DAG: <<One:i\d+>> IntConstant 1
- /// CHECK-DAG: <<Val:i\d+>> Phi [<<One>>,<<Zero>>]
- /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<Val>>,<<ArgDist>>,<<Method>>] intrinsic:IntegerRotateLeft
- /// CHECK-DAG: Return [<<Result>>]
-
- /// CHECK-START: int Main.rotateLeftBoolean(boolean, int) instruction_simplifier (after)
- /// CHECK: <<ArgVal:z\d+>> ParameterValue
- /// CHECK: <<ArgDist:i\d+>> ParameterValue
- /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0
- /// CHECK-DAG: <<One:i\d+>> IntConstant 1
- /// CHECK-DAG: <<Val:i\d+>> Phi [<<One>>,<<Zero>>]
- /// CHECK-DAG: <<NegDist:i\d+>> Neg [<<ArgDist>>]
- /// CHECK-DAG: <<Result:i\d+>> Ror [<<Val>>,<<NegDist>>]
- /// CHECK-DAG: Return [<<Result>>]
-
- /// CHECK-START: int Main.rotateLeftBoolean(boolean, int) instruction_simplifier (after)
- /// CHECK-NOT: InvokeStaticOrDirect
-
- /// CHECK-START: int Main.rotateLeftBoolean(boolean, int) select_generator (after)
- /// CHECK: <<ArgVal:z\d+>> ParameterValue
- /// CHECK: <<ArgDist:i\d+>> ParameterValue
- /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0
- /// CHECK-DAG: <<One:i\d+>> IntConstant 1
- /// CHECK-DAG: <<SelVal:i\d+>> Select [<<Zero>>,<<One>>,<<ArgVal>>]
- /// CHECK-DAG: <<NegDist:i\d+>> Neg [<<ArgDist>>]
- /// CHECK-DAG: <<Result:i\d+>> Ror [<<SelVal>>,<<NegDist>>]
- /// CHECK-DAG: Return [<<Result>>]
-
- /// CHECK-START: int Main.rotateLeftBoolean(boolean, int) select_generator (after)
- /// CHECK-NOT: Phi
-
- /// CHECK-START: int Main.rotateLeftBoolean(boolean, int) instruction_simplifier$after_bce (after)
- /// CHECK: <<ArgVal:z\d+>> ParameterValue
- /// CHECK: <<ArgDist:i\d+>> ParameterValue
- /// CHECK-DAG: <<NegDist:i\d+>> Neg [<<ArgDist>>]
- /// CHECK-DAG: <<Result:i\d+>> Ror [<<ArgVal>>,<<NegDist>>]
- /// CHECK-DAG: Return [<<Result>>]
-
- /// CHECK-START: int Main.rotateLeftBoolean(boolean, int) instruction_simplifier$after_bce (after)
- /// CHECK-NOT: Select
-
- private static int rotateLeftBoolean(boolean value, int distance) {
- return Integer.rotateLeft(value ? 1 : 0, distance);
- }
-
- /// CHECK-START: int Main.rotateLeftByte(byte, int) intrinsics_recognition (after)
- /// CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
- /// CHECK: <<ArgVal:b\d+>> ParameterValue
- /// CHECK: <<ArgDist:i\d+>> ParameterValue
- /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>,<<Method>>] intrinsic:IntegerRotateLeft
- /// CHECK-DAG: Return [<<Result>>]
-
- /// CHECK-START: int Main.rotateLeftByte(byte, int) instruction_simplifier (after)
- /// CHECK: <<ArgVal:b\d+>> ParameterValue
- /// CHECK: <<ArgDist:i\d+>> ParameterValue
- /// CHECK-DAG: <<NegDist:i\d+>> Neg [<<ArgDist>>]
- /// CHECK-DAG: <<Result:i\d+>> Ror [<<ArgVal>>,<<NegDist>>]
- /// CHECK-DAG: Return [<<Result>>]
-
- /// CHECK-START: int Main.rotateLeftByte(byte, int) instruction_simplifier (after)
- /// CHECK-NOT: InvokeStaticOrDirect
-
- private static int rotateLeftByte(byte value, int distance) {
- return Integer.rotateLeft(value, distance);
- }
-
- /// CHECK-START: int Main.rotateLeftShort(short, int) intrinsics_recognition (after)
- /// CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
- /// CHECK: <<ArgVal:s\d+>> ParameterValue
- /// CHECK: <<ArgDist:i\d+>> ParameterValue
- /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>,<<Method>>] intrinsic:IntegerRotateLeft
- /// CHECK-DAG: Return [<<Result>>]
-
- /// CHECK-START: int Main.rotateLeftShort(short, int) instruction_simplifier (after)
- /// CHECK: <<ArgVal:s\d+>> ParameterValue
- /// CHECK: <<ArgDist:i\d+>> ParameterValue
- /// CHECK-DAG: <<NegDist:i\d+>> Neg [<<ArgDist>>]
- /// CHECK-DAG: <<Result:i\d+>> Ror [<<ArgVal>>,<<NegDist>>]
- /// CHECK-DAG: Return [<<Result>>]
-
- /// CHECK-START: int Main.rotateLeftShort(short, int) instruction_simplifier (after)
- /// CHECK-NOT: InvokeStaticOrDirect
-
- private static int rotateLeftShort(short value, int distance) {
- return Integer.rotateLeft(value, distance);
- }
-
- /// CHECK-START: int Main.rotateLeftChar(char, int) intrinsics_recognition (after)
- /// CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
- /// CHECK: <<ArgVal:c\d+>> ParameterValue
- /// CHECK: <<ArgDist:i\d+>> ParameterValue
- /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>,<<Method>>] intrinsic:IntegerRotateLeft
- /// CHECK-DAG: Return [<<Result>>]
-
- /// CHECK-START: int Main.rotateLeftChar(char, int) instruction_simplifier (after)
- /// CHECK: <<ArgVal:c\d+>> ParameterValue
- /// CHECK: <<ArgDist:i\d+>> ParameterValue
- /// CHECK-DAG: <<NegDist:i\d+>> Neg [<<ArgDist>>]
- /// CHECK-DAG: <<Result:i\d+>> Ror [<<ArgVal>>,<<NegDist>>]
- /// CHECK-DAG: Return [<<Result>>]
-
- /// CHECK-START: int Main.rotateLeftChar(char, int) instruction_simplifier (after)
- /// CHECK-NOT: InvokeStaticOrDirect
-
- private static int rotateLeftChar(char value, int distance) {
- return Integer.rotateLeft(value, distance);
- }
-
- /// CHECK-START: int Main.rotateLeftInt(int, int) intrinsics_recognition (after)
- /// CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
- /// CHECK: <<ArgVal:i\d+>> ParameterValue
- /// CHECK: <<ArgDist:i\d+>> ParameterValue
- /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>,<<Method>>] intrinsic:IntegerRotateLeft
- /// CHECK-DAG: Return [<<Result>>]
-
- /// CHECK-START: int Main.rotateLeftInt(int, int) instruction_simplifier (after)
- /// CHECK: <<ArgVal:i\d+>> ParameterValue
- /// CHECK: <<ArgDist:i\d+>> ParameterValue
- /// CHECK-DAG: <<NegDist:i\d+>> Neg [<<ArgDist>>]
- /// CHECK-DAG: <<Result:i\d+>> Ror [<<ArgVal>>,<<NegDist>>]
- /// CHECK-DAG: Return [<<Result>>]
-
- /// CHECK-START: int Main.rotateLeftInt(int, int) instruction_simplifier (after)
- /// CHECK-NOT: InvokeStaticOrDirect
-
- private static int rotateLeftInt(int value, int distance) {
- return Integer.rotateLeft(value, distance);
- }
-
- /// CHECK-START: long Main.rotateLeftLong(long, int) intrinsics_recognition (after)
- /// CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
- /// CHECK: <<ArgVal:j\d+>> ParameterValue
- /// CHECK: <<ArgDist:i\d+>> ParameterValue
- /// CHECK-DAG: <<Result:j\d+>> InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>,<<Method>>] intrinsic:LongRotateLeft
- /// CHECK-DAG: Return [<<Result>>]
-
- /// CHECK-START: long Main.rotateLeftLong(long, int) instruction_simplifier (after)
- /// CHECK: <<ArgVal:j\d+>> ParameterValue
- /// CHECK: <<ArgDist:i\d+>> ParameterValue
- /// CHECK-DAG: <<NegDist:i\d+>> Neg [<<ArgDist>>]
- /// CHECK-DAG: <<Result:j\d+>> Ror [<<ArgVal>>,<<NegDist>>]
- /// CHECK-DAG: Return [<<Result>>]
-
- /// CHECK-START: long Main.rotateLeftLong(long, int) instruction_simplifier (after)
- /// CHECK-NOT: InvokeStaticOrDirect
-
- private static long rotateLeftLong(long value, int distance) {
- return Long.rotateLeft(value, distance);
- }
-
-
- /// CHECK-START: int Main.rotateRightBoolean(boolean, int) intrinsics_recognition (after)
- /// CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
- /// CHECK: <<ArgVal:z\d+>> ParameterValue
- /// CHECK: <<ArgDist:i\d+>> ParameterValue
- /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0
- /// CHECK-DAG: <<One:i\d+>> IntConstant 1
- /// CHECK-DAG: <<Val:i\d+>> Phi [<<One>>,<<Zero>>]
- /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<Val>>,<<ArgDist>>,<<Method>>] intrinsic:IntegerRotateRight
- /// CHECK-DAG: Return [<<Result>>]
-
- /// CHECK-START: int Main.rotateRightBoolean(boolean, int) instruction_simplifier (after)
- /// CHECK: <<ArgVal:z\d+>> ParameterValue
- /// CHECK: <<ArgDist:i\d+>> ParameterValue
- /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0
- /// CHECK-DAG: <<One:i\d+>> IntConstant 1
- /// CHECK-DAG: <<Val:i\d+>> Phi [<<One>>,<<Zero>>]
- /// CHECK-DAG: <<Result:i\d+>> Ror [<<Val>>,<<ArgDist>>]
- /// CHECK-DAG: Return [<<Result>>]
-
- /// CHECK-START: int Main.rotateRightBoolean(boolean, int) instruction_simplifier (after)
- /// CHECK-NOT: InvokeStaticOrDirect
-
- /// CHECK-START: int Main.rotateRightBoolean(boolean, int) select_generator (after)
- /// CHECK: <<ArgVal:z\d+>> ParameterValue
- /// CHECK: <<ArgDist:i\d+>> ParameterValue
- /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0
- /// CHECK-DAG: <<One:i\d+>> IntConstant 1
- /// CHECK-DAG: <<SelVal:i\d+>> Select [<<Zero>>,<<One>>,<<ArgVal>>]
- /// CHECK-DAG: <<Result:i\d+>> Ror [<<SelVal>>,<<ArgDist>>]
- /// CHECK-DAG: Return [<<Result>>]
-
- /// CHECK-START: int Main.rotateRightBoolean(boolean, int) select_generator (after)
- /// CHECK-NOT: Phi
-
- /// CHECK-START: int Main.rotateRightBoolean(boolean, int) instruction_simplifier$after_bce (after)
- /// CHECK: <<ArgVal:z\d+>> ParameterValue
- /// CHECK: <<ArgDist:i\d+>> ParameterValue
- /// CHECK-DAG: <<Result:i\d+>> Ror [<<ArgVal>>,<<ArgDist>>]
- /// CHECK-DAG: Return [<<Result>>]
-
- /// CHECK-START: int Main.rotateRightBoolean(boolean, int) instruction_simplifier$after_bce (after)
- /// CHECK-NOT: Select
-
- private static int rotateRightBoolean(boolean value, int distance) {
- return Integer.rotateRight(value ? 1 : 0, distance);
- }
-
- /// CHECK-START: int Main.rotateRightByte(byte, int) intrinsics_recognition (after)
- /// CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
- /// CHECK: <<ArgVal:b\d+>> ParameterValue
- /// CHECK: <<ArgDist:i\d+>> ParameterValue
- /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>,<<Method>>] intrinsic:IntegerRotateRight
- /// CHECK-DAG: Return [<<Result>>]
-
- /// CHECK-START: int Main.rotateRightByte(byte, int) instruction_simplifier (after)
- /// CHECK: <<ArgVal:b\d+>> ParameterValue
- /// CHECK: <<ArgDist:i\d+>> ParameterValue
- /// CHECK-DAG: <<Result:i\d+>> Ror [<<ArgVal>>,<<ArgDist>>]
- /// CHECK-DAG: Return [<<Result>>]
-
- /// CHECK-START: int Main.rotateRightByte(byte, int) instruction_simplifier (after)
- /// CHECK-NOT: InvokeStaticOrDirect
-
- private static int rotateRightByte(byte value, int distance) {
- return Integer.rotateRight(value, distance);
- }
-
- /// CHECK-START: int Main.rotateRightShort(short, int) intrinsics_recognition (after)
- /// CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
- /// CHECK: <<ArgVal:s\d+>> ParameterValue
- /// CHECK: <<ArgDist:i\d+>> ParameterValue
- /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>,<<Method>>] intrinsic:IntegerRotateRight
- /// CHECK-DAG: Return [<<Result>>]
-
- /// CHECK-START: int Main.rotateRightShort(short, int) instruction_simplifier (after)
- /// CHECK: <<ArgVal:s\d+>> ParameterValue
- /// CHECK: <<ArgDist:i\d+>> ParameterValue
- /// CHECK-DAG: <<Result:i\d+>> Ror [<<ArgVal>>,<<ArgDist>>]
- /// CHECK-DAG: Return [<<Result>>]
-
- /// CHECK-START: int Main.rotateRightShort(short, int) instruction_simplifier (after)
- /// CHECK-NOT: InvokeStaticOrDirect
-
- private static int rotateRightShort(short value, int distance) {
- return Integer.rotateRight(value, distance);
- }
-
- /// CHECK-START: int Main.rotateRightChar(char, int) intrinsics_recognition (after)
- /// CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
- /// CHECK: <<ArgVal:c\d+>> ParameterValue
- /// CHECK: <<ArgDist:i\d+>> ParameterValue
- /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>,<<Method>>] intrinsic:IntegerRotateRight
- /// CHECK-DAG: Return [<<Result>>]
-
- /// CHECK-START: int Main.rotateRightChar(char, int) instruction_simplifier (after)
- /// CHECK: <<ArgVal:c\d+>> ParameterValue
- /// CHECK: <<ArgDist:i\d+>> ParameterValue
- /// CHECK-DAG: <<Result:i\d+>> Ror [<<ArgVal>>,<<ArgDist>>]
- /// CHECK-DAG: Return [<<Result>>]
-
- /// CHECK-START: int Main.rotateRightChar(char, int) instruction_simplifier (after)
- /// CHECK-NOT: InvokeStaticOrDirect
-
- private static int rotateRightChar(char value, int distance) {
- return Integer.rotateRight(value, distance);
- }
-
- /// CHECK-START: int Main.rotateRightInt(int, int) intrinsics_recognition (after)
- /// CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
- /// CHECK: <<ArgVal:i\d+>> ParameterValue
- /// CHECK: <<ArgDist:i\d+>> ParameterValue
- /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>,<<Method>>] intrinsic:IntegerRotateRight
- /// CHECK-DAG: Return [<<Result>>]
-
- /// CHECK-START: int Main.rotateRightInt(int, int) instruction_simplifier (after)
- /// CHECK: <<ArgVal:i\d+>> ParameterValue
- /// CHECK: <<ArgDist:i\d+>> ParameterValue
- /// CHECK-DAG: <<Result:i\d+>> Ror [<<ArgVal>>,<<ArgDist>>]
- /// CHECK-DAG: Return [<<Result>>]
-
- /// CHECK-START: int Main.rotateRightInt(int, int) instruction_simplifier (after)
- /// CHECK-NOT: InvokeStaticOrDirect
-
- private static int rotateRightInt(int value, int distance) {
- return Integer.rotateRight(value, distance);
- }
-
- /// CHECK-START: long Main.rotateRightLong(long, int) intrinsics_recognition (after)
- /// CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
- /// CHECK: <<ArgVal:j\d+>> ParameterValue
- /// CHECK: <<ArgDist:i\d+>> ParameterValue
- /// CHECK-DAG: <<Result:j\d+>> InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>,<<Method>>] intrinsic:LongRotateRight
- /// CHECK-DAG: Return [<<Result>>]
-
- /// CHECK-START: long Main.rotateRightLong(long, int) instruction_simplifier (after)
- /// CHECK: <<ArgVal:j\d+>> ParameterValue
- /// CHECK: <<ArgDist:i\d+>> ParameterValue
- /// CHECK-DAG: <<Result:j\d+>> Ror [<<ArgVal>>,<<ArgDist>>]
- /// CHECK-DAG: Return [<<Result>>]
-
- /// CHECK-START: long Main.rotateRightLong(long, int) instruction_simplifier (after)
- /// CHECK-NOT: InvokeStaticOrDirect
-
- private static long rotateRightLong(long value, int distance) {
- return Long.rotateRight(value, distance);
- }
-
-
- /// CHECK-START: int Main.rotateLeftIntWithByteDistance(int, byte) intrinsics_recognition (after)
- /// CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
- /// CHECK: <<ArgVal:i\d+>> ParameterValue
- /// CHECK: <<ArgDist:b\d+>> ParameterValue
- /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>,<<Method>>] intrinsic:IntegerRotateLeft
- /// CHECK-DAG: Return [<<Result>>]
-
- /// CHECK-START: int Main.rotateLeftIntWithByteDistance(int, byte) instruction_simplifier (after)
- /// CHECK: <<ArgVal:i\d+>> ParameterValue
- /// CHECK: <<ArgDist:b\d+>> ParameterValue
- /// CHECK-DAG: <<NegDist:i\d+>> Neg [<<ArgDist>>]
- /// CHECK-DAG: <<Result:i\d+>> Ror [<<ArgVal>>,<<NegDist>>]
- /// CHECK-DAG: Return [<<Result>>]
-
- /// CHECK-START: int Main.rotateLeftIntWithByteDistance(int, byte) instruction_simplifier (after)
- /// CHECK-NOT: InvokeStaticOrDirect
-
- private static int rotateLeftIntWithByteDistance(int value, byte distance) {
- return Integer.rotateLeft(value, distance);
- }
-
- /// CHECK-START: int Main.rotateRightIntWithByteDistance(int, byte) intrinsics_recognition (after)
- /// CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
- /// CHECK: <<ArgVal:i\d+>> ParameterValue
- /// CHECK: <<ArgDist:b\d+>> ParameterValue
- /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<ArgVal>>,<<ArgDist>>,<<Method>>] intrinsic:IntegerRotateRight
- /// CHECK-DAG: Return [<<Result>>]
-
- /// CHECK-START: int Main.rotateRightIntWithByteDistance(int, byte) instruction_simplifier (after)
- /// CHECK: <<ArgVal:i\d+>> ParameterValue
- /// CHECK: <<ArgDist:b\d+>> ParameterValue
- /// CHECK-DAG: <<Result:i\d+>> Ror [<<ArgVal>>,<<ArgDist>>]
- /// CHECK-DAG: Return [<<Result>>]
-
- /// CHECK-START: int Main.rotateRightIntWithByteDistance(int, byte) instruction_simplifier (after)
- /// CHECK-NOT: InvokeStaticOrDirect
-
- private static int rotateRightIntWithByteDistance(int value, byte distance) {
- return Integer.rotateRight(value, distance);
- }
-
-
- public static void testRotateLeftBoolean() {
- for (int i = 0; i < 40; i++) { // overshoot a bit
- int j = i & 31;
- expectEqualsInt(0, rotateLeftBoolean(false, i));
- expectEqualsInt(1 << i, rotateLeftBoolean(true, i));
- }
- }
-
- public static void testRotateLeftByte() {
- expectEqualsInt(0x00000001, rotateLeftByte((byte)0x01, 0));
- expectEqualsInt(0x00000002, rotateLeftByte((byte)0x01, 1));
- expectEqualsInt(0x80000000, rotateLeftByte((byte)0x01, 31));
- expectEqualsInt(0x00000001, rotateLeftByte((byte)0x01, 32)); // overshoot
- expectEqualsInt(0xFFFFFF03, rotateLeftByte((byte)0x81, 1));
- expectEqualsInt(0xFFFFFE07, rotateLeftByte((byte)0x81, 2));
- expectEqualsInt(0x00000120, rotateLeftByte((byte)0x12, 4));
- expectEqualsInt(0xFFFF9AFF, rotateLeftByte((byte)0x9A, 8));
- for (int i = 0; i < 40; i++) { // overshoot a bit
- int j = i & 31;
- expectEqualsInt(0x00000000, rotateLeftByte((byte)0x0000, i));
- expectEqualsInt(0xFFFFFFFF, rotateLeftByte((byte)0xFFFF, i));
- expectEqualsInt((1 << j), rotateLeftByte((byte)0x0001, i));
- expectEqualsInt((0x12 << j) | (0x12 >>> -j), rotateLeftByte((byte)0x12, i));
- }
- }
-
- public static void testRotateLeftShort() {
- expectEqualsInt(0x00000001, rotateLeftShort((short)0x0001, 0));
- expectEqualsInt(0x00000002, rotateLeftShort((short)0x0001, 1));
- expectEqualsInt(0x80000000, rotateLeftShort((short)0x0001, 31));
- expectEqualsInt(0x00000001, rotateLeftShort((short)0x0001, 32)); // overshoot
- expectEqualsInt(0xFFFF0003, rotateLeftShort((short)0x8001, 1));
- expectEqualsInt(0xFFFE0007, rotateLeftShort((short)0x8001, 2));
- expectEqualsInt(0x00012340, rotateLeftShort((short)0x1234, 4));
- expectEqualsInt(0xFF9ABCFF, rotateLeftShort((short)0x9ABC, 8));
- for (int i = 0; i < 40; i++) { // overshoot a bit
- int j = i & 31;
- expectEqualsInt(0x00000000, rotateLeftShort((short)0x0000, i));
- expectEqualsInt(0xFFFFFFFF, rotateLeftShort((short)0xFFFF, i));
- expectEqualsInt((1 << j), rotateLeftShort((short)0x0001, i));
- expectEqualsInt((0x1234 << j) | (0x1234 >>> -j), rotateLeftShort((short)0x1234, i));
- }
- }
-
- public static void testRotateLeftChar() {
- expectEqualsInt(0x00000001, rotateLeftChar((char)0x0001, 0));
- expectEqualsInt(0x00000002, rotateLeftChar((char)0x0001, 1));
- expectEqualsInt(0x80000000, rotateLeftChar((char)0x0001, 31));
- expectEqualsInt(0x00000001, rotateLeftChar((char)0x0001, 32)); // overshoot
- expectEqualsInt(0x00010002, rotateLeftChar((char)0x8001, 1));
- expectEqualsInt(0x00020004, rotateLeftChar((char)0x8001, 2));
- expectEqualsInt(0x00012340, rotateLeftChar((char)0x1234, 4));
- expectEqualsInt(0x009ABC00, rotateLeftChar((char)0x9ABC, 8));
- expectEqualsInt(0x00FF0000, rotateLeftChar((char)0xFF00, 8));
- for (int i = 0; i < 40; i++) { // overshoot a bit
- int j = i & 31;
- expectEqualsInt(0x00000000, rotateLeftChar((char)0x0000, i));
- expectEqualsInt((1 << j), rotateLeftChar((char)0x0001, i));
- expectEqualsInt((0x1234 << j) | (0x1234 >>> -j), rotateLeftChar((char)0x1234, i));
- }
- }
-
- public static void testRotateLeftInt() {
- expectEqualsInt(0x00000001, rotateLeftInt(0x00000001, 0));
- expectEqualsInt(0x00000002, rotateLeftInt(0x00000001, 1));
- expectEqualsInt(0x80000000, rotateLeftInt(0x00000001, 31));
- expectEqualsInt(0x00000001, rotateLeftInt(0x00000001, 32)); // overshoot
- expectEqualsInt(0x00000003, rotateLeftInt(0x80000001, 1));
- expectEqualsInt(0x00000006, rotateLeftInt(0x80000001, 2));
- expectEqualsInt(0x23456781, rotateLeftInt(0x12345678, 4));
- expectEqualsInt(0xBCDEF09A, rotateLeftInt(0x9ABCDEF0, 8));
- for (int i = 0; i < 40; i++) { // overshoot a bit
- int j = i & 31;
- expectEqualsInt(0x00000000, rotateLeftInt(0x00000000, i));
- expectEqualsInt(0xFFFFFFFF, rotateLeftInt(0xFFFFFFFF, i));
- expectEqualsInt(1 << j, rotateLeftInt(0x00000001, i));
- expectEqualsInt((0x12345678 << j) | (0x12345678 >>> -j), rotateLeftInt(0x12345678, i));
- }
- }
-
- public static void testRotateLeftLong() {
- expectEqualsLong(0x0000000000000001L, rotateLeftLong(0x0000000000000001L, 0));
- expectEqualsLong(0x0000000000000002L, rotateLeftLong(0x0000000000000001L, 1));
- expectEqualsLong(0x8000000000000000L, rotateLeftLong(0x0000000000000001L, 63));
- expectEqualsLong(0x0000000000000001L, rotateLeftLong(0x0000000000000001L, 64)); // overshoot
- expectEqualsLong(0x0000000000000003L, rotateLeftLong(0x8000000000000001L, 1));
- expectEqualsLong(0x0000000000000006L, rotateLeftLong(0x8000000000000001L, 2));
- expectEqualsLong(0x23456789ABCDEF01L, rotateLeftLong(0x123456789ABCDEF0L, 4));
- expectEqualsLong(0x3456789ABCDEF012L, rotateLeftLong(0x123456789ABCDEF0L, 8));
- for (int i = 0; i < 70; i++) { // overshoot a bit
- int j = i & 63;
- expectEqualsLong(0x0000000000000000L, rotateLeftLong(0x0000000000000000L, i));
- expectEqualsLong(0xFFFFFFFFFFFFFFFFL, rotateLeftLong(0xFFFFFFFFFFFFFFFFL, i));
- expectEqualsLong(1L << j, rotateLeftLong(0x0000000000000001, i));
- expectEqualsLong((0x123456789ABCDEF0L << j) | (0x123456789ABCDEF0L >>> -j),
- rotateLeftLong(0x123456789ABCDEF0L, i));
- }
- }
-
- public static void testRotateRightBoolean() {
- for (int i = 0; i < 40; i++) { // overshoot a bit
- int j = i & 31;
- expectEqualsInt(0, rotateRightBoolean(false, i));
- expectEqualsInt(1 << (32 - i), rotateRightBoolean(true, i));
- }
- }
-
- public static void testRotateRightByte() {
- expectEqualsInt(0xFFFFFF80, rotateRightByte((byte)0x80, 0));
- expectEqualsInt(0x7FFFFFC0, rotateRightByte((byte)0x80, 1));
- expectEqualsInt(0xFFFFFF01, rotateRightByte((byte)0x80, 31));
- expectEqualsInt(0xFFFFFF80, rotateRightByte((byte)0x80, 32)); // overshoot
- expectEqualsInt(0xFFFFFFC0, rotateRightByte((byte)0x81, 1));
- expectEqualsInt(0x7FFFFFE0, rotateRightByte((byte)0x81, 2));
- expectEqualsInt(0x20000001, rotateRightByte((byte)0x12, 4));
- expectEqualsInt(0x9AFFFFFF, rotateRightByte((byte)0x9A, 8));
- for (int i = 0; i < 40; i++) { // overshoot a bit
- int j = i & 31;
- expectEqualsInt(0x00000000, rotateRightByte((byte)0x00, i));
- expectEqualsInt(0xFFFFFFFF, rotateRightByte((byte)0xFF, i));
- expectEqualsInt(1 << (32 - j), rotateRightByte((byte)0x01, i));
- expectEqualsInt((0x12 >>> j) | (0x12 << -j), rotateRightByte((byte)0x12, i));
- }
- }
-
- public static void testRotateRightShort() {
- expectEqualsInt(0xFFFF8000, rotateRightShort((short)0x8000, 0));
- expectEqualsInt(0x7FFFC000, rotateRightShort((short)0x8000, 1));
- expectEqualsInt(0xFFFF0001, rotateRightShort((short)0x8000, 31));
- expectEqualsInt(0xFFFF8000, rotateRightShort((short)0x8000, 32)); // overshoot
- expectEqualsInt(0xFFFFC000, rotateRightShort((short)0x8001, 1));
- expectEqualsInt(0x7FFFE000, rotateRightShort((short)0x8001, 2));
- expectEqualsInt(0x40000123, rotateRightShort((short)0x1234, 4));
- expectEqualsInt(0xBCFFFF9A, rotateRightShort((short)0x9ABC, 8));
- for (int i = 0; i < 40; i++) { // overshoot a bit
- int j = i & 31;
- expectEqualsInt(0x00000000, rotateRightShort((short)0x0000, i));
- expectEqualsInt(0xFFFFFFFF, rotateRightShort((short)0xFFFF, i));
- expectEqualsInt(1 << (32 - j), rotateRightShort((short)0x0001, i));
- expectEqualsInt((0x1234 >>> j) | (0x1234 << -j), rotateRightShort((short)0x1234, i));
- }
- }
-
- public static void testRotateRightChar() {
- expectEqualsInt(0x00008000, rotateRightChar((char)0x8000, 0));
- expectEqualsInt(0x00004000, rotateRightChar((char)0x8000, 1));
- expectEqualsInt(0x00010000, rotateRightChar((char)0x8000, 31));
- expectEqualsInt(0x00008000, rotateRightChar((char)0x8000, 32)); // overshoot
- expectEqualsInt(0x80004000, rotateRightChar((char)0x8001, 1));
- expectEqualsInt(0x40002000, rotateRightChar((char)0x8001, 2));
- expectEqualsInt(0x40000123, rotateRightChar((char)0x1234, 4));
- expectEqualsInt(0xBC00009A, rotateRightChar((char)0x9ABC, 8));
- for (int i = 0; i < 40; i++) { // overshoot a bit
- int j = i & 31;
- expectEqualsInt(0x00000000, rotateRightChar((char)0x0000, i));
- expectEqualsInt(1 << (32 - j), rotateRightChar((char)0x0001, i));
- expectEqualsInt((0x1234 >>> j) | (0x1234 << -j), rotateRightChar((char)0x1234, i));
- }
- }
-
- public static void testRotateRightInt() {
- expectEqualsInt(0x80000000, rotateRightInt(0x80000000, 0));
- expectEqualsInt(0x40000000, rotateRightInt(0x80000000, 1));
- expectEqualsInt(0x00000001, rotateRightInt(0x80000000, 31));
- expectEqualsInt(0x80000000, rotateRightInt(0x80000000, 32)); // overshoot
- expectEqualsInt(0xC0000000, rotateRightInt(0x80000001, 1));
- expectEqualsInt(0x60000000, rotateRightInt(0x80000001, 2));
- expectEqualsInt(0x81234567, rotateRightInt(0x12345678, 4));
- expectEqualsInt(0xF09ABCDE, rotateRightInt(0x9ABCDEF0, 8));
- for (int i = 0; i < 40; i++) { // overshoot a bit
- int j = i & 31;
- expectEqualsInt(0x00000000, rotateRightInt(0x00000000, i));
- expectEqualsInt(0xFFFFFFFF, rotateRightInt(0xFFFFFFFF, i));
- expectEqualsInt(0x80000000 >>> j, rotateRightInt(0x80000000, i));
- expectEqualsInt((0x12345678 >>> j) | (0x12345678 << -j), rotateRightInt(0x12345678, i));
- }
- }
-
- public static void testRotateRightLong() {
- expectEqualsLong(0x8000000000000000L, rotateRightLong(0x8000000000000000L, 0));
- expectEqualsLong(0x4000000000000000L, rotateRightLong(0x8000000000000000L, 1));
- expectEqualsLong(0x0000000000000001L, rotateRightLong(0x8000000000000000L, 63));
- expectEqualsLong(0x8000000000000000L, rotateRightLong(0x8000000000000000L, 64)); // overshoot
- expectEqualsLong(0xC000000000000000L, rotateRightLong(0x8000000000000001L, 1));
- expectEqualsLong(0x6000000000000000L, rotateRightLong(0x8000000000000001L, 2));
- expectEqualsLong(0x0123456789ABCDEFL, rotateRightLong(0x123456789ABCDEF0L, 4));
- expectEqualsLong(0xF0123456789ABCDEL, rotateRightLong(0x123456789ABCDEF0L, 8));
- for (int i = 0; i < 70; i++) { // overshoot a bit
- int j = i & 63;
- expectEqualsLong(0x0000000000000000L, rotateRightLong(0x0000000000000000L, i));
- expectEqualsLong(0xFFFFFFFFFFFFFFFFL, rotateRightLong(0xFFFFFFFFFFFFFFFFL, i));
- expectEqualsLong(0x8000000000000000L >>> j, rotateRightLong(0x8000000000000000L, i));
- expectEqualsLong((0x123456789ABCDEF0L >>> j) | (0x123456789ABCDEF0L << -j),
- rotateRightLong(0x123456789ABCDEF0L, i));
- }
- }
-
-
- public static void testRotateLeftIntWithByteDistance() {
- expectEqualsInt(0x00000001, rotateLeftIntWithByteDistance(0x00000001, (byte)0));
- expectEqualsInt(0x00000002, rotateLeftIntWithByteDistance(0x00000001, (byte)1));
- expectEqualsInt(0x80000000, rotateLeftIntWithByteDistance(0x00000001, (byte)31));
- expectEqualsInt(0x00000001, rotateLeftIntWithByteDistance(0x00000001, (byte)32)); // overshoot
- expectEqualsInt(0x00000003, rotateLeftIntWithByteDistance(0x80000001, (byte)1));
- expectEqualsInt(0x00000006, rotateLeftIntWithByteDistance(0x80000001, (byte)2));
- expectEqualsInt(0x23456781, rotateLeftIntWithByteDistance(0x12345678, (byte)4));
- expectEqualsInt(0xBCDEF09A, rotateLeftIntWithByteDistance(0x9ABCDEF0, (byte)8));
- for (byte i = 0; i < 40; i++) { // overshoot a bit
- byte j = (byte)(i & 31);
- expectEqualsInt(0x00000000, rotateLeftIntWithByteDistance(0x00000000, i));
- expectEqualsInt(0xFFFFFFFF, rotateLeftIntWithByteDistance(0xFFFFFFFF, i));
- expectEqualsInt(1 << j, rotateLeftIntWithByteDistance(0x00000001, i));
- expectEqualsInt((0x12345678 << j) | (0x12345678 >>> -j),
- rotateLeftIntWithByteDistance(0x12345678, i));
- }
- }
-
- public static void testRotateRightIntWithByteDistance() {
- expectEqualsInt(0x80000000, rotateRightIntWithByteDistance(0x80000000, (byte)0));
- expectEqualsInt(0x40000000, rotateRightIntWithByteDistance(0x80000000, (byte)1));
- expectEqualsInt(0x00000001, rotateRightIntWithByteDistance(0x80000000, (byte)31));
- expectEqualsInt(0x80000000, rotateRightIntWithByteDistance(0x80000000, (byte)32)); // overshoot
- expectEqualsInt(0xC0000000, rotateRightIntWithByteDistance(0x80000001, (byte)1));
- expectEqualsInt(0x60000000, rotateRightIntWithByteDistance(0x80000001, (byte)2));
- expectEqualsInt(0x81234567, rotateRightIntWithByteDistance(0x12345678, (byte)4));
- expectEqualsInt(0xF09ABCDE, rotateRightIntWithByteDistance(0x9ABCDEF0, (byte)8));
- for (byte i = 0; i < 40; i++) { // overshoot a bit
- byte j = (byte)(i & 31);
- expectEqualsInt(0x00000000, rotateRightIntWithByteDistance(0x00000000, i));
- expectEqualsInt(0xFFFFFFFF, rotateRightIntWithByteDistance(0xFFFFFFFF, i));
- expectEqualsInt(0x80000000 >>> j, rotateRightIntWithByteDistance(0x80000000, i));
- expectEqualsInt((0x12345678 >>> j) | (0x12345678 << -j),
- rotateRightIntWithByteDistance(0x12345678, i));
- }
- }
-
-
public static void main(String args[]) {
- testRotateLeftBoolean();
- testRotateLeftByte();
- testRotateLeftShort();
- testRotateLeftChar();
- testRotateLeftInt();
- testRotateLeftLong();
-
- testRotateRightBoolean();
- testRotateRightByte();
- testRotateRightShort();
- testRotateRightChar();
- testRotateRightInt();
- testRotateRightLong();
-
- // Also exercise distance values with types other than int.
- testRotateLeftIntWithByteDistance();
- testRotateRightIntWithByteDistance();
-
System.out.println("passed");
}
-
-
- private static void expectEqualsInt(int expected, int result) {
- if (expected != result) {
- throw new Error("Expected: " + expected + ", found: " + result);
- }
- }
-
- private static void expectEqualsLong(long expected, long result) {
- if (expected != result) {
- throw new Error("Expected: " + expected + ", found: " + result);
- }
- }
-}
+} \ No newline at end of file
diff --git a/test/566-checker-signum/build b/test/566-checker-signum/build
deleted file mode 100644
index 10ffcc537d..0000000000
--- a/test/566-checker-signum/build
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2017 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# See b/65168732
-export USE_D8=false
-
-./default-build "$@"
diff --git a/test/566-checker-signum/smali/Main2.smali b/test/566-checker-signum/smali/Main2.smali
new file mode 100644
index 0000000000..d99ad8662b
--- /dev/null
+++ b/test/566-checker-signum/smali/Main2.smali
@@ -0,0 +1,83 @@
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LMain2;
+.super Ljava/lang/Object;
+
+## CHECK-START: int Main2.signBoolean(boolean) intrinsics_recognition (after)
+## CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
+## CHECK-DAG: <<Zero:i\d+>> IntConstant 0
+## CHECK-DAG: <<One:i\d+>> IntConstant 1
+## CHECK-DAG: <<Phi:i\d+>> Phi [<<One>>,<<Zero>>]
+## CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<Phi>>,<<Method>>] intrinsic:IntegerSignum
+## CHECK-DAG: Return [<<Result>>]
+
+## CHECK-START: int Main2.signBoolean(boolean) instruction_simplifier (after)
+## CHECK-DAG: <<Zero:i\d+>> IntConstant 0
+## CHECK-DAG: <<One:i\d+>> IntConstant 1
+## CHECK-DAG: <<Phi:i\d+>> Phi [<<One>>,<<Zero>>]
+## CHECK-DAG: <<Result:i\d+>> Compare [<<Phi>>,<<Zero>>]
+## CHECK-DAG: Return [<<Result>>]
+
+## CHECK-START: int Main2.signBoolean(boolean) instruction_simplifier (after)
+## CHECK-NOT: InvokeStaticOrDirect
+
+## CHECK-START: int Main2.signBoolean(boolean) select_generator (after)
+## CHECK-DAG: <<Arg:z\d+>> ParameterValue
+## CHECK-DAG: <<Zero:i\d+>> IntConstant 0
+## CHECK-DAG: <<One:i\d+>> IntConstant 1
+## CHECK-DAG: <<Sel:i\d+>> Select [<<Zero>>,<<One>>,<<Arg>>]
+## CHECK-DAG: <<Result:i\d+>> Compare [<<Sel>>,<<Zero>>]
+## CHECK-DAG: Return [<<Result>>]
+
+## CHECK-START: int Main2.signBoolean(boolean) select_generator (after)
+## CHECK-NOT: Phi
+
+## CHECK-START: int Main2.signBoolean(boolean) instruction_simplifier$after_bce (after)
+## CHECK-DAG: <<Arg:z\d+>> ParameterValue
+## CHECK-DAG: <<Zero:i\d+>> IntConstant 0
+## CHECK-DAG: <<Result:i\d+>> Compare [<<Arg>>,<<Zero>>]
+## CHECK-DAG: Return [<<Result>>]
+
+## CHECK-START: int Main2.signBoolean(boolean) instruction_simplifier$after_bce (after)
+## CHECK-NOT: Select
+
+# Original java source:
+#
+# private static int signBoolean(boolean x) {
+# return Integer.signum(x ? 1 : 0);
+# }
+
+.method public static signBoolean(Z)I
+ .registers 2
+ .param p0, "x" # Z
+
+ .prologue
+ .line 58
+ if-eqz p0, :cond_8
+
+ const/4 v0, 0x1
+
+ :goto_3
+ invoke-static {v0}, Ljava/lang/Integer;->signum(I)I
+
+ move-result v0
+
+ return v0
+
+ :cond_8
+ const/4 v0, 0x0
+
+ goto :goto_3
+.end method
diff --git a/test/566-checker-signum/src-art/Main.java b/test/566-checker-signum/src-art/Main.java
new file mode 100644
index 0000000000..f1e1e1bf6e
--- /dev/null
+++ b/test/566-checker-signum/src-art/Main.java
@@ -0,0 +1,196 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+
+public class Main {
+
+ /// CHECK-START: int Main.signByte(byte) intrinsics_recognition (after)
+ /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerSignum
+ /// CHECK-DAG: Return [<<Result>>]
+
+ /// CHECK-START: int Main.signByte(byte) instruction_simplifier (after)
+ /// CHECK-DAG: <<Result:i\d+>> Compare
+ /// CHECK-DAG: Return [<<Result>>]
+
+ /// CHECK-START: int Main.signByte(byte) instruction_simplifier (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+
+ private static int signByte(byte x) {
+ return Integer.signum(x);
+ }
+
+ /// CHECK-START: int Main.signShort(short) intrinsics_recognition (after)
+ /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerSignum
+ /// CHECK-DAG: Return [<<Result>>]
+
+ /// CHECK-START: int Main.signShort(short) instruction_simplifier (after)
+ /// CHECK-DAG: <<Result:i\d+>> Compare
+ /// CHECK-DAG: Return [<<Result>>]
+
+ /// CHECK-START: int Main.signShort(short) instruction_simplifier (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+
+ private static int signShort(short x) {
+ return Integer.signum(x);
+ }
+
+ /// CHECK-START: int Main.signChar(char) intrinsics_recognition (after)
+ /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerSignum
+ /// CHECK-DAG: Return [<<Result>>]
+
+ /// CHECK-START: int Main.signChar(char) instruction_simplifier (after)
+ /// CHECK-DAG: <<Result:i\d+>> Compare
+ /// CHECK-DAG: Return [<<Result>>]
+
+ /// CHECK-START: int Main.signChar(char) instruction_simplifier (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+
+ private static int signChar(char x) {
+ return Integer.signum(x);
+ }
+
+ /// CHECK-START: int Main.signInt(int) intrinsics_recognition (after)
+ /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerSignum
+ /// CHECK-DAG: Return [<<Result>>]
+
+ /// CHECK-START: int Main.signInt(int) instruction_simplifier (after)
+ /// CHECK-DAG: <<Result:i\d+>> Compare
+ /// CHECK-DAG: Return [<<Result>>]
+
+ /// CHECK-START: int Main.signInt(int) instruction_simplifier (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+
+ private static int signInt(int x) {
+ return Integer.signum(x);
+ }
+
+ /// CHECK-START: int Main.signLong(long) intrinsics_recognition (after)
+ /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect intrinsic:LongSignum
+ /// CHECK-DAG: Return [<<Result>>]
+
+ /// CHECK-START: int Main.signLong(long) instruction_simplifier (after)
+ /// CHECK-DAG: <<Result:i\d+>> Compare
+ /// CHECK-DAG: Return [<<Result>>]
+
+ /// CHECK-START: int Main.signLong(long) instruction_simplifier (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+
+ private static int signLong(long x) {
+ return Long.signum(x);
+ }
+
+
+ public static void testSignBoolean() throws Exception {
+ Method signBoolean = Class.forName("Main2").getMethod("signBoolean", boolean.class);
+ expectEquals(0, (int)signBoolean.invoke(null, false));
+ expectEquals(1, (int)signBoolean.invoke(null, true));
+ }
+
+ public static void testSignByte() {
+ expectEquals(-1, signByte((byte)Byte.MIN_VALUE));
+ expectEquals(-1, signByte((byte)-64));
+ expectEquals(-1, signByte((byte)-1));
+ expectEquals(0, signByte((byte)0));
+ expectEquals(1, signByte((byte)1));
+ expectEquals(1, signByte((byte)64));
+ expectEquals(1, signByte((byte)Byte.MAX_VALUE));
+ }
+
+ public static void testSignShort() {
+ expectEquals(-1, signShort((short)Short.MIN_VALUE));
+ expectEquals(-1, signShort((short)-12345));
+ expectEquals(-1, signShort((short)-1));
+ expectEquals(0, signShort((short)0));
+ expectEquals(1, signShort((short)1));
+ expectEquals(1, signShort((short)12345));
+ expectEquals(1, signShort((short)Short.MAX_VALUE));
+ }
+
+ public static void testSignChar() {
+ expectEquals(0, signChar((char)0));
+ expectEquals(1, signChar((char)1));
+ expectEquals(1, signChar((char)12345));
+ expectEquals(1, signChar((char)Character.MAX_VALUE));
+ }
+
+ public static void testSignInt() {
+ expectEquals(-1, signInt(Integer.MIN_VALUE));
+ expectEquals(-1, signInt(-12345));
+ expectEquals(-1, signInt(-1));
+ expectEquals(0, signInt(0));
+ expectEquals(1, signInt(1));
+ expectEquals(1, signInt(12345));
+ expectEquals(1, signInt(Integer.MAX_VALUE));
+
+ for (int i = -11; i <= 11; i++) {
+ int expected = 0;
+ if (i < 0) expected = -1;
+ else if (i > 0) expected = 1;
+ expectEquals(expected, signInt(i));
+ }
+ }
+
+ public static void testSignLong() {
+ expectEquals(-1, signLong(Long.MIN_VALUE));
+ expectEquals(-1, signLong(-12345L));
+ expectEquals(-1, signLong(-1L));
+ expectEquals(0, signLong(0L));
+ expectEquals(1, signLong(1L));
+ expectEquals(1, signLong(12345L));
+ expectEquals(1, signLong(Long.MAX_VALUE));
+
+ expectEquals(-1, signLong(0x800000007FFFFFFFL));
+ expectEquals(-1, signLong(0x80000000FFFFFFFFL));
+ expectEquals(1, signLong(0x000000007FFFFFFFL));
+ expectEquals(1, signLong(0x00000000FFFFFFFFL));
+ expectEquals(1, signLong(0x7FFFFFFF7FFFFFFFL));
+ expectEquals(1, signLong(0x7FFFFFFFFFFFFFFFL));
+
+ for (long i = -11L; i <= 11L; i++) {
+ int expected = 0;
+ if (i < 0) expected = -1;
+ else if (i > 0) expected = 1;
+ expectEquals(expected, signLong(i));
+ }
+
+ for (long i = Long.MIN_VALUE; i <= Long.MIN_VALUE + 11L; i++) {
+ expectEquals(-1, signLong(i));
+ }
+
+ for (long i = Long.MAX_VALUE; i >= Long.MAX_VALUE - 11L; i--) {
+ expectEquals(1, signLong(i));
+ }
+ }
+
+
+ public static void main(String args[]) throws Exception {
+ testSignBoolean();
+ testSignByte();
+ testSignShort();
+ testSignChar();
+ testSignInt();
+ testSignLong();
+
+ System.out.println("passed");
+ }
+
+ private static void expectEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+}
diff --git a/test/566-checker-signum/src/Main.java b/test/566-checker-signum/src/Main.java
index 7fc9e84055..fa8e5cd1fe 100644
--- a/test/566-checker-signum/src/Main.java
+++ b/test/566-checker-signum/src/Main.java
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2016 The Android Open Source Project
+ * Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,222 +14,9 @@
* limitations under the License.
*/
+// This file is just for running on the RI as the test is ART specific.
public class Main {
-
- /// CHECK-START: int Main.signBoolean(boolean) intrinsics_recognition (after)
- /// CHECK-DAG: <<Method:[ij]\d+>> CurrentMethod
- /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0
- /// CHECK-DAG: <<One:i\d+>> IntConstant 1
- /// CHECK-DAG: <<Phi:i\d+>> Phi [<<One>>,<<Zero>>]
- /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect [<<Phi>>,<<Method>>] intrinsic:IntegerSignum
- /// CHECK-DAG: Return [<<Result>>]
-
- /// CHECK-START: int Main.signBoolean(boolean) instruction_simplifier (after)
- /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0
- /// CHECK-DAG: <<One:i\d+>> IntConstant 1
- /// CHECK-DAG: <<Phi:i\d+>> Phi [<<One>>,<<Zero>>]
- /// CHECK-DAG: <<Result:i\d+>> Compare [<<Phi>>,<<Zero>>]
- /// CHECK-DAG: Return [<<Result>>]
-
- /// CHECK-START: int Main.signBoolean(boolean) instruction_simplifier (after)
- /// CHECK-NOT: InvokeStaticOrDirect
-
- /// CHECK-START: int Main.signBoolean(boolean) select_generator (after)
- /// CHECK-DAG: <<Arg:z\d+>> ParameterValue
- /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0
- /// CHECK-DAG: <<One:i\d+>> IntConstant 1
- /// CHECK-DAG: <<Sel:i\d+>> Select [<<Zero>>,<<One>>,<<Arg>>]
- /// CHECK-DAG: <<Result:i\d+>> Compare [<<Sel>>,<<Zero>>]
- /// CHECK-DAG: Return [<<Result>>]
-
- /// CHECK-START: int Main.signBoolean(boolean) select_generator (after)
- /// CHECK-NOT: Phi
-
- /// CHECK-START: int Main.signBoolean(boolean) instruction_simplifier$after_bce (after)
- /// CHECK-DAG: <<Arg:z\d+>> ParameterValue
- /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0
- /// CHECK-DAG: <<Result:i\d+>> Compare [<<Arg>>,<<Zero>>]
- /// CHECK-DAG: Return [<<Result>>]
-
- /// CHECK-START: int Main.signBoolean(boolean) instruction_simplifier$after_bce (after)
- /// CHECK-NOT: Select
-
- private static int signBoolean(boolean x) {
- return Integer.signum(x ? 1 : 0);
- }
-
- /// CHECK-START: int Main.signByte(byte) intrinsics_recognition (after)
- /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerSignum
- /// CHECK-DAG: Return [<<Result>>]
-
- /// CHECK-START: int Main.signByte(byte) instruction_simplifier (after)
- /// CHECK-DAG: <<Result:i\d+>> Compare
- /// CHECK-DAG: Return [<<Result>>]
-
- /// CHECK-START: int Main.signByte(byte) instruction_simplifier (after)
- /// CHECK-NOT: InvokeStaticOrDirect
-
- private static int signByte(byte x) {
- return Integer.signum(x);
- }
-
- /// CHECK-START: int Main.signShort(short) intrinsics_recognition (after)
- /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerSignum
- /// CHECK-DAG: Return [<<Result>>]
-
- /// CHECK-START: int Main.signShort(short) instruction_simplifier (after)
- /// CHECK-DAG: <<Result:i\d+>> Compare
- /// CHECK-DAG: Return [<<Result>>]
-
- /// CHECK-START: int Main.signShort(short) instruction_simplifier (after)
- /// CHECK-NOT: InvokeStaticOrDirect
-
- private static int signShort(short x) {
- return Integer.signum(x);
- }
-
- /// CHECK-START: int Main.signChar(char) intrinsics_recognition (after)
- /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerSignum
- /// CHECK-DAG: Return [<<Result>>]
-
- /// CHECK-START: int Main.signChar(char) instruction_simplifier (after)
- /// CHECK-DAG: <<Result:i\d+>> Compare
- /// CHECK-DAG: Return [<<Result>>]
-
- /// CHECK-START: int Main.signChar(char) instruction_simplifier (after)
- /// CHECK-NOT: InvokeStaticOrDirect
-
- private static int signChar(char x) {
- return Integer.signum(x);
- }
-
- /// CHECK-START: int Main.signInt(int) intrinsics_recognition (after)
- /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect intrinsic:IntegerSignum
- /// CHECK-DAG: Return [<<Result>>]
-
- /// CHECK-START: int Main.signInt(int) instruction_simplifier (after)
- /// CHECK-DAG: <<Result:i\d+>> Compare
- /// CHECK-DAG: Return [<<Result>>]
-
- /// CHECK-START: int Main.signInt(int) instruction_simplifier (after)
- /// CHECK-NOT: InvokeStaticOrDirect
-
- private static int signInt(int x) {
- return Integer.signum(x);
- }
-
- /// CHECK-START: int Main.signLong(long) intrinsics_recognition (after)
- /// CHECK-DAG: <<Result:i\d+>> InvokeStaticOrDirect intrinsic:LongSignum
- /// CHECK-DAG: Return [<<Result>>]
-
- /// CHECK-START: int Main.signLong(long) instruction_simplifier (after)
- /// CHECK-DAG: <<Result:i\d+>> Compare
- /// CHECK-DAG: Return [<<Result>>]
-
- /// CHECK-START: int Main.signLong(long) instruction_simplifier (after)
- /// CHECK-NOT: InvokeStaticOrDirect
-
- private static int signLong(long x) {
- return Long.signum(x);
- }
-
-
- public static void testSignBoolean() {
- expectEquals(0, signBoolean(false));
- expectEquals(1, signBoolean(true));
- }
-
- public static void testSignByte() {
- expectEquals(-1, signByte((byte)Byte.MIN_VALUE));
- expectEquals(-1, signByte((byte)-64));
- expectEquals(-1, signByte((byte)-1));
- expectEquals(0, signByte((byte)0));
- expectEquals(1, signByte((byte)1));
- expectEquals(1, signByte((byte)64));
- expectEquals(1, signByte((byte)Byte.MAX_VALUE));
- }
-
- public static void testSignShort() {
- expectEquals(-1, signShort((short)Short.MIN_VALUE));
- expectEquals(-1, signShort((short)-12345));
- expectEquals(-1, signShort((short)-1));
- expectEquals(0, signShort((short)0));
- expectEquals(1, signShort((short)1));
- expectEquals(1, signShort((short)12345));
- expectEquals(1, signShort((short)Short.MAX_VALUE));
- }
-
- public static void testSignChar() {
- expectEquals(0, signChar((char)0));
- expectEquals(1, signChar((char)1));
- expectEquals(1, signChar((char)12345));
- expectEquals(1, signChar((char)Character.MAX_VALUE));
- }
-
- public static void testSignInt() {
- expectEquals(-1, signInt(Integer.MIN_VALUE));
- expectEquals(-1, signInt(-12345));
- expectEquals(-1, signInt(-1));
- expectEquals(0, signInt(0));
- expectEquals(1, signInt(1));
- expectEquals(1, signInt(12345));
- expectEquals(1, signInt(Integer.MAX_VALUE));
-
- for (int i = -11; i <= 11; i++) {
- int expected = 0;
- if (i < 0) expected = -1;
- else if (i > 0) expected = 1;
- expectEquals(expected, signInt(i));
- }
- }
-
- public static void testSignLong() {
- expectEquals(-1, signLong(Long.MIN_VALUE));
- expectEquals(-1, signLong(-12345L));
- expectEquals(-1, signLong(-1L));
- expectEquals(0, signLong(0L));
- expectEquals(1, signLong(1L));
- expectEquals(1, signLong(12345L));
- expectEquals(1, signLong(Long.MAX_VALUE));
-
- expectEquals(-1, signLong(0x800000007FFFFFFFL));
- expectEquals(-1, signLong(0x80000000FFFFFFFFL));
- expectEquals(1, signLong(0x000000007FFFFFFFL));
- expectEquals(1, signLong(0x00000000FFFFFFFFL));
- expectEquals(1, signLong(0x7FFFFFFF7FFFFFFFL));
- expectEquals(1, signLong(0x7FFFFFFFFFFFFFFFL));
-
- for (long i = -11L; i <= 11L; i++) {
- int expected = 0;
- if (i < 0) expected = -1;
- else if (i > 0) expected = 1;
- expectEquals(expected, signLong(i));
- }
-
- for (long i = Long.MIN_VALUE; i <= Long.MIN_VALUE + 11L; i++) {
- expectEquals(-1, signLong(i));
- }
-
- for (long i = Long.MAX_VALUE; i >= Long.MAX_VALUE - 11L; i--) {
- expectEquals(1, signLong(i));
- }
- }
-
-
- public static void main(String args[]) {
- testSignBoolean();
- testSignByte();
- testSignShort();
- testSignChar();
- testSignInt();
- testSignLong();
-
+ public static void main(String[] args) {
System.out.println("passed");
}
-
- private static void expectEquals(int expected, int result) {
- if (expected != result) {
- throw new Error("Expected: " + expected + ", found: " + result);
- }
- }
}
diff --git a/test/570-checker-osr/build b/test/570-checker-osr/build
deleted file mode 100644
index 10ffcc537d..0000000000
--- a/test/570-checker-osr/build
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2017 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# See b/65168732
-export USE_D8=false
-
-./default-build "$@"
diff --git a/test/583-checker-zero/build b/test/583-checker-zero/build
deleted file mode 100644
index d85147f17b..0000000000
--- a/test/583-checker-zero/build
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2018 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# See b/65168732
-export USE_D8=false
-
-./default-build "$@"
diff --git a/test/618-checker-induction/build b/test/618-checker-induction/build
deleted file mode 100644
index 10ffcc537d..0000000000
--- a/test/618-checker-induction/build
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2017 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# See b/65168732
-export USE_D8=false
-
-./default-build "$@"
diff --git a/test/618-checker-induction/src/Main.java b/test/618-checker-induction/src/Main.java
index 0080ffa464..1460725e10 100644
--- a/test/618-checker-induction/src/Main.java
+++ b/test/618-checker-induction/src/Main.java
@@ -290,7 +290,7 @@ public class Main {
/// CHECK-DAG: <<Phi2:i\d+>> Phi loop:<<Loop1>> outer_loop:none
/// CHECK-DAG: <<Phi3:i\d+>> Phi loop:<<Loop2:B\d+>> outer_loop:<<Loop1>>
/// CHECK-DAG: <<Phi4:i\d+>> Phi loop:<<Loop2>> outer_loop:<<Loop1>>
- /// CHECK-DAG: Return [<<Phi1>>] loop:none
+ /// CHECK-DAG: Return [<<Phi2>>] loop:none
//
/// CHECK-START: int Main.closedFormNested() loop_optimization (after)
/// CHECK-NOT: Phi
@@ -313,7 +313,7 @@ public class Main {
/// CHECK-DAG: <<Phi2:i\d+>> Phi loop:<<Loop1>> outer_loop:none
/// CHECK-DAG: <<Phi3:i\d+>> Phi loop:<<Loop2:B\d+>> outer_loop:<<Loop1>>
/// CHECK-DAG: <<Phi4:i\d+>> Phi loop:<<Loop2>> outer_loop:<<Loop1>>
- /// CHECK-DAG: Return [<<Phi1>>] loop:none
+ /// CHECK-DAG: Return [<<Phi2>>] loop:none
//
/// CHECK-START: int Main.closedFormNestedAlt() loop_optimization (after)
/// CHECK-NOT: Phi
@@ -411,7 +411,7 @@ public class Main {
/// CHECK-START: int Main.periodicReturned9() loop_optimization (before)
/// CHECK-DAG: <<Phi1:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Phi2:i\d+>> Phi loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: Return [<<Phi2>>] loop:none
+ /// CHECK-DAG: Return [<<Phi1>>] loop:none
//
/// CHECK-START: int Main.periodicReturned9() loop_optimization (after)
/// CHECK-NOT: Phi
@@ -430,7 +430,7 @@ public class Main {
/// CHECK-START: int Main.periodicReturned10() loop_optimization (before)
/// CHECK-DAG: <<Phi1:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Phi2:i\d+>> Phi loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: Return [<<Phi2>>] loop:none
+ /// CHECK-DAG: Return [<<Phi1>>] loop:none
//
/// CHECK-START: int Main.periodicReturned10() loop_optimization (after)
/// CHECK-NOT: Phi
@@ -450,7 +450,7 @@ public class Main {
/// CHECK-DAG: <<Phi1:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Phi2:i\d+>> Phi loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Phi3:i\d+>> Phi loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: Return [<<Phi3>>] loop:none
+ /// CHECK-DAG: Return [<<Phi2>>] loop:none
//
/// CHECK-START: int Main.getSum21() loop_optimization (after)
/// CHECK-NOT: Phi
@@ -505,7 +505,7 @@ public class Main {
/// CHECK-START: int Main.periodicReturnedN(int) loop_optimization (before)
/// CHECK-DAG: <<Phi1:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Phi2:i\d+>> Phi loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: Return [<<Phi2>>] loop:none
+ /// CHECK-DAG: Return [<<Phi1>>] loop:none
//
/// CHECK-START: int Main.periodicReturnedN(int) loop_optimization (after)
/// CHECK-NOT: Phi
@@ -547,7 +547,7 @@ public class Main {
/// CHECK-DAG: <<Phi2:i\d+>> Phi loop:<<Loop1>> outer_loop:none
/// CHECK-DAG: <<Phi3:i\d+>> Phi loop:<<Loop2:B\d+>> outer_loop:none
/// CHECK-DAG: <<Phi4:i\d+>> Phi loop:<<Loop2>> outer_loop:none
- /// CHECK-DAG: Return [<<Phi3>>] loop:none
+ /// CHECK-DAG: Return [<<Phi4>>] loop:none
/// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
//
/// CHECK-START: int Main.closedFeed() loop_optimization (after)
@@ -691,7 +691,7 @@ public class Main {
/// CHECK-START: boolean Main.periodicBoolIdiom1N(boolean, int) loop_optimization (before)
/// CHECK-DAG: <<Phi1:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Phi2:i\d+>> Phi loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: Return [<<Phi2>>] loop:none
+ /// CHECK-DAG: Return [<<Phi1>>] loop:none
//
/// CHECK-START: boolean Main.periodicBoolIdiom1N(boolean, int) loop_optimization (after)
/// CHECK-NOT: Phi
@@ -705,7 +705,7 @@ public class Main {
/// CHECK-START: boolean Main.periodicBoolIdiom2N(boolean, int) loop_optimization (before)
/// CHECK-DAG: <<Phi1:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Phi2:i\d+>> Phi loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: Return [<<Phi2>>] loop:none
+ /// CHECK-DAG: Return [<<Phi1>>] loop:none
//
/// CHECK-START: boolean Main.periodicBoolIdiom2N(boolean, int) loop_optimization (after)
/// CHECK-NOT: Phi
@@ -719,7 +719,7 @@ public class Main {
/// CHECK-START: boolean Main.periodicBoolIdiom3N(boolean, int) loop_optimization (before)
/// CHECK-DAG: <<Phi1:i\d+>> Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: <<Phi2:i\d+>> Phi loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: Return [<<Phi2>>] loop:none
+ /// CHECK-DAG: Return [<<Phi1>>] loop:none
//
/// CHECK-START: boolean Main.periodicBoolIdiom3N(boolean, int) loop_optimization (after)
/// CHECK-NOT: Phi
diff --git a/test/626-checker-arm64-scratch-register/build b/test/626-checker-arm64-scratch-register/build
deleted file mode 100644
index d85147f17b..0000000000
--- a/test/626-checker-arm64-scratch-register/build
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2018 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# See b/65168732
-export USE_D8=false
-
-./default-build "$@"
diff --git a/test/626-checker-arm64-scratch-register/smali/Main2.smali b/test/626-checker-arm64-scratch-register/smali/Main2.smali
new file mode 100644
index 0000000000..914ae6eeaf
--- /dev/null
+++ b/test/626-checker-arm64-scratch-register/smali/Main2.smali
@@ -0,0 +1,1768 @@
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LMain2;
+.super Ljava/lang/Object;
+
+
+# instance fields
+.field b00:Z
+
+.field b01:Z
+
+.field b02:Z
+
+.field b03:Z
+
+.field b04:Z
+
+.field b05:Z
+
+.field b06:Z
+
+.field b07:Z
+
+.field b08:Z
+
+.field b09:Z
+
+.field b10:Z
+
+.field b11:Z
+
+.field b12:Z
+
+.field b13:Z
+
+.field b14:Z
+
+.field b15:Z
+
+.field b16:Z
+
+.field b17:Z
+
+.field b18:Z
+
+.field b19:Z
+
+.field b20:Z
+
+.field b21:Z
+
+.field b22:Z
+
+.field b23:Z
+
+.field b24:Z
+
+.field b25:Z
+
+.field b26:Z
+
+.field b27:Z
+
+.field b28:Z
+
+.field b29:Z
+
+.field b30:Z
+
+.field b31:Z
+
+.field b32:Z
+
+.field b33:Z
+
+.field b34:Z
+
+.field b35:Z
+
+.field b36:Z
+
+.field conditionA:Z
+
+.field conditionB:Z
+
+.field conditionC:Z
+
+
+# direct methods
+.method public constructor <init>()V
+ .registers 1
+
+ .prologue
+ .line 17
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+
+ return-void
+.end method
+
+## CHECK-START-ARM64: void Main2.test() register (after)
+## CHECK: begin_block
+## CHECK: name "B0"
+## CHECK: <<This:l\d+>> ParameterValue
+## CHECK: end_block
+## CHECK: begin_block
+## CHECK: successors "<<ThenBlock:B\d+>>" "<<ElseBlock:B\d+>>"
+## CHECK: <<CondB:z\d+>> InstanceFieldGet [<<This>>] field_name:Main2.conditionB
+## CHECK: If [<<CondB>>]
+## CHECK: end_block
+## CHECK: begin_block
+## CHECK: name "<<ElseBlock>>"
+## CHECK: ParallelMove moves:[40(sp)->d0,24(sp)->32(sp),28(sp)->36(sp),d0->d3,d3->d4,d2->d5,d4->d6,d5->d7,d6->d18,d7->d19,d18->d20,d19->d21,d20->d22,d21->d23,d22->d10,d23->d11,16(sp)->24(sp),20(sp)->28(sp),d10->d14,d11->d12,d12->d13,d13->d1,d14->d2,32(sp)->16(sp),36(sp)->20(sp)]
+## CHECK: end_block
+
+## CHECK-START-ARM64: void Main2.test() disassembly (after)
+## CHECK: begin_block
+## CHECK: name "B0"
+## CHECK: <<This:l\d+>> ParameterValue
+## CHECK: end_block
+## CHECK: begin_block
+## CHECK: successors "<<ThenBlock:B\d+>>" "<<ElseBlock:B\d+>>"
+## CHECK: <<CondB:z\d+>> InstanceFieldGet [<<This>>] field_name:Main2.conditionB
+## CHECK: If [<<CondB>>]
+## CHECK: end_block
+## CHECK: begin_block
+## CHECK: name "<<ElseBlock>>"
+## CHECK: ParallelMove moves:[invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid]
+## CHECK: fmov d31, d2
+## CHECK: ldr s2, [sp, #36]
+## CHECK: ldr w16, [sp, #16]
+## CHECK: str w16, [sp, #36]
+## CHECK: str s14, [sp, #16]
+## CHECK: ldr s14, [sp, #28]
+## CHECK: str s1, [sp, #28]
+## CHECK: ldr s1, [sp, #32]
+## CHECK: str s31, [sp, #32]
+## CHECK: ldr s31, [sp, #20]
+## CHECK: str s31, [sp, #40]
+## CHECK: str s12, [sp, #20]
+## CHECK: fmov d12, d11
+## CHECK: fmov d11, d10
+## CHECK: fmov d10, d23
+## CHECK: fmov d23, d22
+## CHECK: fmov d22, d21
+## CHECK: fmov d21, d20
+## CHECK: fmov d20, d19
+## CHECK: fmov d19, d18
+## CHECK: fmov d18, d7
+## CHECK: fmov d7, d6
+## CHECK: fmov d6, d5
+## CHECK: fmov d5, d4
+## CHECK: fmov d4, d3
+## CHECK: fmov d3, d13
+## CHECK: ldr s13, [sp, #24]
+## CHECK: str s3, [sp, #24]
+## CHECK: ldr s3, pc+{{\d+}} (addr {{0x[0-9a-f]+}}) (100)
+## CHECK: end_block
+
+# Original java source:
+#
+# public void test() {
+# String r = "";
+#
+# // For the purpose of this regression test, the order of
+# // definition of these float variable matters. Likewise with the
+# // order of the instructions where these variables are used below.
+# // Reordering these lines make make the original (b/32545705)
+# // issue vanish.
+# float f17 = b17 ? 0.0f : 1.0f;
+# float f16 = b16 ? 0.0f : 1.0f;
+# float f18 = b18 ? 0.0f : 1.0f;
+# float f19 = b19 ? 0.0f : 1.0f;
+# float f20 = b20 ? 0.0f : 1.0f;
+# float f21 = b21 ? 0.0f : 1.0f;
+# float f15 = b15 ? 0.0f : 1.0f;
+# float f00 = b00 ? 0.0f : 1.0f;
+# float f22 = b22 ? 0.0f : 1.0f;
+# float f23 = b23 ? 0.0f : 1.0f;
+# float f24 = b24 ? 0.0f : 1.0f;
+# float f25 = b25 ? 0.0f : 1.0f;
+# float f26 = b26 ? 0.0f : 1.0f;
+# float f27 = b27 ? 0.0f : 1.0f;
+# float f29 = b29 ? 0.0f : 1.0f;
+# float f28 = b28 ? 0.0f : 1.0f;
+# float f01 = b01 ? 0.0f : 1.0f;
+# float f02 = b02 ? 0.0f : 1.0f;
+# float f03 = b03 ? 0.0f : 1.0f;
+# float f04 = b04 ? 0.0f : 1.0f;
+# float f05 = b05 ? 0.0f : 1.0f;
+# float f07 = b07 ? 0.0f : 1.0f;
+# float f06 = b06 ? 0.0f : 1.0f;
+# float f30 = b30 ? 0.0f : 1.0f;
+# float f31 = b31 ? 0.0f : 1.0f;
+# float f32 = b32 ? 0.0f : 1.0f;
+# float f33 = b33 ? 0.0f : 1.0f;
+# float f34 = b34 ? 0.0f : 1.0f;
+# float f36 = b36 ? 0.0f : 1.0f;
+# float f35 = b35 ? 0.0f : 1.0f;
+# float f08 = b08 ? 0.0f : 1.0f;
+# float f09 = b09 ? 0.0f : 1.0f;
+# float f10 = b10 ? 0.0f : 1.0f;
+# float f11 = b11 ? 0.0f : 1.0f;
+# float f12 = b12 ? 0.0f : 1.0f;
+# float f14 = b14 ? 0.0f : 1.0f;
+# float f13 = b13 ? 0.0f : 1.0f;
+#
+# if (conditionA) {
+# f16 /= 1000.0f;
+# f17 /= 1000.0f;
+# f18 /= 1000.0f;
+# f19 /= 1000.0f;
+# f20 /= 1000.0f;
+# f21 /= 1000.0f;
+# f15 /= 1000.0f;
+# f08 /= 1000.0f;
+# f09 /= 1000.0f;
+# f10 /= 1000.0f;
+# f11 /= 1000.0f;
+# f12 /= 1000.0f;
+# f30 /= 1000.0f;
+# f31 /= 1000.0f;
+# f32 /= 1000.0f;
+# f33 /= 1000.0f;
+# f34 /= 1000.0f;
+# f01 /= 1000.0f;
+# f02 /= 1000.0f;
+# f03 /= 1000.0f;
+# f04 /= 1000.0f;
+# f05 /= 1000.0f;
+# f23 /= 1000.0f;
+# f24 /= 1000.0f;
+# f25 /= 1000.0f;
+# f26 /= 1000.0f;
+# f27 /= 1000.0f;
+# f22 /= 1000.0f;
+# f00 /= 1000.0f;
+# f14 /= 1000.0f;
+# f13 /= 1000.0f;
+# f36 /= 1000.0f;
+# f35 /= 1000.0f;
+# f07 /= 1000.0f;
+# f06 /= 1000.0f;
+# f29 /= 1000.0f;
+# f28 /= 1000.0f;
+# }
+# // The parallel move that used to exhaust the ARM64 parallel move
+# // resolver's scratch register pool (provided by VIXL) was in the
+# // "else" branch of the following condition generated by ART's
+# // compiler.
+# if (conditionB) {
+# f16 /= 100.0f;
+# f17 /= 100.0f;
+# f18 /= 100.0f;
+# f19 /= 100.0f;
+# f20 /= 100.0f;
+# f21 /= 100.0f;
+# f15 /= 100.0f;
+# f08 /= 100.0f;
+# f09 /= 100.0f;
+# f10 /= 100.0f;
+# f11 /= 100.0f;
+# f12 /= 100.0f;
+# f30 /= 100.0f;
+# f31 /= 100.0f;
+# f32 /= 100.0f;
+# f33 /= 100.0f;
+# f34 /= 100.0f;
+# f01 /= 100.0f;
+# f02 /= 100.0f;
+# f03 /= 100.0f;
+# f04 /= 100.0f;
+# f05 /= 100.0f;
+# f23 /= 100.0f;
+# f24 /= 100.0f;
+# f25 /= 100.0f;
+# f26 /= 100.0f;
+# f27 /= 100.0f;
+# f22 /= 100.0f;
+# f00 /= 100.0f;
+# f14 /= 100.0f;
+# f13 /= 100.0f;
+# f36 /= 100.0f;
+# f35 /= 100.0f;
+# f07 /= 100.0f;
+# f06 /= 100.0f;
+# f29 /= 100.0f;
+# f28 /= 100.0f;
+# }
+# if (conditionC) {
+# f16 /= 12.0f;
+# f17 /= 12.0f;
+# f18 /= 12.0f;
+# f19 /= 12.0f;
+# f20 /= 12.0f;
+# f21 /= 12.0f;
+# f15 /= 12.0f;
+# f08 /= 12.0f;
+# f09 /= 12.0f;
+# f10 /= 12.0f;
+# f11 /= 12.0f;
+# f12 /= 12.0f;
+# f30 /= 12.0f;
+# f31 /= 12.0f;
+# f32 /= 12.0f;
+# f33 /= 12.0f;
+# f34 /= 12.0f;
+# f01 /= 12.0f;
+# f02 /= 12.0f;
+# f03 /= 12.0f;
+# f04 /= 12.0f;
+# f05 /= 12.0f;
+# f23 /= 12.0f;
+# f24 /= 12.0f;
+# f25 /= 12.0f;
+# f26 /= 12.0f;
+# f27 /= 12.0f;
+# f22 /= 12.0f;
+# f00 /= 12.0f;
+# f14 /= 12.0f;
+# f13 /= 12.0f;
+# f36 /= 12.0f;
+# f35 /= 12.0f;
+# f07 /= 12.0f;
+# f06 /= 12.0f;
+# f29 /= 12.0f;
+# f28 /= 12.0f;
+# }
+# float s = 0.0f;
+# s = ((float) Math.round(100.0f * s)) / 100.0f;
+# String res = s + r;
+# }
+
+# virtual methods
+.method public test()V
+ .registers 45
+
+ .prologue
+ .line 121
+ const-string v39, ""
+
+ .line 128
+ .local v39, "r":Ljava/lang/String;
+ move-object/from16 v0, p0
+
+ iget-boolean v0, v0, LMain2;->b17:Z
+
+ move/from16 v42, v0
+
+ if-eqz v42, :cond_367
+
+ const/16 v19, 0x0
+
+ .line 129
+ .local v19, "f17":F
+ :goto_c
+ move-object/from16 v0, p0
+
+ iget-boolean v0, v0, LMain2;->b16:Z
+
+ move/from16 v42, v0
+
+ if-eqz v42, :cond_36b
+
+ const/16 v18, 0x0
+
+ .line 130
+ .local v18, "f16":F
+ :goto_16
+ move-object/from16 v0, p0
+
+ iget-boolean v0, v0, LMain2;->b18:Z
+
+ move/from16 v42, v0
+
+ if-eqz v42, :cond_36f
+
+ const/16 v20, 0x0
+
+ .line 131
+ .local v20, "f18":F
+ :goto_20
+ move-object/from16 v0, p0
+
+ iget-boolean v0, v0, LMain2;->b19:Z
+
+ move/from16 v42, v0
+
+ if-eqz v42, :cond_373
+
+ const/16 v21, 0x0
+
+ .line 132
+ .local v21, "f19":F
+ :goto_2a
+ move-object/from16 v0, p0
+
+ iget-boolean v0, v0, LMain2;->b20:Z
+
+ move/from16 v42, v0
+
+ if-eqz v42, :cond_377
+
+ const/16 v22, 0x0
+
+ .line 133
+ .local v22, "f20":F
+ :goto_34
+ move-object/from16 v0, p0
+
+ iget-boolean v0, v0, LMain2;->b21:Z
+
+ move/from16 v42, v0
+
+ if-eqz v42, :cond_37b
+
+ const/16 v23, 0x0
+
+ .line 134
+ .local v23, "f21":F
+ :goto_3e
+ move-object/from16 v0, p0
+
+ iget-boolean v0, v0, LMain2;->b15:Z
+
+ move/from16 v42, v0
+
+ if-eqz v42, :cond_37f
+
+ const/16 v17, 0x0
+
+ .line 135
+ .local v17, "f15":F
+ :goto_48
+ move-object/from16 v0, p0
+
+ iget-boolean v0, v0, LMain2;->b00:Z
+
+ move/from16 v42, v0
+
+ if-eqz v42, :cond_383
+
+ const/4 v2, 0x0
+
+ .line 136
+ .local v2, "f00":F
+ :goto_51
+ move-object/from16 v0, p0
+
+ iget-boolean v0, v0, LMain2;->b22:Z
+
+ move/from16 v42, v0
+
+ if-eqz v42, :cond_387
+
+ const/16 v24, 0x0
+
+ .line 137
+ .local v24, "f22":F
+ :goto_5b
+ move-object/from16 v0, p0
+
+ iget-boolean v0, v0, LMain2;->b23:Z
+
+ move/from16 v42, v0
+
+ if-eqz v42, :cond_38b
+
+ const/16 v25, 0x0
+
+ .line 138
+ .local v25, "f23":F
+ :goto_65
+ move-object/from16 v0, p0
+
+ iget-boolean v0, v0, LMain2;->b24:Z
+
+ move/from16 v42, v0
+
+ if-eqz v42, :cond_38f
+
+ const/16 v26, 0x0
+
+ .line 139
+ .local v26, "f24":F
+ :goto_6f
+ move-object/from16 v0, p0
+
+ iget-boolean v0, v0, LMain2;->b25:Z
+
+ move/from16 v42, v0
+
+ if-eqz v42, :cond_393
+
+ const/16 v27, 0x0
+
+ .line 140
+ .local v27, "f25":F
+ :goto_79
+ move-object/from16 v0, p0
+
+ iget-boolean v0, v0, LMain2;->b26:Z
+
+ move/from16 v42, v0
+
+ if-eqz v42, :cond_397
+
+ const/16 v28, 0x0
+
+ .line 141
+ .local v28, "f26":F
+ :goto_83
+ move-object/from16 v0, p0
+
+ iget-boolean v0, v0, LMain2;->b27:Z
+
+ move/from16 v42, v0
+
+ if-eqz v42, :cond_39b
+
+ const/16 v29, 0x0
+
+ .line 142
+ .local v29, "f27":F
+ :goto_8d
+ move-object/from16 v0, p0
+
+ iget-boolean v0, v0, LMain2;->b29:Z
+
+ move/from16 v42, v0
+
+ if-eqz v42, :cond_39f
+
+ const/16 v31, 0x0
+
+ .line 143
+ .local v31, "f29":F
+ :goto_97
+ move-object/from16 v0, p0
+
+ iget-boolean v0, v0, LMain2;->b28:Z
+
+ move/from16 v42, v0
+
+ if-eqz v42, :cond_3a3
+
+ const/16 v30, 0x0
+
+ .line 144
+ .local v30, "f28":F
+ :goto_a1
+ move-object/from16 v0, p0
+
+ iget-boolean v0, v0, LMain2;->b01:Z
+
+ move/from16 v42, v0
+
+ if-eqz v42, :cond_3a7
+
+ const/4 v3, 0x0
+
+ .line 145
+ .local v3, "f01":F
+ :goto_aa
+ move-object/from16 v0, p0
+
+ iget-boolean v0, v0, LMain2;->b02:Z
+
+ move/from16 v42, v0
+
+ if-eqz v42, :cond_3ab
+
+ const/4 v4, 0x0
+
+ .line 146
+ .local v4, "f02":F
+ :goto_b3
+ move-object/from16 v0, p0
+
+ iget-boolean v0, v0, LMain2;->b03:Z
+
+ move/from16 v42, v0
+
+ if-eqz v42, :cond_3af
+
+ const/4 v5, 0x0
+
+ .line 147
+ .local v5, "f03":F
+ :goto_bc
+ move-object/from16 v0, p0
+
+ iget-boolean v0, v0, LMain2;->b04:Z
+
+ move/from16 v42, v0
+
+ if-eqz v42, :cond_3b3
+
+ const/4 v6, 0x0
+
+ .line 148
+ .local v6, "f04":F
+ :goto_c5
+ move-object/from16 v0, p0
+
+ iget-boolean v0, v0, LMain2;->b05:Z
+
+ move/from16 v42, v0
+
+ if-eqz v42, :cond_3b7
+
+ const/4 v7, 0x0
+
+ .line 149
+ .local v7, "f05":F
+ :goto_ce
+ move-object/from16 v0, p0
+
+ iget-boolean v0, v0, LMain2;->b07:Z
+
+ move/from16 v42, v0
+
+ if-eqz v42, :cond_3bb
+
+ const/4 v9, 0x0
+
+ .line 150
+ .local v9, "f07":F
+ :goto_d7
+ move-object/from16 v0, p0
+
+ iget-boolean v0, v0, LMain2;->b06:Z
+
+ move/from16 v42, v0
+
+ if-eqz v42, :cond_3bf
+
+ const/4 v8, 0x0
+
+ .line 151
+ .local v8, "f06":F
+ :goto_e0
+ move-object/from16 v0, p0
+
+ iget-boolean v0, v0, LMain2;->b30:Z
+
+ move/from16 v42, v0
+
+ if-eqz v42, :cond_3c3
+
+ const/16 v32, 0x0
+
+ .line 152
+ .local v32, "f30":F
+ :goto_ea
+ move-object/from16 v0, p0
+
+ iget-boolean v0, v0, LMain2;->b31:Z
+
+ move/from16 v42, v0
+
+ if-eqz v42, :cond_3c7
+
+ const/16 v33, 0x0
+
+ .line 153
+ .local v33, "f31":F
+ :goto_f4
+ move-object/from16 v0, p0
+
+ iget-boolean v0, v0, LMain2;->b32:Z
+
+ move/from16 v42, v0
+
+ if-eqz v42, :cond_3cb
+
+ const/16 v34, 0x0
+
+ .line 154
+ .local v34, "f32":F
+ :goto_fe
+ move-object/from16 v0, p0
+
+ iget-boolean v0, v0, LMain2;->b33:Z
+
+ move/from16 v42, v0
+
+ if-eqz v42, :cond_3cf
+
+ const/16 v35, 0x0
+
+ .line 155
+ .local v35, "f33":F
+ :goto_108
+ move-object/from16 v0, p0
+
+ iget-boolean v0, v0, LMain2;->b34:Z
+
+ move/from16 v42, v0
+
+ if-eqz v42, :cond_3d3
+
+ const/16 v36, 0x0
+
+ .line 156
+ .local v36, "f34":F
+ :goto_112
+ move-object/from16 v0, p0
+
+ iget-boolean v0, v0, LMain2;->b36:Z
+
+ move/from16 v42, v0
+
+ if-eqz v42, :cond_3d7
+
+ const/16 v38, 0x0
+
+ .line 157
+ .local v38, "f36":F
+ :goto_11c
+ move-object/from16 v0, p0
+
+ iget-boolean v0, v0, LMain2;->b35:Z
+
+ move/from16 v42, v0
+
+ if-eqz v42, :cond_3db
+
+ const/16 v37, 0x0
+
+ .line 158
+ .local v37, "f35":F
+ :goto_126
+ move-object/from16 v0, p0
+
+ iget-boolean v0, v0, LMain2;->b08:Z
+
+ move/from16 v42, v0
+
+ if-eqz v42, :cond_3df
+
+ const/4 v10, 0x0
+
+ .line 159
+ .local v10, "f08":F
+ :goto_12f
+ move-object/from16 v0, p0
+
+ iget-boolean v0, v0, LMain2;->b09:Z
+
+ move/from16 v42, v0
+
+ if-eqz v42, :cond_3e3
+
+ const/4 v11, 0x0
+
+ .line 160
+ .local v11, "f09":F
+ :goto_138
+ move-object/from16 v0, p0
+
+ iget-boolean v0, v0, LMain2;->b10:Z
+
+ move/from16 v42, v0
+
+ if-eqz v42, :cond_3e7
+
+ const/4 v12, 0x0
+
+ .line 161
+ .local v12, "f10":F
+ :goto_141
+ move-object/from16 v0, p0
+
+ iget-boolean v0, v0, LMain2;->b11:Z
+
+ move/from16 v42, v0
+
+ if-eqz v42, :cond_3eb
+
+ const/4 v13, 0x0
+
+ .line 162
+ .local v13, "f11":F
+ :goto_14a
+ move-object/from16 v0, p0
+
+ iget-boolean v0, v0, LMain2;->b12:Z
+
+ move/from16 v42, v0
+
+ if-eqz v42, :cond_3ef
+
+ const/4 v14, 0x0
+
+ .line 163
+ .local v14, "f12":F
+ :goto_153
+ move-object/from16 v0, p0
+
+ iget-boolean v0, v0, LMain2;->b14:Z
+
+ move/from16 v42, v0
+
+ if-eqz v42, :cond_3f3
+
+ const/16 v16, 0x0
+
+ .line 164
+ .local v16, "f14":F
+ :goto_15d
+ move-object/from16 v0, p0
+
+ iget-boolean v0, v0, LMain2;->b13:Z
+
+ move/from16 v42, v0
+
+ if-eqz v42, :cond_3f7
+
+ const/4 v15, 0x0
+
+ .line 166
+ .local v15, "f13":F
+ :goto_166
+ move-object/from16 v0, p0
+
+ iget-boolean v0, v0, LMain2;->conditionA:Z
+
+ move/from16 v42, v0
+
+ if-eqz v42, :cond_202
+
+ .line 167
+ const/high16 v42, 0x447a0000 # 1000.0f
+
+ div-float v18, v18, v42
+
+ .line 168
+ const/high16 v42, 0x447a0000 # 1000.0f
+
+ div-float v19, v19, v42
+
+ .line 169
+ const/high16 v42, 0x447a0000 # 1000.0f
+
+ div-float v20, v20, v42
+
+ .line 170
+ const/high16 v42, 0x447a0000 # 1000.0f
+
+ div-float v21, v21, v42
+
+ .line 171
+ const/high16 v42, 0x447a0000 # 1000.0f
+
+ div-float v22, v22, v42
+
+ .line 172
+ const/high16 v42, 0x447a0000 # 1000.0f
+
+ div-float v23, v23, v42
+
+ .line 173
+ const/high16 v42, 0x447a0000 # 1000.0f
+
+ div-float v17, v17, v42
+
+ .line 174
+ const/high16 v42, 0x447a0000 # 1000.0f
+
+ div-float v10, v10, v42
+
+ .line 175
+ const/high16 v42, 0x447a0000 # 1000.0f
+
+ div-float v11, v11, v42
+
+ .line 176
+ const/high16 v42, 0x447a0000 # 1000.0f
+
+ div-float v12, v12, v42
+
+ .line 177
+ const/high16 v42, 0x447a0000 # 1000.0f
+
+ div-float v13, v13, v42
+
+ .line 178
+ const/high16 v42, 0x447a0000 # 1000.0f
+
+ div-float v14, v14, v42
+
+ .line 179
+ const/high16 v42, 0x447a0000 # 1000.0f
+
+ div-float v32, v32, v42
+
+ .line 180
+ const/high16 v42, 0x447a0000 # 1000.0f
+
+ div-float v33, v33, v42
+
+ .line 181
+ const/high16 v42, 0x447a0000 # 1000.0f
+
+ div-float v34, v34, v42
+
+ .line 182
+ const/high16 v42, 0x447a0000 # 1000.0f
+
+ div-float v35, v35, v42
+
+ .line 183
+ const/high16 v42, 0x447a0000 # 1000.0f
+
+ div-float v36, v36, v42
+
+ .line 184
+ const/high16 v42, 0x447a0000 # 1000.0f
+
+ div-float v3, v3, v42
+
+ .line 185
+ const/high16 v42, 0x447a0000 # 1000.0f
+
+ div-float v4, v4, v42
+
+ .line 186
+ const/high16 v42, 0x447a0000 # 1000.0f
+
+ div-float v5, v5, v42
+
+ .line 187
+ const/high16 v42, 0x447a0000 # 1000.0f
+
+ div-float v6, v6, v42
+
+ .line 188
+ const/high16 v42, 0x447a0000 # 1000.0f
+
+ div-float v7, v7, v42
+
+ .line 189
+ const/high16 v42, 0x447a0000 # 1000.0f
+
+ div-float v25, v25, v42
+
+ .line 190
+ const/high16 v42, 0x447a0000 # 1000.0f
+
+ div-float v26, v26, v42
+
+ .line 191
+ const/high16 v42, 0x447a0000 # 1000.0f
+
+ div-float v27, v27, v42
+
+ .line 192
+ const/high16 v42, 0x447a0000 # 1000.0f
+
+ div-float v28, v28, v42
+
+ .line 193
+ const/high16 v42, 0x447a0000 # 1000.0f
+
+ div-float v29, v29, v42
+
+ .line 194
+ const/high16 v42, 0x447a0000 # 1000.0f
+
+ div-float v24, v24, v42
+
+ .line 195
+ const/high16 v42, 0x447a0000 # 1000.0f
+
+ div-float v2, v2, v42
+
+ .line 196
+ const/high16 v42, 0x447a0000 # 1000.0f
+
+ div-float v16, v16, v42
+
+ .line 197
+ const/high16 v42, 0x447a0000 # 1000.0f
+
+ div-float v15, v15, v42
+
+ .line 198
+ const/high16 v42, 0x447a0000 # 1000.0f
+
+ div-float v38, v38, v42
+
+ .line 199
+ const/high16 v42, 0x447a0000 # 1000.0f
+
+ div-float v37, v37, v42
+
+ .line 200
+ const/high16 v42, 0x447a0000 # 1000.0f
+
+ div-float v9, v9, v42
+
+ .line 201
+ const/high16 v42, 0x447a0000 # 1000.0f
+
+ div-float v8, v8, v42
+
+ .line 202
+ const/high16 v42, 0x447a0000 # 1000.0f
+
+ div-float v31, v31, v42
+
+ .line 203
+ const/high16 v42, 0x447a0000 # 1000.0f
+
+ div-float v30, v30, v42
+
+ .line 209
+ :cond_202
+ move-object/from16 v0, p0
+
+ iget-boolean v0, v0, LMain2;->conditionB:Z
+
+ move/from16 v42, v0
+
+ if-eqz v42, :cond_29e
+
+ .line 210
+ const/high16 v42, 0x42c80000 # 100.0f
+
+ div-float v18, v18, v42
+
+ .line 211
+ const/high16 v42, 0x42c80000 # 100.0f
+
+ div-float v19, v19, v42
+
+ .line 212
+ const/high16 v42, 0x42c80000 # 100.0f
+
+ div-float v20, v20, v42
+
+ .line 213
+ const/high16 v42, 0x42c80000 # 100.0f
+
+ div-float v21, v21, v42
+
+ .line 214
+ const/high16 v42, 0x42c80000 # 100.0f
+
+ div-float v22, v22, v42
+
+ .line 215
+ const/high16 v42, 0x42c80000 # 100.0f
+
+ div-float v23, v23, v42
+
+ .line 216
+ const/high16 v42, 0x42c80000 # 100.0f
+
+ div-float v17, v17, v42
+
+ .line 217
+ const/high16 v42, 0x42c80000 # 100.0f
+
+ div-float v10, v10, v42
+
+ .line 218
+ const/high16 v42, 0x42c80000 # 100.0f
+
+ div-float v11, v11, v42
+
+ .line 219
+ const/high16 v42, 0x42c80000 # 100.0f
+
+ div-float v12, v12, v42
+
+ .line 220
+ const/high16 v42, 0x42c80000 # 100.0f
+
+ div-float v13, v13, v42
+
+ .line 221
+ const/high16 v42, 0x42c80000 # 100.0f
+
+ div-float v14, v14, v42
+
+ .line 222
+ const/high16 v42, 0x42c80000 # 100.0f
+
+ div-float v32, v32, v42
+
+ .line 223
+ const/high16 v42, 0x42c80000 # 100.0f
+
+ div-float v33, v33, v42
+
+ .line 224
+ const/high16 v42, 0x42c80000 # 100.0f
+
+ div-float v34, v34, v42
+
+ .line 225
+ const/high16 v42, 0x42c80000 # 100.0f
+
+ div-float v35, v35, v42
+
+ .line 226
+ const/high16 v42, 0x42c80000 # 100.0f
+
+ div-float v36, v36, v42
+
+ .line 227
+ const/high16 v42, 0x42c80000 # 100.0f
+
+ div-float v3, v3, v42
+
+ .line 228
+ const/high16 v42, 0x42c80000 # 100.0f
+
+ div-float v4, v4, v42
+
+ .line 229
+ const/high16 v42, 0x42c80000 # 100.0f
+
+ div-float v5, v5, v42
+
+ .line 230
+ const/high16 v42, 0x42c80000 # 100.0f
+
+ div-float v6, v6, v42
+
+ .line 231
+ const/high16 v42, 0x42c80000 # 100.0f
+
+ div-float v7, v7, v42
+
+ .line 232
+ const/high16 v42, 0x42c80000 # 100.0f
+
+ div-float v25, v25, v42
+
+ .line 233
+ const/high16 v42, 0x42c80000 # 100.0f
+
+ div-float v26, v26, v42
+
+ .line 234
+ const/high16 v42, 0x42c80000 # 100.0f
+
+ div-float v27, v27, v42
+
+ .line 235
+ const/high16 v42, 0x42c80000 # 100.0f
+
+ div-float v28, v28, v42
+
+ .line 236
+ const/high16 v42, 0x42c80000 # 100.0f
+
+ div-float v29, v29, v42
+
+ .line 237
+ const/high16 v42, 0x42c80000 # 100.0f
+
+ div-float v24, v24, v42
+
+ .line 238
+ const/high16 v42, 0x42c80000 # 100.0f
+
+ div-float v2, v2, v42
+
+ .line 239
+ const/high16 v42, 0x42c80000 # 100.0f
+
+ div-float v16, v16, v42
+
+ .line 240
+ const/high16 v42, 0x42c80000 # 100.0f
+
+ div-float v15, v15, v42
+
+ .line 241
+ const/high16 v42, 0x42c80000 # 100.0f
+
+ div-float v38, v38, v42
+
+ .line 242
+ const/high16 v42, 0x42c80000 # 100.0f
+
+ div-float v37, v37, v42
+
+ .line 243
+ const/high16 v42, 0x42c80000 # 100.0f
+
+ div-float v9, v9, v42
+
+ .line 244
+ const/high16 v42, 0x42c80000 # 100.0f
+
+ div-float v8, v8, v42
+
+ .line 245
+ const/high16 v42, 0x42c80000 # 100.0f
+
+ div-float v31, v31, v42
+
+ .line 246
+ const/high16 v42, 0x42c80000 # 100.0f
+
+ div-float v30, v30, v42
+
+ .line 248
+ :cond_29e
+ move-object/from16 v0, p0
+
+ iget-boolean v0, v0, LMain2;->conditionC:Z
+
+ move/from16 v42, v0
+
+ if-eqz v42, :cond_33a
+
+ .line 249
+ const/high16 v42, 0x41400000 # 12.0f
+
+ div-float v18, v18, v42
+
+ .line 250
+ const/high16 v42, 0x41400000 # 12.0f
+
+ div-float v19, v19, v42
+
+ .line 251
+ const/high16 v42, 0x41400000 # 12.0f
+
+ div-float v20, v20, v42
+
+ .line 252
+ const/high16 v42, 0x41400000 # 12.0f
+
+ div-float v21, v21, v42
+
+ .line 253
+ const/high16 v42, 0x41400000 # 12.0f
+
+ div-float v22, v22, v42
+
+ .line 254
+ const/high16 v42, 0x41400000 # 12.0f
+
+ div-float v23, v23, v42
+
+ .line 255
+ const/high16 v42, 0x41400000 # 12.0f
+
+ div-float v17, v17, v42
+
+ .line 256
+ const/high16 v42, 0x41400000 # 12.0f
+
+ div-float v10, v10, v42
+
+ .line 257
+ const/high16 v42, 0x41400000 # 12.0f
+
+ div-float v11, v11, v42
+
+ .line 258
+ const/high16 v42, 0x41400000 # 12.0f
+
+ div-float v12, v12, v42
+
+ .line 259
+ const/high16 v42, 0x41400000 # 12.0f
+
+ div-float v13, v13, v42
+
+ .line 260
+ const/high16 v42, 0x41400000 # 12.0f
+
+ div-float v14, v14, v42
+
+ .line 261
+ const/high16 v42, 0x41400000 # 12.0f
+
+ div-float v32, v32, v42
+
+ .line 262
+ const/high16 v42, 0x41400000 # 12.0f
+
+ div-float v33, v33, v42
+
+ .line 263
+ const/high16 v42, 0x41400000 # 12.0f
+
+ div-float v34, v34, v42
+
+ .line 264
+ const/high16 v42, 0x41400000 # 12.0f
+
+ div-float v35, v35, v42
+
+ .line 265
+ const/high16 v42, 0x41400000 # 12.0f
+
+ div-float v36, v36, v42
+
+ .line 266
+ const/high16 v42, 0x41400000 # 12.0f
+
+ div-float v3, v3, v42
+
+ .line 267
+ const/high16 v42, 0x41400000 # 12.0f
+
+ div-float v4, v4, v42
+
+ .line 268
+ const/high16 v42, 0x41400000 # 12.0f
+
+ div-float v5, v5, v42
+
+ .line 269
+ const/high16 v42, 0x41400000 # 12.0f
+
+ div-float v6, v6, v42
+
+ .line 270
+ const/high16 v42, 0x41400000 # 12.0f
+
+ div-float v7, v7, v42
+
+ .line 271
+ const/high16 v42, 0x41400000 # 12.0f
+
+ div-float v25, v25, v42
+
+ .line 272
+ const/high16 v42, 0x41400000 # 12.0f
+
+ div-float v26, v26, v42
+
+ .line 273
+ const/high16 v42, 0x41400000 # 12.0f
+
+ div-float v27, v27, v42
+
+ .line 274
+ const/high16 v42, 0x41400000 # 12.0f
+
+ div-float v28, v28, v42
+
+ .line 275
+ const/high16 v42, 0x41400000 # 12.0f
+
+ div-float v29, v29, v42
+
+ .line 276
+ const/high16 v42, 0x41400000 # 12.0f
+
+ div-float v24, v24, v42
+
+ .line 277
+ const/high16 v42, 0x41400000 # 12.0f
+
+ div-float v2, v2, v42
+
+ .line 278
+ const/high16 v42, 0x41400000 # 12.0f
+
+ div-float v16, v16, v42
+
+ .line 279
+ const/high16 v42, 0x41400000 # 12.0f
+
+ div-float v15, v15, v42
+
+ .line 280
+ const/high16 v42, 0x41400000 # 12.0f
+
+ div-float v38, v38, v42
+
+ .line 281
+ const/high16 v42, 0x41400000 # 12.0f
+
+ div-float v37, v37, v42
+
+ .line 282
+ const/high16 v42, 0x41400000 # 12.0f
+
+ div-float v9, v9, v42
+
+ .line 283
+ const/high16 v42, 0x41400000 # 12.0f
+
+ div-float v8, v8, v42
+
+ .line 284
+ const/high16 v42, 0x41400000 # 12.0f
+
+ div-float v31, v31, v42
+
+ .line 285
+ const/high16 v42, 0x41400000 # 12.0f
+
+ div-float v30, v30, v42
+
+ .line 287
+ :cond_33a
+ const/16 v41, 0x0
+
+ .line 288
+ .local v41, "s":F
+ const/high16 v42, 0x42c80000 # 100.0f
+
+ mul-float v42, v42, v41
+
+ invoke-static/range {v42 .. v42}, Ljava/lang/Math;->round(F)I
+
+ move-result v42
+
+ move/from16 v0, v42
+
+ int-to-float v0, v0
+
+ move/from16 v42, v0
+
+ const/high16 v43, 0x42c80000 # 100.0f
+
+ div-float v41, v42, v43
+
+ .line 289
+ new-instance v42, Ljava/lang/StringBuilder;
+
+ invoke-direct/range {v42 .. v42}, Ljava/lang/StringBuilder;-><init>()V
+
+ move-object/from16 v0, v42
+
+ move/from16 v1, v41
+
+ invoke-virtual {v0, v1}, Ljava/lang/StringBuilder;->append(F)Ljava/lang/StringBuilder;
+
+ move-result-object v42
+
+ move-object/from16 v0, v42
+
+ move-object/from16 v1, v39
+
+ invoke-virtual {v0, v1}, Ljava/lang/StringBuilder;->append(Ljava/lang/String;)Ljava/lang/StringBuilder;
+
+ move-result-object v42
+
+ invoke-virtual/range {v42 .. v42}, Ljava/lang/StringBuilder;->toString()Ljava/lang/String;
+
+ move-result-object v40
+
+ .line 290
+ .local v40, "res":Ljava/lang/String;
+ return-void
+
+ .line 128
+ .end local v2 # "f00":F
+ .end local v3 # "f01":F
+ .end local v4 # "f02":F
+ .end local v5 # "f03":F
+ .end local v6 # "f04":F
+ .end local v7 # "f05":F
+ .end local v8 # "f06":F
+ .end local v9 # "f07":F
+ .end local v10 # "f08":F
+ .end local v11 # "f09":F
+ .end local v12 # "f10":F
+ .end local v13 # "f11":F
+ .end local v14 # "f12":F
+ .end local v15 # "f13":F
+ .end local v16 # "f14":F
+ .end local v17 # "f15":F
+ .end local v18 # "f16":F
+ .end local v19 # "f17":F
+ .end local v20 # "f18":F
+ .end local v21 # "f19":F
+ .end local v22 # "f20":F
+ .end local v23 # "f21":F
+ .end local v24 # "f22":F
+ .end local v25 # "f23":F
+ .end local v26 # "f24":F
+ .end local v27 # "f25":F
+ .end local v28 # "f26":F
+ .end local v29 # "f27":F
+ .end local v30 # "f28":F
+ .end local v31 # "f29":F
+ .end local v32 # "f30":F
+ .end local v33 # "f31":F
+ .end local v34 # "f32":F
+ .end local v35 # "f33":F
+ .end local v36 # "f34":F
+ .end local v37 # "f35":F
+ .end local v38 # "f36":F
+ .end local v40 # "res":Ljava/lang/String;
+ .end local v41 # "s":F
+ :cond_367
+ const/high16 v19, 0x3f800000 # 1.0f
+
+ goto/16 :goto_c
+
+ .line 129
+ .restart local v19 # "f17":F
+ :cond_36b
+ const/high16 v18, 0x3f800000 # 1.0f
+
+ goto/16 :goto_16
+
+ .line 130
+ .restart local v18 # "f16":F
+ :cond_36f
+ const/high16 v20, 0x3f800000 # 1.0f
+
+ goto/16 :goto_20
+
+ .line 131
+ .restart local v20 # "f18":F
+ :cond_373
+ const/high16 v21, 0x3f800000 # 1.0f
+
+ goto/16 :goto_2a
+
+ .line 132
+ .restart local v21 # "f19":F
+ :cond_377
+ const/high16 v22, 0x3f800000 # 1.0f
+
+ goto/16 :goto_34
+
+ .line 133
+ .restart local v22 # "f20":F
+ :cond_37b
+ const/high16 v23, 0x3f800000 # 1.0f
+
+ goto/16 :goto_3e
+
+ .line 134
+ .restart local v23 # "f21":F
+ :cond_37f
+ const/high16 v17, 0x3f800000 # 1.0f
+
+ goto/16 :goto_48
+
+ .line 135
+ .restart local v17 # "f15":F
+ :cond_383
+ const/high16 v2, 0x3f800000 # 1.0f
+
+ goto/16 :goto_51
+
+ .line 136
+ .restart local v2 # "f00":F
+ :cond_387
+ const/high16 v24, 0x3f800000 # 1.0f
+
+ goto/16 :goto_5b
+
+ .line 137
+ .restart local v24 # "f22":F
+ :cond_38b
+ const/high16 v25, 0x3f800000 # 1.0f
+
+ goto/16 :goto_65
+
+ .line 138
+ .restart local v25 # "f23":F
+ :cond_38f
+ const/high16 v26, 0x3f800000 # 1.0f
+
+ goto/16 :goto_6f
+
+ .line 139
+ .restart local v26 # "f24":F
+ :cond_393
+ const/high16 v27, 0x3f800000 # 1.0f
+
+ goto/16 :goto_79
+
+ .line 140
+ .restart local v27 # "f25":F
+ :cond_397
+ const/high16 v28, 0x3f800000 # 1.0f
+
+ goto/16 :goto_83
+
+ .line 141
+ .restart local v28 # "f26":F
+ :cond_39b
+ const/high16 v29, 0x3f800000 # 1.0f
+
+ goto/16 :goto_8d
+
+ .line 142
+ .restart local v29 # "f27":F
+ :cond_39f
+ const/high16 v31, 0x3f800000 # 1.0f
+
+ goto/16 :goto_97
+
+ .line 143
+ .restart local v31 # "f29":F
+ :cond_3a3
+ const/high16 v30, 0x3f800000 # 1.0f
+
+ goto/16 :goto_a1
+
+ .line 144
+ .restart local v30 # "f28":F
+ :cond_3a7
+ const/high16 v3, 0x3f800000 # 1.0f
+
+ goto/16 :goto_aa
+
+ .line 145
+ .restart local v3 # "f01":F
+ :cond_3ab
+ const/high16 v4, 0x3f800000 # 1.0f
+
+ goto/16 :goto_b3
+
+ .line 146
+ .restart local v4 # "f02":F
+ :cond_3af
+ const/high16 v5, 0x3f800000 # 1.0f
+
+ goto/16 :goto_bc
+
+ .line 147
+ .restart local v5 # "f03":F
+ :cond_3b3
+ const/high16 v6, 0x3f800000 # 1.0f
+
+ goto/16 :goto_c5
+
+ .line 148
+ .restart local v6 # "f04":F
+ :cond_3b7
+ const/high16 v7, 0x3f800000 # 1.0f
+
+ goto/16 :goto_ce
+
+ .line 149
+ .restart local v7 # "f05":F
+ :cond_3bb
+ const/high16 v9, 0x3f800000 # 1.0f
+
+ goto/16 :goto_d7
+
+ .line 150
+ .restart local v9 # "f07":F
+ :cond_3bf
+ const/high16 v8, 0x3f800000 # 1.0f
+
+ goto/16 :goto_e0
+
+ .line 151
+ .restart local v8 # "f06":F
+ :cond_3c3
+ const/high16 v32, 0x3f800000 # 1.0f
+
+ goto/16 :goto_ea
+
+ .line 152
+ .restart local v32 # "f30":F
+ :cond_3c7
+ const/high16 v33, 0x3f800000 # 1.0f
+
+ goto/16 :goto_f4
+
+ .line 153
+ .restart local v33 # "f31":F
+ :cond_3cb
+ const/high16 v34, 0x3f800000 # 1.0f
+
+ goto/16 :goto_fe
+
+ .line 154
+ .restart local v34 # "f32":F
+ :cond_3cf
+ const/high16 v35, 0x3f800000 # 1.0f
+
+ goto/16 :goto_108
+
+ .line 155
+ .restart local v35 # "f33":F
+ :cond_3d3
+ const/high16 v36, 0x3f800000 # 1.0f
+
+ goto/16 :goto_112
+
+ .line 156
+ .restart local v36 # "f34":F
+ :cond_3d7
+ const/high16 v38, 0x3f800000 # 1.0f
+
+ goto/16 :goto_11c
+
+ .line 157
+ .restart local v38 # "f36":F
+ :cond_3db
+ const/high16 v37, 0x3f800000 # 1.0f
+
+ goto/16 :goto_126
+
+ .line 158
+ .restart local v37 # "f35":F
+ :cond_3df
+ const/high16 v10, 0x3f800000 # 1.0f
+
+ goto/16 :goto_12f
+
+ .line 159
+ .restart local v10 # "f08":F
+ :cond_3e3
+ const/high16 v11, 0x3f800000 # 1.0f
+
+ goto/16 :goto_138
+
+ .line 160
+ .restart local v11 # "f09":F
+ :cond_3e7
+ const/high16 v12, 0x3f800000 # 1.0f
+
+ goto/16 :goto_141
+
+ .line 161
+ .restart local v12 # "f10":F
+ :cond_3eb
+ const/high16 v13, 0x3f800000 # 1.0f
+
+ goto/16 :goto_14a
+
+ .line 162
+ .restart local v13 # "f11":F
+ :cond_3ef
+ const/high16 v14, 0x3f800000 # 1.0f
+
+ goto/16 :goto_153
+
+ .line 163
+ .restart local v14 # "f12":F
+ :cond_3f3
+ const/high16 v16, 0x3f800000 # 1.0f
+
+ goto/16 :goto_15d
+
+ .line 164
+ .restart local v16 # "f14":F
+ :cond_3f7
+ const/high16 v15, 0x3f800000 # 1.0f
+
+ goto/16 :goto_166
+.end method
diff --git a/test/626-checker-arm64-scratch-register/src-art/Main.java b/test/626-checker-arm64-scratch-register/src-art/Main.java
new file mode 100644
index 0000000000..b816586c84
--- /dev/null
+++ b/test/626-checker-arm64-scratch-register/src-art/Main.java
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) throws Exception {
+ Class main2 = Class.forName("Main2");
+ main2.getMethod("test").invoke(main2.newInstance());
+ System.out.println("passed");
+ }
+}
diff --git a/test/626-checker-arm64-scratch-register/src/Main.java b/test/626-checker-arm64-scratch-register/src/Main.java
index 139491769e..fa8e5cd1fe 100644
--- a/test/626-checker-arm64-scratch-register/src/Main.java
+++ b/test/626-checker-arm64-scratch-register/src/Main.java
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2016 The Android Open Source Project
+ * Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,284 +14,9 @@
* limitations under the License.
*/
+// This file is just for running on the RI as the test is ART specific.
public class Main {
-
- boolean b00;
- boolean b01;
- boolean b02;
- boolean b03;
- boolean b04;
- boolean b05;
- boolean b06;
- boolean b07;
- boolean b08;
- boolean b09;
- boolean b10;
- boolean b11;
- boolean b12;
- boolean b13;
- boolean b14;
- boolean b15;
- boolean b16;
- boolean b17;
- boolean b18;
- boolean b19;
- boolean b20;
- boolean b21;
- boolean b22;
- boolean b23;
- boolean b24;
- boolean b25;
- boolean b26;
- boolean b27;
- boolean b28;
- boolean b29;
- boolean b30;
- boolean b31;
- boolean b32;
- boolean b33;
- boolean b34;
- boolean b35;
- boolean b36;
-
- boolean conditionA;
- boolean conditionB;
- boolean conditionC;
-
- /// CHECK-START-ARM64: void Main.test() register (after)
- /// CHECK: begin_block
- /// CHECK: name "B0"
- /// CHECK: <<This:l\d+>> ParameterValue
- /// CHECK: end_block
- /// CHECK: begin_block
- /// CHECK: successors "<<ThenBlock:B\d+>>" "<<ElseBlock:B\d+>>"
- /// CHECK: <<CondB:z\d+>> InstanceFieldGet [<<This>>] field_name:Main.conditionB
- /// CHECK: If [<<CondB>>]
- /// CHECK: end_block
- /// CHECK: begin_block
- /// CHECK: name "<<ElseBlock>>"
- /// CHECK: ParallelMove moves:[40(sp)->d0,24(sp)->32(sp),28(sp)->36(sp),d0->d3,d3->d4,d2->d5,d4->d6,d5->d7,d6->d18,d7->d19,d18->d20,d19->d21,d20->d22,d21->d23,d22->d10,d23->d11,16(sp)->24(sp),20(sp)->28(sp),d10->d14,d11->d12,d12->d13,d13->d1,d14->d2,32(sp)->16(sp),36(sp)->20(sp)]
- /// CHECK: end_block
-
- /// CHECK-START-ARM64: void Main.test() disassembly (after)
- /// CHECK: begin_block
- /// CHECK: name "B0"
- /// CHECK: <<This:l\d+>> ParameterValue
- /// CHECK: end_block
- /// CHECK: begin_block
- /// CHECK: successors "<<ThenBlock:B\d+>>" "<<ElseBlock:B\d+>>"
- /// CHECK: <<CondB:z\d+>> InstanceFieldGet [<<This>>] field_name:Main.conditionB
- /// CHECK: If [<<CondB>>]
- /// CHECK: end_block
- /// CHECK: begin_block
- /// CHECK: name "<<ElseBlock>>"
- /// CHECK: ParallelMove moves:[invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid,invalid->invalid]
- /// CHECK: fmov d31, d2
- /// CHECK: ldr s2, [sp, #36]
- /// CHECK: ldr w16, [sp, #16]
- /// CHECK: str w16, [sp, #36]
- /// CHECK: str s14, [sp, #16]
- /// CHECK: ldr s14, [sp, #28]
- /// CHECK: str s1, [sp, #28]
- /// CHECK: ldr s1, [sp, #32]
- /// CHECK: str s31, [sp, #32]
- /// CHECK: ldr s31, [sp, #20]
- /// CHECK: str s31, [sp, #40]
- /// CHECK: str s12, [sp, #20]
- /// CHECK: fmov d12, d11
- /// CHECK: fmov d11, d10
- /// CHECK: fmov d10, d23
- /// CHECK: fmov d23, d22
- /// CHECK: fmov d22, d21
- /// CHECK: fmov d21, d20
- /// CHECK: fmov d20, d19
- /// CHECK: fmov d19, d18
- /// CHECK: fmov d18, d7
- /// CHECK: fmov d7, d6
- /// CHECK: fmov d6, d5
- /// CHECK: fmov d5, d4
- /// CHECK: fmov d4, d3
- /// CHECK: fmov d3, d13
- /// CHECK: ldr s13, [sp, #24]
- /// CHECK: str s3, [sp, #24]
- /// CHECK: ldr s3, pc+{{\d+}} (addr {{0x[0-9a-f]+}}) (100)
- /// CHECK: end_block
-
- public void test() {
- String r = "";
-
- // For the purpose of this regression test, the order of
- // definition of these float variable matters. Likewise with the
- // order of the instructions where these variables are used below.
- // Reordering these lines make make the original (b/32545705)
- // issue vanish.
- float f17 = b17 ? 0.0f : 1.0f;
- float f16 = b16 ? 0.0f : 1.0f;
- float f18 = b18 ? 0.0f : 1.0f;
- float f19 = b19 ? 0.0f : 1.0f;
- float f20 = b20 ? 0.0f : 1.0f;
- float f21 = b21 ? 0.0f : 1.0f;
- float f15 = b15 ? 0.0f : 1.0f;
- float f00 = b00 ? 0.0f : 1.0f;
- float f22 = b22 ? 0.0f : 1.0f;
- float f23 = b23 ? 0.0f : 1.0f;
- float f24 = b24 ? 0.0f : 1.0f;
- float f25 = b25 ? 0.0f : 1.0f;
- float f26 = b26 ? 0.0f : 1.0f;
- float f27 = b27 ? 0.0f : 1.0f;
- float f29 = b29 ? 0.0f : 1.0f;
- float f28 = b28 ? 0.0f : 1.0f;
- float f01 = b01 ? 0.0f : 1.0f;
- float f02 = b02 ? 0.0f : 1.0f;
- float f03 = b03 ? 0.0f : 1.0f;
- float f04 = b04 ? 0.0f : 1.0f;
- float f05 = b05 ? 0.0f : 1.0f;
- float f07 = b07 ? 0.0f : 1.0f;
- float f06 = b06 ? 0.0f : 1.0f;
- float f30 = b30 ? 0.0f : 1.0f;
- float f31 = b31 ? 0.0f : 1.0f;
- float f32 = b32 ? 0.0f : 1.0f;
- float f33 = b33 ? 0.0f : 1.0f;
- float f34 = b34 ? 0.0f : 1.0f;
- float f36 = b36 ? 0.0f : 1.0f;
- float f35 = b35 ? 0.0f : 1.0f;
- float f08 = b08 ? 0.0f : 1.0f;
- float f09 = b09 ? 0.0f : 1.0f;
- float f10 = b10 ? 0.0f : 1.0f;
- float f11 = b11 ? 0.0f : 1.0f;
- float f12 = b12 ? 0.0f : 1.0f;
- float f14 = b14 ? 0.0f : 1.0f;
- float f13 = b13 ? 0.0f : 1.0f;
-
- if (conditionA) {
- f16 /= 1000.0f;
- f17 /= 1000.0f;
- f18 /= 1000.0f;
- f19 /= 1000.0f;
- f20 /= 1000.0f;
- f21 /= 1000.0f;
- f15 /= 1000.0f;
- f08 /= 1000.0f;
- f09 /= 1000.0f;
- f10 /= 1000.0f;
- f11 /= 1000.0f;
- f12 /= 1000.0f;
- f30 /= 1000.0f;
- f31 /= 1000.0f;
- f32 /= 1000.0f;
- f33 /= 1000.0f;
- f34 /= 1000.0f;
- f01 /= 1000.0f;
- f02 /= 1000.0f;
- f03 /= 1000.0f;
- f04 /= 1000.0f;
- f05 /= 1000.0f;
- f23 /= 1000.0f;
- f24 /= 1000.0f;
- f25 /= 1000.0f;
- f26 /= 1000.0f;
- f27 /= 1000.0f;
- f22 /= 1000.0f;
- f00 /= 1000.0f;
- f14 /= 1000.0f;
- f13 /= 1000.0f;
- f36 /= 1000.0f;
- f35 /= 1000.0f;
- f07 /= 1000.0f;
- f06 /= 1000.0f;
- f29 /= 1000.0f;
- f28 /= 1000.0f;
- }
- // The parallel move that used to exhaust the ARM64 parallel move
- // resolver's scratch register pool (provided by VIXL) was in the
- // "else" branch of the following condition generated by ART's
- // compiler.
- if (conditionB) {
- f16 /= 100.0f;
- f17 /= 100.0f;
- f18 /= 100.0f;
- f19 /= 100.0f;
- f20 /= 100.0f;
- f21 /= 100.0f;
- f15 /= 100.0f;
- f08 /= 100.0f;
- f09 /= 100.0f;
- f10 /= 100.0f;
- f11 /= 100.0f;
- f12 /= 100.0f;
- f30 /= 100.0f;
- f31 /= 100.0f;
- f32 /= 100.0f;
- f33 /= 100.0f;
- f34 /= 100.0f;
- f01 /= 100.0f;
- f02 /= 100.0f;
- f03 /= 100.0f;
- f04 /= 100.0f;
- f05 /= 100.0f;
- f23 /= 100.0f;
- f24 /= 100.0f;
- f25 /= 100.0f;
- f26 /= 100.0f;
- f27 /= 100.0f;
- f22 /= 100.0f;
- f00 /= 100.0f;
- f14 /= 100.0f;
- f13 /= 100.0f;
- f36 /= 100.0f;
- f35 /= 100.0f;
- f07 /= 100.0f;
- f06 /= 100.0f;
- f29 /= 100.0f;
- f28 /= 100.0f;
- }
- if (conditionC) {
- f16 /= 12.0f;
- f17 /= 12.0f;
- f18 /= 12.0f;
- f19 /= 12.0f;
- f20 /= 12.0f;
- f21 /= 12.0f;
- f15 /= 12.0f;
- f08 /= 12.0f;
- f09 /= 12.0f;
- f10 /= 12.0f;
- f11 /= 12.0f;
- f12 /= 12.0f;
- f30 /= 12.0f;
- f31 /= 12.0f;
- f32 /= 12.0f;
- f33 /= 12.0f;
- f34 /= 12.0f;
- f01 /= 12.0f;
- f02 /= 12.0f;
- f03 /= 12.0f;
- f04 /= 12.0f;
- f05 /= 12.0f;
- f23 /= 12.0f;
- f24 /= 12.0f;
- f25 /= 12.0f;
- f26 /= 12.0f;
- f27 /= 12.0f;
- f22 /= 12.0f;
- f00 /= 12.0f;
- f14 /= 12.0f;
- f13 /= 12.0f;
- f36 /= 12.0f;
- f35 /= 12.0f;
- f07 /= 12.0f;
- f06 /= 12.0f;
- f29 /= 12.0f;
- f28 /= 12.0f;
- }
- float s = 0.0f;
- s = ((float) Math.round(100.0f * s)) / 100.0f;
- String res = s + r;
- }
-
public static void main(String[] args) {
- Main main = new Main();
- main.test();
System.out.println("passed");
}
}
diff --git a/test/638-no-line-number/build b/test/638-no-line-number/build
index 7eaf50e938..9cd19554bc 100644
--- a/test/638-no-line-number/build
+++ b/test/638-no-line-number/build
@@ -17,9 +17,6 @@
# Stop if something fails.
set -e
-mkdir classes
# Only keep the source name, to make sure we do remove it in the stack trace
# when there is no line number mapping.
-${JAVAC} -g:source -source 7 -target 7 -d classes `find src -name '*.java'`
-${DX} --dex --output=classes.dex classes
-zip $TEST_NAME.jar classes.dex
+JAVAC_ARGS="$JAVAC_ARGS -g:source" ./default-build "$@"
diff --git a/test/638-no-line-number/expected.txt b/test/638-no-line-number/expected.txt
index ffde15312b..4b351f4bf9 100644
--- a/test/638-no-line-number/expected.txt
+++ b/test/638-no-line-number/expected.txt
@@ -2,4 +2,4 @@ java.lang.Error
at Main.main(Unknown Source:2)
java.lang.NullPointerException: throw with null exception
at Main.doThrow(Unknown Source:0)
- at Main.main(Unknown Source:9)
+ at Main.main(Unknown Source:16)
diff --git a/test/639-checker-code-sinking/src/Main.java b/test/639-checker-code-sinking/src/Main.java
index a1c30f7b4e..8efac92c34 100644
--- a/test/639-checker-code-sinking/src/Main.java
+++ b/test/639-checker-code-sinking/src/Main.java
@@ -343,6 +343,37 @@ public class Main {
}
}
+ static native void doStaticNativeCallLiveVreg();
+
+ // Test ensures that 'o' has been moved into the if despite the InvokeStaticOrDirect.
+ //
+ /// CHECK-START: void Main.testSinkingOverInvoke() code_sinking (before)
+ /// CHECK: <<Int1:i\d+>> IntConstant 1
+ /// CHECK: <<Int0:i\d+>> IntConstant 0
+ /// CHECK: <<LoadClass:l\d+>> LoadClass class_name:java.lang.Object[]
+ /// CHECK-NOT: begin_block
+ /// CHECK: NewArray [<<LoadClass>>,<<Int1>>]
+ /// CHECK: If
+ /// CHECK: begin_block
+ /// CHECK: Throw
+
+ /// CHECK-START: void Main.testSinkingOverInvoke() code_sinking (after)
+ /// CHECK: <<Int1:i\d+>> IntConstant 1
+ /// CHECK: <<Int0:i\d+>> IntConstant 0
+ /// CHECK: If
+ /// CHECK: begin_block
+ /// CHECK: <<LoadClass:l\d+>> LoadClass class_name:java.lang.Object[]
+ /// CHECK: NewArray [<<LoadClass>>,<<Int1>>]
+ /// CHECK: Throw
+ static void testSinkingOverInvoke() {
+ Object[] o = new Object[1];
+ o[0] = o;
+ doStaticNativeCallLiveVreg();
+ if (doThrow) {
+ throw new Error(o.toString());
+ }
+ }
+
public String $opt$noinline$toString() {
return "" + intField;
}
diff --git a/test/646-checker-hadd-alt-char/build b/test/646-checker-hadd-alt-char/build
deleted file mode 100644
index d85147f17b..0000000000
--- a/test/646-checker-hadd-alt-char/build
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2018 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# See b/65168732
-export USE_D8=false
-
-./default-build "$@"
diff --git a/test/646-checker-hadd-alt-char/src/Main.java b/test/646-checker-hadd-alt-char/src/Main.java
index 2a1382dfde..79904ce74f 100644
--- a/test/646-checker-hadd-alt-char/src/Main.java
+++ b/test/646-checker-hadd-alt-char/src/Main.java
@@ -58,7 +58,7 @@ public class Main {
/// CHECK-DAG: <<Get1:c\d+>> ArrayGet loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:c\d+>> ArrayGet loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<And1:i\d+>> And [<<Get1>>,<<IMAX>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<And2:i\d+>> And [<<Get2>>,<<IMAX>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<And2:i\d+>> And [<<IMAX>>,<<Get2>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Add:i\d+>> Add [<<And1>>,<<And2>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add>>,<<I1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Cnv:c\d+>> TypeConversion [<<UShr>>] loop:<<Loop>> outer_loop:none
@@ -120,7 +120,7 @@ public class Main {
/// CHECK-DAG: <<Get1:c\d+>> ArrayGet loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:c\d+>> ArrayGet loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<And1:i\d+>> And [<<Get1>>,<<IMAX>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<And2:i\d+>> And [<<Get2>>,<<IMAX>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<And2:i\d+>> And [<<IMAX>>,<<Get2>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Add1:i\d+>> Add [<<And1>>,<<And2>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<I1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add2>>,<<I1>>] loop:<<Loop>> outer_loop:none
diff --git a/test/646-checker-hadd-alt-short/build b/test/646-checker-hadd-alt-short/build
deleted file mode 100644
index d85147f17b..0000000000
--- a/test/646-checker-hadd-alt-short/build
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2018 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# See b/65168732
-export USE_D8=false
-
-./default-build "$@"
diff --git a/test/646-checker-hadd-alt-short/src/Main.java b/test/646-checker-hadd-alt-short/src/Main.java
index 4035b97209..1ecb1d8273 100644
--- a/test/646-checker-hadd-alt-short/src/Main.java
+++ b/test/646-checker-hadd-alt-short/src/Main.java
@@ -58,7 +58,7 @@ public class Main {
/// CHECK-DAG: <<Get1:s\d+>> ArrayGet loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:s\d+>> ArrayGet loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<And1:i\d+>> And [<<Get1>>,<<UMAX>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<And2:i\d+>> And [<<Get2>>,<<UMAX>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<And2:i\d+>> And [<<UMAX>>,<<Get2>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Add:i\d+>> Add [<<And1>>,<<And2>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add>>,<<I1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Cnv:s\d+>> TypeConversion [<<UShr>>] loop:<<Loop>> outer_loop:none
@@ -82,7 +82,9 @@ public class Main {
private static void halving_add_unsigned(short[] b1, short[] b2, short[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
- bo[i] = (short) (((b1[i] & 0xffff) + (b2[i] & 0xffff)) >>> 1);
+ int v1 = b1[i] & 0xffff;
+ int v2 = b2[i] & 0xffff;
+ bo[i] = (short) ((v1 + v2) >>> 1);
}
}
@@ -116,7 +118,7 @@ public class Main {
/// CHECK-DAG: <<Get1:s\d+>> ArrayGet loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:s\d+>> ArrayGet loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<And1:i\d+>> And [<<Get1>>,<<UMAX>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<And2:i\d+>> And [<<Get2>>,<<UMAX>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<And2:i\d+>> And [<<UMAX>>,<<Get2>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Add1:i\d+>> Add [<<And1>>,<<And2>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<I1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add2>>,<<I1>>] loop:<<Loop>> outer_loop:none
@@ -142,7 +144,9 @@ public class Main {
private static void rounding_halving_add_unsigned(short[] b1, short[] b2, short[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
- bo[i] = (short) (((b1[i] & 0xffff) + (b2[i] & 0xffff) + 1) >>> 1);
+ int v1 = b1[i] & 0xffff;
+ int v2 = b2[i] & 0xffff;
+ bo[i] = (short) ((v1 + v2 + 1) >>> 1);
}
}
diff --git a/test/646-checker-hadd-char/build b/test/646-checker-hadd-char/build
deleted file mode 100644
index d85147f17b..0000000000
--- a/test/646-checker-hadd-char/build
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2018 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# See b/65168732
-export USE_D8=false
-
-./default-build "$@"
diff --git a/test/646-checker-hadd-char/src/Main.java b/test/646-checker-hadd-char/src/Main.java
index 6549dab9ff..cbe629711f 100644
--- a/test/646-checker-hadd-char/src/Main.java
+++ b/test/646-checker-hadd-char/src/Main.java
@@ -67,7 +67,7 @@ public class Main {
/// CHECK-DAG: <<Get1:c\d+>> ArrayGet loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:c\d+>> ArrayGet loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<And1:i\d+>> And [<<Get1>>,<<IMAX>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<And2:i\d+>> And [<<Get2>>,<<IMAX>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<And2:i\d+>> And [<<IMAX>>,<<Get2>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Add:i\d+>> Add [<<And1>>,<<And2>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Shr:i\d+>> Shr [<<Add>>,<<I1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Cnv:c\d+>> TypeConversion [<<Shr>>] loop:<<Loop>> outer_loop:none
@@ -152,7 +152,7 @@ public class Main {
/// CHECK-DAG: <<Get1:c\d+>> ArrayGet loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:c\d+>> ArrayGet loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<And1:i\d+>> And [<<Get1>>,<<IMAX>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<And2:i\d+>> And [<<Get2>>,<<IMAX>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<And2:i\d+>> And [<<IMAX>>,<<Get2>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Add1:i\d+>> Add [<<And1>>,<<And2>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<I1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Shr:i\d+>> Shr [<<Add2>>,<<I1>>] loop:<<Loop>> outer_loop:none
diff --git a/test/646-checker-hadd-short/build b/test/646-checker-hadd-short/build
deleted file mode 100644
index d85147f17b..0000000000
--- a/test/646-checker-hadd-short/build
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2018 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# See b/65168732
-export USE_D8=false
-
-./default-build "$@"
diff --git a/test/646-checker-hadd-short/src/Main.java b/test/646-checker-hadd-short/src/Main.java
index c09da8125b..d78a678dc8 100644
--- a/test/646-checker-hadd-short/src/Main.java
+++ b/test/646-checker-hadd-short/src/Main.java
@@ -86,7 +86,7 @@ public class Main {
/// CHECK-DAG: <<Get1:s\d+>> ArrayGet loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:s\d+>> ArrayGet loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<And1:i\d+>> And [<<Get1>>,<<UMAX>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<And2:i\d+>> And [<<Get2>>,<<UMAX>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<And2:i\d+>> And [<<UMAX>>,<<Get2>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Add:i\d+>> Add [<<And1>>,<<And2>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Shr:i\d+>> Shr [<<Add>>,<<I1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Cnv:s\d+>> TypeConversion [<<Shr>>] loop:<<Loop>> outer_loop:none
@@ -110,7 +110,9 @@ public class Main {
private static void halving_add_unsigned(short[] b1, short[] b2, short[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
- bo[i] = (short) (((b1[i] & 0xffff) + (b2[i] & 0xffff)) >> 1);
+ int v1 = b1[i] & 0xffff;
+ int v2 = b2[i] & 0xffff;
+ bo[i] = (short) ((v1 + v2) >> 1);
}
}
@@ -224,7 +226,7 @@ public class Main {
/// CHECK-DAG: <<Get1:s\d+>> ArrayGet loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:s\d+>> ArrayGet loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<And1:i\d+>> And [<<Get1>>,<<UMAX>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<And2:i\d+>> And [<<Get2>>,<<UMAX>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<And2:i\d+>> And [<<UMAX>>,<<Get2>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Add1:i\d+>> Add [<<And1>>,<<And2>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<I1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Shr:i\d+>> Shr [<<Add2>>,<<I1>>] loop:<<Loop>> outer_loop:none
@@ -250,7 +252,9 @@ public class Main {
private static void rounding_halving_add_unsigned(short[] b1, short[] b2, short[] bo) {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
- bo[i] = (short) (((b1[i] & 0xffff) + (b2[i] & 0xffff) + 1) >> 1);
+ int v1 = b1[i] & 0xffff;
+ int v2 = b2[i] & 0xffff;
+ bo[i] = (short) ((v1 + v2 + 1) >> 1);
}
}
@@ -261,9 +265,9 @@ public class Main {
/// CHECK-DAG: <<Get1:s\d+>> ArrayGet loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:s\d+>> ArrayGet loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<And1:i\d+>> And [<<Get1>>,<<UMAX>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<And2:i\d+>> And [<<Get2>>,<<UMAX>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<And2:i\d+>> And [<<UMAX>>,<<Get2>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Add1:i\d+>> Add [<<And2>>,<<I1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Add2:i\d+>> Add [<<And1>>,<<Add1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<And1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Shr:i\d+>> Shr [<<Add2>>,<<I1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Cnv:s\d+>> TypeConversion [<<Shr>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>> outer_loop:none
@@ -288,7 +292,9 @@ public class Main {
int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
for (int i = 0; i < min_length; i++) {
// Slightly different order in idiom does not confuse recognition.
- bo[i] = (short) ((b1[i] & 0xffff) + ((b2[i] & 0xffff) + 1) >> 1);
+ int v1 = b1[i] & 0xffff;
+ int v2 = b2[i] & 0xffff;
+ bo[i] = (short) (v1 + (v2 + 1) >> 1);
}
}
diff --git a/test/660-checker-simd-sad-byte/build b/test/660-checker-simd-sad-byte/build
deleted file mode 100644
index d85147f17b..0000000000
--- a/test/660-checker-simd-sad-byte/build
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2018 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# See b/65168732
-export USE_D8=false
-
-./default-build "$@"
diff --git a/test/660-checker-simd-sad-byte/src/Main.java b/test/660-checker-simd-sad-byte/src/Main.java
index 778d55c3ce..38003d18c6 100644
--- a/test/660-checker-simd-sad-byte/src/Main.java
+++ b/test/660-checker-simd-sad-byte/src/Main.java
@@ -90,8 +90,8 @@ public class Main {
/// CHECK-START: int Main.sadByte2Int(byte[], byte[]) loop_optimization (before)
/// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
/// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get1:b\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:b\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Sub:i\d+>> Sub [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
@@ -121,8 +121,8 @@ public class Main {
/// CHECK-START: int Main.sadByte2IntAlt(byte[], byte[]) loop_optimization (before)
/// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
/// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get1:b\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:b\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Sub:i\d+>> Sub [<<Get2>>,<<Get1>>] loop:<<Loop>> outer_loop:none
@@ -154,8 +154,8 @@ public class Main {
/// CHECK-START: int Main.sadByte2IntAlt2(byte[], byte[]) loop_optimization (before)
/// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
/// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get1:b\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:b\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Sub:i\d+>> Sub [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
diff --git a/test/660-checker-simd-sad-char/build b/test/660-checker-simd-sad-char/build
deleted file mode 100644
index d85147f17b..0000000000
--- a/test/660-checker-simd-sad-char/build
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2018 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# See b/65168732
-export USE_D8=false
-
-./default-build "$@"
diff --git a/test/660-checker-simd-sad-char/src/Main.java b/test/660-checker-simd-sad-char/src/Main.java
index 91c92f1179..18ae024231 100644
--- a/test/660-checker-simd-sad-char/src/Main.java
+++ b/test/660-checker-simd-sad-char/src/Main.java
@@ -59,8 +59,8 @@ public class Main {
/// CHECK-START: int Main.sadChar2Int(char[], char[]) loop_optimization (before)
/// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
/// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get1:c\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:c\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Sub:i\d+>> Sub [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
@@ -82,8 +82,8 @@ public class Main {
/// CHECK-START: int Main.sadChar2IntAlt(char[], char[]) loop_optimization (before)
/// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
/// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get1:c\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:c\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Sub:i\d+>> Sub [<<Get2>>,<<Get1>>] loop:<<Loop>> outer_loop:none
@@ -107,8 +107,8 @@ public class Main {
/// CHECK-START: int Main.sadChar2IntAlt2(char[], char[]) loop_optimization (before)
/// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
/// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get1:c\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:c\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Sub:i\d+>> Sub [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
diff --git a/test/660-checker-simd-sad-int/build b/test/660-checker-simd-sad-int/build
deleted file mode 100644
index d85147f17b..0000000000
--- a/test/660-checker-simd-sad-int/build
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2018 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# See b/65168732
-export USE_D8=false
-
-./default-build "$@"
diff --git a/test/660-checker-simd-sad-int/src/Main.java b/test/660-checker-simd-sad-int/src/Main.java
index 29415fd2cf..5952c41c2d 100644
--- a/test/660-checker-simd-sad-int/src/Main.java
+++ b/test/660-checker-simd-sad-int/src/Main.java
@@ -22,8 +22,8 @@ public class Main {
/// CHECK-START: int Main.sadInt2Int(int[], int[]) loop_optimization (before)
/// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
/// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get1:i\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:i\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Sub:i\d+>> Sub [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
@@ -51,8 +51,8 @@ public class Main {
/// CHECK-START: int Main.sadInt2IntAlt(int[], int[]) loop_optimization (before)
/// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
/// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get1:i\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:i\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Sub1:i\d+>> Sub [<<Get2>>,<<Get1>>] loop:<<Loop>> outer_loop:none
@@ -79,8 +79,8 @@ public class Main {
/// CHECK-START: int Main.sadInt2IntAlt2(int[], int[]) loop_optimization (before)
/// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
/// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get1:i\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:i\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Sub:i\d+>> Sub [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
diff --git a/test/660-checker-simd-sad-short/build b/test/660-checker-simd-sad-short/build
deleted file mode 100644
index d85147f17b..0000000000
--- a/test/660-checker-simd-sad-short/build
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2018 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# See b/65168732
-export USE_D8=false
-
-./default-build "$@"
diff --git a/test/660-checker-simd-sad-short/src/Main.java b/test/660-checker-simd-sad-short/src/Main.java
index 77c9e53e0c..ff74559292 100644
--- a/test/660-checker-simd-sad-short/src/Main.java
+++ b/test/660-checker-simd-sad-short/src/Main.java
@@ -61,8 +61,8 @@ public class Main {
/// CHECK-START: int Main.sadShort2Int(short[], short[]) loop_optimization (before)
/// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
/// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get1:s\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:s\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Sub:i\d+>> Sub [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
@@ -92,8 +92,8 @@ public class Main {
/// CHECK-START: int Main.sadShort2IntAlt(short[], short[]) loop_optimization (before)
/// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
/// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get1:s\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:s\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Sub:i\d+>> Sub [<<Get2>>,<<Get1>>] loop:<<Loop>> outer_loop:none
@@ -125,8 +125,8 @@ public class Main {
/// CHECK-START: int Main.sadShort2IntAlt2(short[], short[]) loop_optimization (before)
/// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
/// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get1:s\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:s\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Sub:i\d+>> Sub [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
@@ -161,8 +161,8 @@ public class Main {
/// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
/// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
/// CHECK-DAG: <<Cons:i\d+>> IntConstant -7 loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get1:s\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Add:i\d+>> Add [<<Get1>>,<<Cons>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Add>>] loop:<<Loop>> outer_loop:none
@@ -193,8 +193,8 @@ public class Main {
/// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
/// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
/// CHECK-DAG: <<Cons:i\d+>> IntConstant 7 loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get1:s\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Sub:i\d+>> Sub [<<Get1>>,<<Cons>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Sub>>] loop:<<Loop>> outer_loop:none
@@ -225,8 +225,8 @@ public class Main {
/// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
/// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
/// CHECK-DAG: <<Cons:i\d+>> IntConstant 7 loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get1:s\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Add:i\d+>> Add [<<Get1>>,<<Cons>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Add>>] loop:<<Loop>> outer_loop:none
diff --git a/test/660-checker-simd-sad-short2/build b/test/660-checker-simd-sad-short2/build
deleted file mode 100644
index d85147f17b..0000000000
--- a/test/660-checker-simd-sad-short2/build
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2018 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# See b/65168732
-export USE_D8=false
-
-./default-build "$@"
diff --git a/test/660-checker-simd-sad-short2/src/Main.java b/test/660-checker-simd-sad-short2/src/Main.java
index a1f98297c5..1ce0e2a266 100644
--- a/test/660-checker-simd-sad-short2/src/Main.java
+++ b/test/660-checker-simd-sad-short2/src/Main.java
@@ -59,8 +59,8 @@ public class Main {
/// CHECK-START: int Main.sadCastedChar2Int(char[], char[]) instruction_simplifier (before)
/// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
/// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<BC1:i\d+>> BoundsCheck [<<Phi1>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<BC2:i\d+>> BoundsCheck [<<Phi1>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get1:c\d+>> ArrayGet [{{l\d+}},<<BC1>>] loop:<<Loop>> outer_loop:none
@@ -75,8 +75,8 @@ public class Main {
/// CHECK-START: int Main.sadCastedChar2Int(char[], char[]) loop_optimization (before)
/// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
/// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get1:s\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:s\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Sub:i\d+>> Sub [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
@@ -106,8 +106,8 @@ public class Main {
/// CHECK-START: int Main.sadCastedChar2IntAlt(char[], char[]) instruction_simplifier (before)
/// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
/// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<BC1:i\d+>> BoundsCheck [<<Phi1>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<BC2:i\d+>> BoundsCheck [<<Phi1>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get1:c\d+>> ArrayGet [{{l\d+}},<<BC1>>] loop:<<Loop>> outer_loop:none
@@ -123,13 +123,11 @@ public class Main {
/// CHECK-START: int Main.sadCastedChar2IntAlt(char[], char[]) loop_optimization (before)
/// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
/// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
- // Note: Get1+Cnv1 not simplified yet due to env use of Get1 in NullCheck for s2[i].
- /// CHECK-DAG: <<Get1:c\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:s\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:s\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Cnv1:s\d+>> TypeConversion [<<Get1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Sub:i\d+>> Sub [<<Get2>>,<<Cnv1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Sub:i\d+>> Sub [<<Get2>>,<<Get1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Sub>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: Add [<<Phi2>>,<<Intrin>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: Add [<<Phi1>>,<<Cons1>>] loop:<<Loop>> outer_loop:none
@@ -158,8 +156,8 @@ public class Main {
/// CHECK-START: int Main.sadCastedChar2IntAlt2(char[], char[]) instruction_simplifier (before)
/// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
/// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<BC1:\i\d+>> BoundsCheck [<<Phi1>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<BC2:\i\d+>> BoundsCheck [<<Phi1>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get1:c\d+>> ArrayGet [{{l\d+}},<<BC1>>] loop:<<Loop>> outer_loop:none
@@ -175,13 +173,11 @@ public class Main {
/// CHECK-START: int Main.sadCastedChar2IntAlt2(char[], char[]) loop_optimization (before)
/// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
/// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
- // Note: Get1+Cnv1 not simplified yet due to env use of Get1 in NullCheck for s2[i].
- /// CHECK-DAG: <<Get1:c\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Get1:s\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get2:s\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Cnv1:s\d+>> TypeConversion [<<Get1>>] loop:<<Loop>> outer_loop:none
- /// CHECK-DAG: <<Sub:i\d+>> Sub [<<Cnv1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Sub:i\d+>> Sub [<<Get1>>,<<Get2>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Sub>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: Add [<<Phi2>>,<<Intrin>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: Add [<<Phi1>>,<<Cons1>>] loop:<<Loop>> outer_loop:none
diff --git a/test/660-checker-simd-sad-short3/build b/test/660-checker-simd-sad-short3/build
deleted file mode 100644
index d85147f17b..0000000000
--- a/test/660-checker-simd-sad-short3/build
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2018 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# See b/65168732
-export USE_D8=false
-
-./default-build "$@"
diff --git a/test/660-checker-simd-sad-short3/src/Main.java b/test/660-checker-simd-sad-short3/src/Main.java
index 877a5362ce..d0892c37c8 100644
--- a/test/660-checker-simd-sad-short3/src/Main.java
+++ b/test/660-checker-simd-sad-short3/src/Main.java
@@ -25,8 +25,8 @@ public class Main {
/// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
/// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
/// CHECK-DAG: <<Param:s\d+>> ParameterValue loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get:s\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Sub:i\d+>> Sub [<<Get>>,<<Param>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Sub>>] loop:<<Loop>> outer_loop:none
@@ -56,8 +56,8 @@ public class Main {
/// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
/// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
/// CHECK-DAG: <<Param:s\d+>> ParameterValue loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get:s\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Sub:i\d+>> Sub [<<Param>>,<<Get>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Sub>>] loop:<<Loop>> outer_loop:none
@@ -87,8 +87,8 @@ public class Main {
/// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
/// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
/// CHECK-DAG: <<ConsI:i\d+>> IntConstant -32767 loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get:s\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Add:i\d+>> Add [<<Get>>,<<ConsI>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Add>>] loop:<<Loop>> outer_loop:none
@@ -118,8 +118,8 @@ public class Main {
/// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
/// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
/// CHECK-DAG: <<ConsI:i\d+>> IntConstant 32767 loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get:s\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Sub:i\d+>> Sub [<<ConsI>>,<<Get>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Sub>>] loop:<<Loop>> outer_loop:none
@@ -149,8 +149,8 @@ public class Main {
/// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
/// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
/// CHECK-DAG: <<Conv:s\d+>> TypeConversion [{{i\d+}}] loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get:s\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Sub:i\d+>> Sub [<<Get>>,<<Conv>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Sub>>] loop:<<Loop>> outer_loop:none
@@ -181,8 +181,8 @@ public class Main {
/// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
/// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
/// CHECK-DAG: <<Conv:s\d+>> TypeConversion [{{i\d+}}] loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get:s\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Sub:i\d+>> Sub [<<Conv>>,<<Get>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Sub>>] loop:<<Loop>> outer_loop:none
@@ -213,8 +213,8 @@ public class Main {
/// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
/// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
/// CHECK-DAG: <<ConsI:i\d+>> IntConstant 110 loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get:s\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Add:i\d+>> [<<Get>>,<<ConsI>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Conv:s\d+>> TypeConversion [<<Add>>] loop:<<Loop>> outer_loop:none
@@ -248,8 +248,8 @@ public class Main {
/// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
/// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
/// CHECK-DAG: <<ConsI:i\d+>> IntConstant 110 loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get:s\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Add:i\d+>> [<<Get>>,<<ConsI>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Conv:s\d+>> TypeConversion [<<Add>>] loop:<<Loop>> outer_loop:none
diff --git a/test/661-checker-simd-reduc/build b/test/661-checker-simd-reduc/build
deleted file mode 100644
index d85147f17b..0000000000
--- a/test/661-checker-simd-reduc/build
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2018 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# See b/65168732
-export USE_D8=false
-
-./default-build "$@"
diff --git a/test/661-checker-simd-reduc/src/Main.java b/test/661-checker-simd-reduc/src/Main.java
index eff2018078..7b6f957b2a 100644
--- a/test/661-checker-simd-reduc/src/Main.java
+++ b/test/661-checker-simd-reduc/src/Main.java
@@ -55,8 +55,8 @@ public class Main {
/// CHECK-START: int Main.reductionInt(int[]) loop_optimization (before)
/// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
/// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get:i\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: Add [<<Phi2>>,<<Get>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: Add [<<Phi1>>,<<Cons1>>] loop:<<Loop>> outer_loop:none
@@ -130,8 +130,8 @@ public class Main {
/// CHECK-START: int Main.reductionIntToLoop(int[]) loop_optimization (before)
/// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
/// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop1:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop1>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop1:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop1>> outer_loop:none
/// CHECK-DAG: <<Get:i\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop1>> outer_loop:none
/// CHECK-DAG: Add [<<Phi2>>,<<Get>>] loop:<<Loop1>> outer_loop:none
/// CHECK-DAG: Add [<<Phi1>>,<<Cons1>>] loop:<<Loop1>> outer_loop:none
@@ -295,8 +295,8 @@ public class Main {
/// CHECK-START: int Main.reductionMinusInt(int[]) loop_optimization (before)
/// CHECK-DAG: <<Cons0:i\d+>> IntConstant 0 loop:none
/// CHECK-DAG: <<Cons1:i\d+>> IntConstant 1 loop:none
- /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
- /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: <<Phi2:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: <<Phi1:i\d+>> Phi [<<Cons0>>,{{i\d+}}] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: <<Get:i\d+>> ArrayGet [{{l\d+}},<<Phi1>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: Sub [<<Phi2>>,<<Get>>] loop:<<Loop>> outer_loop:none
/// CHECK-DAG: Add [<<Phi1>>,<<Cons1>>] loop:<<Loop>> outer_loop:none
diff --git a/test/672-checker-throw-method/build b/test/672-checker-throw-method/build
deleted file mode 100644
index d85147f17b..0000000000
--- a/test/672-checker-throw-method/build
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2018 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# See b/65168732
-export USE_D8=false
-
-./default-build "$@"
diff --git a/test/672-checker-throw-method/src/Main.java b/test/672-checker-throw-method/src/Main.java
index a507133b91..360b52c79d 100644
--- a/test/672-checker-throw-method/src/Main.java
+++ b/test/672-checker-throw-method/src/Main.java
@@ -51,7 +51,7 @@ public class Main {
/// CHECK-START: void Main.doit1(int[]) code_sinking (before)
/// CHECK: begin_block
/// CHECK: <<Str:l\d+>> LoadString
- /// CHECK: <<Tst:z\d+>> NotEqual
+ /// CHECK: <<Tst:z\d+>> Equal
/// CHECK: If [<<Tst>>]
/// CHECK: end_block
/// CHECK: begin_block
@@ -61,7 +61,7 @@ public class Main {
//
/// CHECK-START: void Main.doit1(int[]) code_sinking (after)
/// CHECK: begin_block
- /// CHECK: <<Tst:z\d+>> NotEqual
+ /// CHECK: <<Tst:z\d+>> Equal
/// CHECK: If [<<Tst>>]
/// CHECK: end_block
/// CHECK: begin_block
@@ -109,7 +109,7 @@ public class Main {
/// CHECK-START: void Main.doit3(int[]) code_sinking (before)
/// CHECK: begin_block
/// CHECK: <<Str:l\d+>> LoadString
- /// CHECK: <<Tst:z\d+>> NotEqual
+ /// CHECK: <<Tst:z\d+>> Equal
/// CHECK: If [<<Tst>>]
/// CHECK: end_block
/// CHECK: begin_block
@@ -119,7 +119,7 @@ public class Main {
//
/// CHECK-START: void Main.doit3(int[]) code_sinking (after)
/// CHECK: begin_block
- /// CHECK: <<Tst:z\d+>> NotEqual
+ /// CHECK: <<Tst:z\d+>> Equal
/// CHECK: If [<<Tst>>]
/// CHECK: end_block
/// CHECK: begin_block
diff --git a/test/673-checker-throw-vmethod/build b/test/673-checker-throw-vmethod/build
deleted file mode 100644
index d85147f17b..0000000000
--- a/test/673-checker-throw-vmethod/build
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2018 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# See b/65168732
-export USE_D8=false
-
-./default-build "$@"
diff --git a/test/673-checker-throw-vmethod/src/Main.java b/test/673-checker-throw-vmethod/src/Main.java
index d0e1591bdb..206dfaf330 100644
--- a/test/673-checker-throw-vmethod/src/Main.java
+++ b/test/673-checker-throw-vmethod/src/Main.java
@@ -45,7 +45,7 @@ public class Main {
/// CHECK-START: void Main.doit1(int[]) code_sinking (before)
/// CHECK: begin_block
/// CHECK: <<Str:l\d+>> LoadString
- /// CHECK: <<Tst:z\d+>> NotEqual
+ /// CHECK: <<Tst:z\d+>> Equal
/// CHECK: If [<<Tst>>]
/// CHECK: end_block
/// CHECK: begin_block
@@ -55,7 +55,7 @@ public class Main {
//
/// CHECK-START: void Main.doit1(int[]) code_sinking (after)
/// CHECK: begin_block
- /// CHECK: <<Tst:z\d+>> NotEqual
+ /// CHECK: <<Tst:z\d+>> Equal
/// CHECK: If [<<Tst>>]
/// CHECK: end_block
/// CHECK: begin_block
@@ -103,7 +103,7 @@ public class Main {
/// CHECK-START: void Main.doit3(int[]) code_sinking (before)
/// CHECK: begin_block
/// CHECK: <<Str:l\d+>> LoadString
- /// CHECK: <<Tst:z\d+>> NotEqual
+ /// CHECK: <<Tst:z\d+>> Equal
/// CHECK: If [<<Tst>>]
/// CHECK: end_block
/// CHECK: begin_block
@@ -113,7 +113,7 @@ public class Main {
//
/// CHECK-START: void Main.doit3(int[]) code_sinking (after)
/// CHECK: begin_block
- /// CHECK: <<Tst:z\d+>> NotEqual
+ /// CHECK: <<Tst:z\d+>> Equal
/// CHECK: If [<<Tst>>]
/// CHECK: end_block
/// CHECK: begin_block
diff --git a/test/679-checker-minmax/src/Main.java b/test/679-checker-minmax/src/Main.java
index abf8c279da..4b7265642a 100644
--- a/test/679-checker-minmax/src/Main.java
+++ b/test/679-checker-minmax/src/Main.java
@@ -37,6 +37,13 @@ public class Main {
//
/// CHECK-START: int Main.minI(int) instruction_simplifier (after)
/// CHECK-NOT: InvokeStaticOrDirect
+ //
+ /// CHECK-START-ARM64: int Main.minI(int) disassembly (after)
+ /// CHECK-NOT: mov {{w\d+}}, #0x14
+ /// CHECK: cmp {{w\d+}}, #0x14
+ // Check that the constant generation was handled by VIXL.
+ /// CHECK: mov w16, #0x14
+ /// CHECK: csel {{w\d+}}, {{w\d+}}, w16, lt
public static int minI(int a) {
return Math.min(a, 20);
}
@@ -55,6 +62,13 @@ public class Main {
//
/// CHECK-START: long Main.minL(long) instruction_simplifier (after)
/// CHECK-NOT: InvokeStaticOrDirect
+ //
+ /// CHECK-START-ARM64: long Main.minL(long) disassembly (after)
+ /// CHECK-NOT: mov {{x\d+}}, #0x14
+ /// CHECK: cmp {{x\d+}}, #0x14
+ // Check that the constant generation was handled by VIXL.
+ /// CHECK: mov x16, #0x14
+ /// CHECK: csel {{x\d+}}, {{x\d+}}, x16, lt
public static long minL(long a) {
return Math.min(a, 20L);
}
@@ -73,6 +87,13 @@ public class Main {
//
/// CHECK-START: int Main.maxI(int) instruction_simplifier (after)
/// CHECK-NOT: InvokeStaticOrDirect
+ //
+ /// CHECK-START-ARM64: int Main.maxI(int) disassembly (after)
+ /// CHECK-NOT: mov {{w\d+}}, #0x14
+ /// CHECK: cmp {{w\d+}}, #0x14
+ // Check that the constant generation was handled by VIXL.
+ /// CHECK: mov w16, #0x14
+ /// CHECK: csel {{w\d+}}, {{w\d+}}, w16, gt
public static int maxI(int a) {
return Math.max(a, 20);
}
@@ -91,11 +112,166 @@ public class Main {
//
/// CHECK-START: long Main.maxL(long) instruction_simplifier (after)
/// CHECK-NOT: InvokeStaticOrDirect
+ //
+ /// CHECK-START-ARM64: long Main.maxL(long) disassembly (after)
+ /// CHECK-NOT: mov {{x\d+}}, #0x14
+ /// CHECK: cmp {{x\d+}}, #0x14
+ // Check that the constant generation was handled by VIXL.
+ /// CHECK: mov x16, #0x14
+ /// CHECK: csel {{x\d+}}, {{x\d+}}, x16, gt
public static long maxL(long a) {
return Math.max(a, 20L);
}
//
+ // Special Cases
+ //
+
+ /// CHECK-START-ARM64: int Main.minIntConstantZero(int) disassembly (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+ /// CHECK-NOT: mov {{w\d+}}, #0x0
+ /// CHECK: cmp {{w\d+}}, #0x0 (0)
+ /// CHECK: csel {{w\d+}}, {{w\d+}}, wzr, lt
+ /// CHECK: ret
+ public static int minIntConstantZero(int a) {
+ return Math.min(a, 0);
+ }
+
+ /// CHECK-START-ARM64: int Main.minIntConstantOne(int) disassembly (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+ /// CHECK-NOT: mov {{w\d+}}, #0x1
+ /// CHECK: cmp {{w\d+}}, #0x1 (1)
+ /// CHECK: csinc {{w\d+}}, {{w\d+}}, wzr, lt
+ /// CHECK: ret
+ public static int minIntConstantOne(int a) {
+ return Math.min(a, 1);
+ }
+
+ /// CHECK-START-ARM64: int Main.minIntConstantMinusOne(int) disassembly (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+ /// CHECK-NOT: mov {{w\d+}}, #0xffffffff
+ /// CHECK: cmn {{w\d+}}, #0x1 (1)
+ /// CHECK: csinv {{w\d+}}, {{w\d+}}, wzr, lt
+ /// CHECK: ret
+ public static int minIntConstantMinusOne(int a) {
+ return Math.min(a, -1);
+ }
+
+ /// CHECK-START-ARM64: long Main.minLongConstantZero(long) disassembly (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+ /// CHECK-NOT: mov {{x\d+}}, #0x0
+ /// CHECK: cmp {{x\d+}}, #0x0 (0)
+ /// CHECK: csel {{x\d+}}, {{x\d+}}, xzr, lt
+ /// CHECK: ret
+ public static long minLongConstantZero(long a) {
+ return Math.min(a, 0L);
+ }
+
+ /// CHECK-START-ARM64: long Main.minLongConstantOne(long) disassembly (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+ /// CHECK-NOT: mov {{x\d+}}, #0x1
+ /// CHECK: cmp {{x\d+}}, #0x1 (1)
+ /// CHECK: csinc {{x\d+}}, {{x\d+}}, xzr, lt
+ /// CHECK: ret
+ public static long minLongConstantOne(long a) {
+ return Math.min(a, 1L);
+ }
+
+ /// CHECK-START-ARM64: long Main.minLongConstantMinusOne(long) disassembly (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+ /// CHECK-NOT: mov {{x\d+}}, #0xffffffffffffffff
+ /// CHECK: cmn {{x\d+}}, #0x1 (1)
+ /// CHECK: csinv {{x\d+}}, {{x\d+}}, xzr, lt
+ /// CHECK: ret
+ public static long minLongConstantMinusOne(long a) {
+ return Math.min(a, -1L);
+ }
+
+ /// CHECK-START-ARM64: int Main.maxIntConstantZero(int) disassembly (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+ /// CHECK-NOT: mov {{w\d+}}, #0x0
+ /// CHECK: cmp {{w\d+}}, #0x0 (0)
+ /// CHECK: csel {{w\d+}}, {{w\d+}}, wzr, gt
+ /// CHECK: ret
+ public static int maxIntConstantZero(int a) {
+ return Math.max(a, 0);
+ }
+
+ /// CHECK-START-ARM64: int Main.maxIntConstantOne(int) disassembly (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+ /// CHECK-NOT: mov {{w\d+}}, #0x1
+ /// CHECK: cmp {{w\d+}}, #0x1 (1)
+ /// CHECK: csinc {{w\d+}}, {{w\d+}}, wzr, gt
+ /// CHECK: ret
+ public static int maxIntConstantOne(int a) {
+ return Math.max(a, 1);
+ }
+
+ /// CHECK-START-ARM64: int Main.maxIntConstantMinusOne(int) disassembly (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+ /// CHECK-NOT: mov {{w\d+}}, #0xffffffff
+ /// CHECK: cmn {{w\d+}}, #0x1 (1)
+ /// CHECK: csinv {{w\d+}}, {{w\d+}}, wzr, gt
+ /// CHECK: ret
+ public static int maxIntConstantMinusOne(int a) {
+ return Math.max(a, -1);
+ }
+
+ /// CHECK-START-ARM64: int Main.maxIntLargeConstant(int) disassembly (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+ /// CHECK: mov {{w\d+}}, #0x2001
+ /// CHECK: cmp {{w\d+}}, {{w\d+}}
+ // Check that constant generation was not handled by VIXL.
+ /// CHECK-NOT: mov {{w\d+}}, #0x2001
+ /// CHECK: csel {{w\d+}}, {{w\d+}}, {{w\d+}}, gt
+ /// CHECK: ret
+ public static int maxIntLargeConstant(int a) {
+ return Math.max(a, 8193);
+ }
+
+ /// CHECK-START-ARM64: long Main.maxLongConstantZero(long) disassembly (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+ /// CHECK-NOT: mov {{x\d+}}, #0x0
+ /// CHECK: cmp {{x\d+}}, #0x0 (0)
+ /// CHECK: csel {{x\d+}}, {{x\d+}}, xzr, gt
+ /// CHECK: ret
+ public static long maxLongConstantZero(long a) {
+ return Math.max(a, 0L);
+ }
+
+ /// CHECK-START-ARM64: long Main.maxLongConstantOne(long) disassembly (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+ /// CHECK-NOT: mov {{x\d+}}, #0x1
+ /// CHECK: cmp {{x\d+}}, #0x1 (1)
+ /// CHECK: csinc {{x\d+}}, {{x\d+}}, xzr, gt
+ /// CHECK: ret
+ public static long maxLongConstantOne(long a) {
+ return Math.max(a, 1L);
+ }
+
+ /// CHECK-START-ARM64: long Main.maxLongConstantMinusOne(long) disassembly (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+ /// CHECK-NOT: mov {{x\d+}}, #0xffffffffffffffff
+ /// CHECK: cmn {{x\d+}}, #0x1 (1)
+ /// CHECK: csinv {{x\d+}}, {{x\d+}}, xzr, gt
+ /// CHECK: ret
+ public static long maxLongConstantMinusOne(long a) {
+ return Math.max(a, -1L);
+ }
+
+ /// CHECK-START-ARM64: long Main.maxLongLargeConstant(long) disassembly (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+ /// CHECK: mov {{x\d+}}, #0x2001
+ /// CHECK: cmp {{x\d+}}, {{x\d+}}
+ // Check that constant generation was not handled by VIXL.
+ /// CHECK-NOT: mov {{x\d+}}, #0x2001
+ /// CHECK: csel {{x\d+}}, {{x\d+}}, {{x\d+}}, gt
+ /// CHECK: ret
+ public static long maxLongLargeConstant(long a) {
+ return Math.max(a, 8193L);
+ }
+
+ //
// Different types.
//
@@ -538,12 +714,40 @@ public class Main {
// Intrinsics.
expectEquals(10, minI(10));
expectEquals(20, minI(25));
+ expectEquals(-1, minIntConstantZero(-1));
+ expectEquals(0, minIntConstantZero(1));
+ expectEquals(0, minIntConstantOne(0));
+ expectEquals(1, minIntConstantOne(2));
+ expectEquals(-2, minIntConstantMinusOne(-2));
+ expectEquals(-1, minIntConstantMinusOne(0));
expectEquals(10L, minL(10L));
expectEquals(20L, minL(25L));
+ expectEquals(-1L, minLongConstantZero(-1L));
+ expectEquals(0L, minLongConstantZero(1L));
+ expectEquals(0L, minLongConstantOne(0L));
+ expectEquals(1L, minLongConstantOne(2L));
+ expectEquals(-2L, minLongConstantMinusOne(-2L));
+ expectEquals(-1L, minLongConstantMinusOne(0L));
expectEquals(20, maxI(10));
expectEquals(25, maxI(25));
+ expectEquals(0, maxIntConstantZero(-1));
+ expectEquals(1, maxIntConstantZero(1));
+ expectEquals(1, maxIntConstantOne(0));
+ expectEquals(2, maxIntConstantOne(2));
+ expectEquals(-1, maxIntConstantMinusOne(-2));
+ expectEquals(0, maxIntConstantMinusOne(0));
+ expectEquals(8193, maxIntLargeConstant(8192));
+ expectEquals(9000, maxIntLargeConstant(9000));
expectEquals(20L, maxL(10L));
expectEquals(25L, maxL(25L));
+ expectEquals(0L, maxLongConstantZero(-1L));
+ expectEquals(1L, maxLongConstantZero(1L));
+ expectEquals(1L, maxLongConstantOne(0L));
+ expectEquals(2L, maxLongConstantOne(2L));
+ expectEquals(-1L, maxLongConstantMinusOne(-2L));
+ expectEquals(0L, maxLongConstantMinusOne(0L));
+ expectEquals(8193L, maxLongLargeConstant(8192L));
+ expectEquals(9000L, maxLongLargeConstant(9000L));
// Types.
expectEquals(10, min1(10, 20));
expectEquals(10, min2(10, 20));
diff --git a/test/704-multiply-accumulate/build b/test/704-multiply-accumulate/build
deleted file mode 100644
index d85147f17b..0000000000
--- a/test/704-multiply-accumulate/build
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2018 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# See b/65168732
-export USE_D8=false
-
-./default-build "$@"
diff --git a/test/706-checker-scheduler/build b/test/706-checker-scheduler/build
deleted file mode 100644
index d85147f17b..0000000000
--- a/test/706-checker-scheduler/build
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2018 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# See b/65168732
-export USE_D8=false
-
-./default-build "$@"
diff --git a/test/706-checker-scheduler/src/Main.java b/test/706-checker-scheduler/src/Main.java
index 25e4fad714..eb985d0032 100644
--- a/test/706-checker-scheduler/src/Main.java
+++ b/test/706-checker-scheduler/src/Main.java
@@ -35,8 +35,8 @@ public class Main {
/// CHECK-START-ARM64: int Main.arrayAccess() scheduler (before)
/// CHECK: <<Const1:i\d+>> IntConstant 1
- /// CHECK: <<i0:i\d+>> Phi
/// CHECK: <<res0:i\d+>> Phi
+ /// CHECK: <<i0:i\d+>> Phi
/// CHECK: <<Array:i\d+>> IntermediateAddress
/// CHECK: <<ArrayGet1:i\d+>> ArrayGet [<<Array>>,<<i0>>]
/// CHECK: <<res1:i\d+>> Add [<<res0>>,<<ArrayGet1>>]
@@ -46,8 +46,8 @@ public class Main {
/// CHECK-START-ARM64: int Main.arrayAccess() scheduler (after)
/// CHECK: <<Const1:i\d+>> IntConstant 1
- /// CHECK: <<i0:i\d+>> Phi
/// CHECK: <<res0:i\d+>> Phi
+ /// CHECK: <<i0:i\d+>> Phi
/// CHECK: <<Array:i\d+>> IntermediateAddress
/// CHECK: <<ArrayGet1:i\d+>> ArrayGet [<<Array>>,<<i0>>]
/// CHECK: <<i1:i\d+>> Add [<<i0>>,<<Const1>>]
diff --git a/test/712-varhandle-invocations/build b/test/712-varhandle-invocations/build
index 253765be91..9a6e96e18b 100755
--- a/test/712-varhandle-invocations/build
+++ b/test/712-varhandle-invocations/build
@@ -32,8 +32,4 @@ MANUAL_TESTS=$(cd "${MANUAL_SRC}" && find . -name 'Var*Tests.java' | sed -e 's@.
# Generate tests and Main that covers both the generated tests and manual tests
python3 ./util-src/generate_java.py "${GENERATED_SRC}" ${MANUAL_TESTS}
-# Desugar is not happy with our Java 9 byte code, it shouldn't be necessary here anyway.
-export USE_DESUGAR=false
-
-# Invoke default build with increased heap size for dx
-./default-build "$@" --experimental var-handles --dx-vm-option -JXmx384m
+./default-build "$@" --experimental var-handles
diff --git a/test/715-clinit-implicit-parameter-annotations/build b/test/715-clinit-implicit-parameter-annotations/build
index 4753c8c7dc..2b5f92cc88 100644
--- a/test/715-clinit-implicit-parameter-annotations/build
+++ b/test/715-clinit-implicit-parameter-annotations/build
@@ -17,8 +17,4 @@
# Make us exit on a failure
set -e
-# Always use D8 as DX does not support propagating parameter name and
-# access_flag information.
-export USE_D8=true
-
./default-build "$@" --experimental parameter-annotations
diff --git a/test/717-integer-value-of/expected.txt b/test/717-integer-value-of/expected.txt
new file mode 100644
index 0000000000..6a5618ebc6
--- /dev/null
+++ b/test/717-integer-value-of/expected.txt
@@ -0,0 +1 @@
+JNI_OnLoad called
diff --git a/test/717-integer-value-of/info.txt b/test/717-integer-value-of/info.txt
new file mode 100644
index 0000000000..b65d679ab1
--- /dev/null
+++ b/test/717-integer-value-of/info.txt
@@ -0,0 +1,2 @@
+Regression test for JIT crash when compiling Integer.valueOf() intrinsic after
+having messed up the IntegerCache through reflection.
diff --git a/test/717-integer-value-of/src/Main.java b/test/717-integer-value-of/src/Main.java
new file mode 100644
index 0000000000..557b65c1c7
--- /dev/null
+++ b/test/717-integer-value-of/src/Main.java
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Field;
+
+public class Main {
+ public static void main(String[] args) throws Exception {
+ if (!isDalvik) {
+ // This test is ART-specific. Just fake the expected output.
+ System.out.println("JNI_OnLoad called");
+ return;
+ }
+ System.loadLibrary(args[0]);
+ if (!hasJit()) {
+ return;
+ }
+ testValueOfArg();
+ testValueOfConst();
+ }
+
+ public static void testValueOfArg() throws Exception {
+ final VolatileFlag start_end = new VolatileFlag();
+ Thread t = new Thread() {
+ @Override
+ public void run() {
+ try {
+ Class<?> integerCacheClass = Class.forName("java.lang.Integer$IntegerCache");
+ Field cacheField = integerCacheClass.getDeclaredField("cache");
+ cacheField.setAccessible(true);
+
+ Integer[] cache = (Integer[]) cacheField.get(integerCacheClass);
+ Integer[] alt_cache = new Integer[cache.length];
+ System.arraycopy(cache, 0, alt_cache, 0, cache.length);
+
+ // Let the main thread know that everything is set up.
+ synchronized (start_end) {
+ start_end.notify();
+ }
+ while (!start_end.flag) {
+ cacheField.set(integerCacheClass, alt_cache);
+ cacheField.set(integerCacheClass, cache);
+ }
+ } catch (Throwable t) {
+ throw new Error(t);
+ }
+ }
+ };
+ synchronized (start_end) {
+ t.start();
+ start_end.wait(); // Wait for the thread to start.
+ }
+ // Previously, this may have used an invalid IntegerValueOfInfo (because of seeing
+ // the `alt_cache` which is not in the boot image) when asked to emit code after
+ // using a valid info (using `cache`) when requesting locations.
+ ensureJitCompiled(Main.class, "getAsInteger");
+
+ start_end.flag = true;
+ t.join();
+
+ Runtime.getRuntime().gc(); // Collect the `alt_cache`.
+
+ // If `getAsInteger()` was miscompiled, it shall try to retrieve an Integer reference
+ // from a collected array (low = 0, high = 0 means that this happens only for value 0),
+ // reading from a bogus location. Depending on the GC type, this bogus memory access may
+ // yield SIGSEGV or `null` or even a valid reference.
+ Integer new0 = getAsInteger(0);
+ int value = (int) new0;
+
+ if (value != 0) {
+ throw new Error("value is " + value);
+ }
+ }
+
+ public static void testValueOfConst() throws Exception {
+ Class<?> integerCacheClass = Class.forName("java.lang.Integer$IntegerCache");
+ Field cacheField = integerCacheClass.getDeclaredField("cache");
+ cacheField.setAccessible(true);
+ Field lowField = integerCacheClass.getDeclaredField("low");
+ lowField.setAccessible(true);
+
+ Integer[] cache = (Integer[]) cacheField.get(integerCacheClass);
+ int low = (int) lowField.get(integerCacheClass);
+ Integer old42 = cache[42 - low];
+ cache[42 - low] = new Integer(42);
+
+ // This used to hit
+ // DCHECK(boxed != nullptr &&
+ // Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed));
+ // when compiling the intrinsic.
+ ensureJitCompiled(Main.class, "get42AsInteger");
+
+ cache[42 - low] = old42;
+ Runtime.getRuntime().gc();
+ Integer new42 = get42AsInteger();
+
+ // If the DCHECK() was removed, MterpInvokeVirtualQuick() used to crash here.
+ // (Note: Our fault handler on x86-64 then also crashed.)
+ int value = (int) new42;
+
+ if (value != (int) old42) {
+ throw new Error("value is " + value);
+ }
+ }
+
+ private static class VolatileFlag {
+ public volatile boolean flag = false;
+ }
+
+ public static Integer get42AsInteger() {
+ return Integer.valueOf(42);
+ }
+
+ public static Integer getAsInteger(int value) {
+ return Integer.valueOf(value);
+ }
+
+ private native static boolean hasJit();
+ private static native void ensureJitCompiled(Class<?> itf, String method_name);
+
+ private final static boolean isDalvik = System.getProperty("java.vm.name").equals("Dalvik");
+}
diff --git a/test/551-checker-shifter-operand/build b/test/804-class-extends-itself/build
index d85147f17b..71cb3cacdc 100644
--- a/test/551-checker-shifter-operand/build
+++ b/test/804-class-extends-itself/build
@@ -14,7 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# See b/65168732
-export USE_D8=false
-
-./default-build "$@"
+# Use old API level to create DEX file with 035 version. Stricter
+# checking introduced with DEX file version 37 rejects class
+# otherwise (see DexFile::kClassDefinitionOrderEnforcedVersion).
+./default-build "$@" --api-level 13
diff --git a/test/910-methods/check b/test/910-methods/check
deleted file mode 100644
index 61846adf9b..0000000000
--- a/test/910-methods/check
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2017 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-./default-check "$@"
-if [[ "$?" == "0" ]]; then
- exit 0;
-fi
-
-# We cannot always correctly determine if D8 was used because of (b/68406220).
-# So we are just going to try to see it matches the expect output of D8 no
-# matter what.
-patch -p0 expected.txt < expected_d8.diff
-
-./default-check "$@"
diff --git a/test/910-methods/expected.txt b/test/910-methods/expected.txt
index 45de3db1fb..6672dc0d09 100644
--- a/test/910-methods/expected.txt
+++ b/test/910-methods/expected.txt
@@ -4,7 +4,7 @@ class java.lang.Object
Max locals: 3
Argument size: 1
Location start: 0
-Location end: 39
+Location end: 36
Is native: false
Is obsolete: false
Is synthetic: false
diff --git a/test/910-methods/expected_d8.diff b/test/910-methods/expected_d8.diff
deleted file mode 100644
index 2c5d085418..0000000000
--- a/test/910-methods/expected_d8.diff
+++ /dev/null
@@ -1,4 +0,0 @@
-7c7
-< Location end: 39
----
-> Location end: 36
diff --git a/test/911-get-stack-trace/expected.txt b/test/911-get-stack-trace/expected.txt
index b0a400ab75..995701dee1 100644
--- a/test/911-get-stack-trace/expected.txt
+++ b/test/911-get-stack-trace/expected.txt
@@ -79,7 +79,7 @@ From top
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -99,7 +99,7 @@ From top
---------
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -120,13 +120,13 @@ From top
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
---------
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -153,7 +153,7 @@ From bottom
###########################
From top
---------
- printOrWait (IILart/ControlData;)V 45 54
+ printOrWait (IILart/ControlData;)V 43 54
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -188,7 +188,7 @@ From top
foo (IIILart/ControlData;)I 0 21
run ()V 4 61
---------
- printOrWait (IILart/ControlData;)V 45 54
+ printOrWait (IILart/ControlData;)V 43 54
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -274,7 +274,7 @@ AllTraces Thread 0
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -284,7 +284,7 @@ AllTraces Thread 1
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -294,7 +294,7 @@ AllTraces Thread 2
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -304,7 +304,7 @@ AllTraces Thread 3
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -314,7 +314,7 @@ AllTraces Thread 4
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -324,7 +324,7 @@ AllTraces Thread 5
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -334,7 +334,7 @@ AllTraces Thread 6
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -344,7 +344,7 @@ AllTraces Thread 7
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -354,7 +354,7 @@ AllTraces Thread 8
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -364,7 +364,7 @@ AllTraces Thread 9
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -399,7 +399,7 @@ AllTraces Thread 0
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -422,7 +422,7 @@ AllTraces Thread 1
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -445,7 +445,7 @@ AllTraces Thread 2
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -468,7 +468,7 @@ AllTraces Thread 3
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -491,7 +491,7 @@ AllTraces Thread 4
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -514,7 +514,7 @@ AllTraces Thread 5
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -537,7 +537,7 @@ AllTraces Thread 6
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -560,7 +560,7 @@ AllTraces Thread 7
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -583,7 +583,7 @@ AllTraces Thread 8
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -606,7 +606,7 @@ AllTraces Thread 9
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -683,7 +683,7 @@ ThreadListTraces Thread 0
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -693,7 +693,7 @@ ThreadListTraces Thread 2
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -703,7 +703,7 @@ ThreadListTraces Thread 4
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -713,7 +713,7 @@ ThreadListTraces Thread 6
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -723,7 +723,7 @@ ThreadListTraces Thread 8
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -740,7 +740,7 @@ ThreadListTraces Thread 0
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -763,7 +763,7 @@ ThreadListTraces Thread 2
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -786,7 +786,7 @@ ThreadListTraces Thread 4
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -809,7 +809,7 @@ ThreadListTraces Thread 6
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -832,7 +832,7 @@ ThreadListTraces Thread 8
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 24 47
+ printOrWait (IILart/ControlData;)V 22 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -870,7 +870,7 @@ JVMTI_ERROR_ILLEGAL_ARGUMENT
[public final native void java.lang.Object.wait(long,int) throws java.lang.InterruptedException, ffffffff]
[public final void java.lang.Object.wait(long) throws java.lang.InterruptedException, 1]
[public final void java.lang.Object.wait() throws java.lang.InterruptedException, 2]
-[private static void art.Recurse.printOrWait(int,int,art.ControlData), 18]
+[private static void art.Recurse.printOrWait(int,int,art.ControlData), 16]
[private static java.lang.Object art.Recurse.baz(int,int,int,art.ControlData), 2]
[private static long art.Recurse.bar(int,int,int,art.ControlData), 0]
[public static int art.Recurse.foo(int,int,int,art.ControlData), 0]
@@ -894,7 +894,7 @@ JVMTI_ERROR_NO_MORE_FRAMES
###########################
17
JVMTI_ERROR_ILLEGAL_ARGUMENT
-[private static void art.Recurse.printOrWait(int,int,art.ControlData), 2d]
+[private static void art.Recurse.printOrWait(int,int,art.ControlData), 2b]
[private static java.lang.Object art.Recurse.baz(int,int,int,art.ControlData), 2]
[private static long art.Recurse.bar(int,int,int,art.ControlData), 0]
[public static int art.Recurse.foo(int,int,int,art.ControlData), 0]
diff --git a/test/913-heaps/check b/test/913-heaps/check
deleted file mode 100644
index f7f8dab8cd..0000000000
--- a/test/913-heaps/check
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2017 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# D8 has a different set of bytecode offsets/method IDs in the expected.txt
-if [[ "$USE_D8" == true ]]; then
- patch -p0 expected.txt < expected_d8.diff
-fi
-
-./default-check "$@"
diff --git a/test/913-heaps/expected.txt b/test/913-heaps/expected.txt
index 57c2dc660a..01d374bebd 100644
--- a/test/913-heaps/expected.txt
+++ b/test/913-heaps/expected.txt
@@ -1,8 +1,7 @@
---
true true
root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=136, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonRoot,vreg=13,location= 30])--> 1@1000 [size=16, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=3,method=doFollowReferencesTest,vreg=1,location= 28])--> 3000@0 [size=136, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonRoot,vreg=8,location= 31])--> 1@1000 [size=16, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=136, length=-1]
root@root --(thread)--> 3000@0 [size=136, length=-1]
1001@0 --(superclass)--> 1000@0 [size=123456780000, length=-1]
@@ -46,9 +45,10 @@ root@root --(thread)--> 3000@0 [size=136, length=-1]
root@root --(jni-global)--> 1@1000 [size=16, length=-1]
root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 1@1000 [size=16, length=-1]
root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=136, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=11,location= 8])--> 1@1000 [size=16, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=10,location= 8])--> 1@1000 [size=16, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=5,location= 8])--> 1@1000 [size=16, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 19])--> 1@1000 [size=16, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=13,location= 20])--> 1@1000 [size=16, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 20])--> 1@1000 [size=16, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=136, length=-1]
root@root --(thread)--> 1@1000 [size=16, length=-1]
root@root --(thread)--> 3000@0 [size=136, length=-1]
@@ -99,8 +99,7 @@ root@root --(thread)--> 3000@0 [size=136, length=-1]
3@1001 --(class)--> 1001@0 [size=123456780016, length=-1]
---
root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=136, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonRoot,vreg=13,location= 30])--> 1@1000 [size=16, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=3,method=doFollowReferencesTest,vreg=1,location= 28])--> 3000@0 [size=136, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonRoot,vreg=8,location= 31])--> 1@1000 [size=16, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=136, length=-1]
root@root --(thread)--> 3000@0 [size=136, length=-1]
---
@@ -112,9 +111,10 @@ root@root --(thread)--> 3000@0 [size=136, length=-1]
root@root --(jni-global)--> 1@1000 [size=16, length=-1]
root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 1@1000 [size=16, length=-1]
root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=136, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=11,location= 8])--> 1@1000 [size=16, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=10,location= 8])--> 1@1000 [size=16, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=5,location= 8])--> 1@1000 [size=16, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 19])--> 1@1000 [size=16, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=13,location= 20])--> 1@1000 [size=16, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 20])--> 1@1000 [size=16, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=136, length=-1]
root@root --(thread)--> 1@1000 [size=16, length=-1]
root@root --(thread)--> 3000@0 [size=136, length=-1]
@@ -159,7 +159,7 @@ root@root --(thread)--> 3000@0 [size=136, length=-1]
10007@0 (instance, float, index=13) 000000003f9d70a4
10008
--- klass ---
-root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonRoot,vreg=13,location= 30])--> 1@1000 [size=16, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonRoot,vreg=8,location= 31])--> 1@1000 [size=16, length=-1]
1@1000 --(field@2)--> 2@1000 [size=16, length=-1]
3@1001 --(field@4)--> 4@1000 [size=16, length=-1]
500@0 --(array-element@1)--> 2@1000 [size=16, length=-1]
@@ -174,9 +174,10 @@ root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonR
---
root@root --(jni-global)--> 1@1000 [size=16, length=-1]
root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 1@1000 [size=16, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=11,location= 8])--> 1@1000 [size=16, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=10,location= 8])--> 1@1000 [size=16, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=5,location= 8])--> 1@1000 [size=16, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 19])--> 1@1000 [size=16, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=13,location= 20])--> 1@1000 [size=16, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 20])--> 1@1000 [size=16, length=-1]
root@root --(thread)--> 1@1000 [size=16, length=-1]
1@1000 --(field@2)--> 2@1000 [size=16, length=-1]
3@1001 --(field@4)--> 4@1000 [size=16, length=-1]
@@ -198,8 +199,7 @@ root@root --(thread)--> 1@1000 [size=16, length=-1]
---
---- untagged objects
root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=136, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonRoot,vreg=13,location= 30])--> 1@1000 [size=16, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=3,method=doFollowReferencesTest,vreg=1,location= 28])--> 3000@0 [size=136, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonRoot,vreg=8,location= 31])--> 1@1000 [size=16, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=136, length=-1]
root@root --(thread)--> 3000@0 [size=136, length=-1]
1001@0 --(superclass)--> 1000@0 [size=123456780050, length=-1]
@@ -243,9 +243,10 @@ root@root --(thread)--> 3000@0 [size=136, length=-1]
root@root --(jni-global)--> 1@1000 [size=16, length=-1]
root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 1@1000 [size=16, length=-1]
root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=136, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=11,location= 8])--> 1@1000 [size=16, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=10,location= 8])--> 1@1000 [size=16, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=5,location= 8])--> 1@1000 [size=16, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 19])--> 1@1000 [size=16, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=13,location= 20])--> 1@1000 [size=16, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 20])--> 1@1000 [size=16, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=136, length=-1]
root@root --(thread)--> 1@1000 [size=16, length=-1]
root@root --(thread)--> 3000@0 [size=136, length=-1]
@@ -289,7 +290,6 @@ root@root --(thread)--> 3000@0 [size=136, length=-1]
---
---- tagged classes
root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=136, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=3,method=doFollowReferencesTest,vreg=1,location= 28])--> 3000@0 [size=136, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=136, length=-1]
root@root --(thread)--> 3000@0 [size=136, length=-1]
1001@0 --(superclass)--> 1000@0 [size=123456780060, length=-1]
@@ -344,7 +344,7 @@ root@root --(thread)--> 3000@0 [size=136, length=-1]
6@1000 --(class)--> 1000@0 [size=123456780065, length=-1]
---
---- untagged classes
-root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonRoot,vreg=13,location= 30])--> 1@1000 [size=16, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonRoot,vreg=8,location= 31])--> 1@1000 [size=16, length=-1]
1@1000 --(field@2)--> 2@1000 [size=16, length=-1]
1@1000 --(field@3)--> 3@1001 [size=24, length=-1]
3@1001 --(field@4)--> 4@1000 [size=16, length=-1]
@@ -363,9 +363,10 @@ root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonR
---
root@root --(jni-global)--> 1@1000 [size=16, length=-1]
root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 1@1000 [size=16, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=11,location= 8])--> 1@1000 [size=16, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=10,location= 8])--> 1@1000 [size=16, length=-1]
root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=5,location= 8])--> 1@1000 [size=16, length=-1]
-root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 19])--> 1@1000 [size=16, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=13,location= 20])--> 1@1000 [size=16, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 20])--> 1@1000 [size=16, length=-1]
root@root --(thread)--> 1@1000 [size=16, length=-1]
1@1000 --(field@2)--> 2@1000 [size=16, length=-1]
1@1000 --(field@3)--> 3@1001 [size=24, length=-1]
diff --git a/test/913-heaps/expected_d8.diff b/test/913-heaps/expected_d8.diff
deleted file mode 100644
index 1ad0cbdd3b..0000000000
--- a/test/913-heaps/expected_d8.diff
+++ /dev/null
@@ -1,70 +0,0 @@
-4,5c4
-< root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonRoot,vreg=13,location= 30])--> 1@1000 [size=16, length=-1]
-< root@root --(stack-local[id=1,tag=3000,depth=3,method=doFollowReferencesTest,vreg=1,location= 28])--> 3000@0 [size=136, length=-1]
----
-> root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonRoot,vreg=8,location= 31])--> 1@1000 [size=16, length=-1]
-49c48
-< root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=11,location= 8])--> 1@1000 [size=16, length=-1]
----
-> root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=10,location= 8])--> 1@1000 [size=16, length=-1]
-51c50,51
-< root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 19])--> 1@1000 [size=16, length=-1]
----
-> root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=13,location= 20])--> 1@1000 [size=16, length=-1]
-> root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 20])--> 1@1000 [size=16, length=-1]
-102,103c102
-< root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonRoot,vreg=13,location= 30])--> 1@1000 [size=16, length=-1]
-< root@root --(stack-local[id=1,tag=3000,depth=3,method=doFollowReferencesTest,vreg=1,location= 28])--> 3000@0 [size=136, length=-1]
----
-> root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonRoot,vreg=8,location= 31])--> 1@1000 [size=16, length=-1]
-115c114
-< root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=11,location= 8])--> 1@1000 [size=16, length=-1]
----
-> root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=10,location= 8])--> 1@1000 [size=16, length=-1]
-117c116,117
-< root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 19])--> 1@1000 [size=16, length=-1]
----
-> root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=13,location= 20])--> 1@1000 [size=16, length=-1]
-> root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 20])--> 1@1000 [size=16, length=-1]
-162c162
-< root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonRoot,vreg=13,location= 30])--> 1@1000 [size=16, length=-1]
----
-> root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonRoot,vreg=8,location= 31])--> 1@1000 [size=16, length=-1]
-177c177
-< root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=11,location= 8])--> 1@1000 [size=16, length=-1]
----
-> root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=10,location= 8])--> 1@1000 [size=16, length=-1]
-179c179,180
-< root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 19])--> 1@1000 [size=16, length=-1]
----
-> root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=13,location= 20])--> 1@1000 [size=16, length=-1]
-> root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 20])--> 1@1000 [size=16, length=-1]
-201,202c202
-< root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonRoot,vreg=13,location= 30])--> 1@1000 [size=16, length=-1]
-< root@root --(stack-local[id=1,tag=3000,depth=3,method=doFollowReferencesTest,vreg=1,location= 28])--> 3000@0 [size=136, length=-1]
----
-> root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonRoot,vreg=8,location= 31])--> 1@1000 [size=16, length=-1]
-246c246
-< root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=11,location= 8])--> 1@1000 [size=16, length=-1]
----
-> root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=10,location= 8])--> 1@1000 [size=16, length=-1]
-248c248,249
-< root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 19])--> 1@1000 [size=16, length=-1]
----
-> root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=13,location= 20])--> 1@1000 [size=16, length=-1]
-> root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 20])--> 1@1000 [size=16, length=-1]
-292d292
-< root@root --(stack-local[id=1,tag=3000,depth=3,method=doFollowReferencesTest,vreg=1,location= 28])--> 3000@0 [size=136, length=-1]
-347c347
-< root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonRoot,vreg=13,location= 30])--> 1@1000 [size=16, length=-1]
----
-> root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonRoot,vreg=8,location= 31])--> 1@1000 [size=16, length=-1]
-366c366
-< root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=11,location= 8])--> 1@1000 [size=16, length=-1]
----
-> root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=10,location= 8])--> 1@1000 [size=16, length=-1]
-368c368,369
-< root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 19])--> 1@1000 [size=16, length=-1]
----
-> root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=13,location= 20])--> 1@1000 [size=16, length=-1]
-> root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 20])--> 1@1000 [size=16, length=-1]
diff --git a/test/913-heaps/heaps.cc b/test/913-heaps/heaps.cc
index 00a7ea786e..b07554ca46 100644
--- a/test/913-heaps/heaps.cc
+++ b/test/913-heaps/heaps.cc
@@ -168,6 +168,12 @@ extern "C" JNIEXPORT jobjectArray JNICALL Java_art_Test913_followReferences(
if (reference_kind == JVMTI_HEAP_REFERENCE_JNI_GLOBAL && class_tag == 0) {
return 0;
}
+ // Ignore HEAP_REFERENCE_OTHER roots because these are vm-internal roots and can vary
+ // depending on the configuration of the runtime (notably having trampoline tracing will add a
+ // lot of these).
+ if (reference_kind == JVMTI_HEAP_REFERENCE_OTHER) {
+ return 0;
+ }
// Ignore classes (1000 <= tag < 3000) for thread objects. These can be held by the JIT.
if (reference_kind == JVMTI_HEAP_REFERENCE_THREAD && class_tag == 0 &&
(1000 <= *tag_ptr && *tag_ptr < 3000)) {
diff --git a/test/952-invoke-custom/build b/test/952-invoke-custom/build
index 2caca94d7f..a70fc20c05 100755
--- a/test/952-invoke-custom/build
+++ b/test/952-invoke-custom/build
@@ -14,39 +14,60 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# make us exit on a failure
+# Stop on failure.
set -e
-ASM_JAR="${ANDROID_BUILD_TOP}/prebuilts/misc/common/asm/asm-6.0.jar"
-INTERMEDIATE_CLASSES=classes-intermediate
-CLASSES=classes
+export ASM_JAR="${ANDROID_BUILD_TOP}/prebuilts/misc/common/asm/asm-6.0.jar"
-DEXER="${DX:-dx}"
-if [ "${USE_D8=false}" = "true" ]; then
- DEXER="${ANDROID_HOST_OUT}/bin/d8-compat-dx"
-fi
+export ORIGINAL_JAVAC="$JAVAC"
-# Create directory for intermediate classes
-rm -rf "${INTERMEDIATE_CLASSES}"
-mkdir "${INTERMEDIATE_CLASSES}"
+# Wrapper function for javac which invokes the compiler and applies
+# transforms to class files after compilation.
+function javac_wrapper {
+ set -e # Stop on error - the caller script may not have this set.
-# Generate intermediate classes that will allow transform to be applied to test classes
-JAVAC_ARGS="${JAVAC_ARGS} -source 1.8 -target 1.8 -cp ${ASM_JAR}"
-${JAVAC:-javac} ${JAVAC_ARGS} -d ${INTERMEDIATE_CLASSES} $(find src -name '*.java')
+ # Update arguments to add transformer and ASM to the compiler classpath.
+ local args=()
+ local classpath="./transformer.jar:$ASM_JAR"
+ while [ $# -ne 0 ] ; do
+ case $1 in
+ -cp|-classpath|--class-path)
+ shift
+ shift
+ args+=(-cp $classpath)
+ ;;
+ *)
+ args+=("$1")
+ shift
+ ;;
+ esac
+ done
-# Create directory for transformed classes
-rm -rf "${CLASSES}"
-mkdir "${CLASSES}"
+ # Compile.
+ $ORIGINAL_JAVAC "${args[@]}"
-# Run transform
-for class in ${INTERMEDIATE_CLASSES}/*.class ; do
- transformed_class=${CLASSES}/$(basename ${class})
- ${JAVA:-java} -cp "${ASM_JAR}:${INTERMEDIATE_CLASSES}" transformer.IndyTransformer ${class} ${transformed_class}
-done
+ # Move original classes to intermediate location.
+ mv classes intermediate-classes
+ mkdir classes
-# Create DEX
-DX_FLAGS="${DX_FLAGS} --min-sdk-version=26 --debug --dump-width=1000"
-${DEXER} -JXmx256m --dex ${DX_FLAGS} --dump-to=${CLASSES}.lst --output=classes.dex ${CLASSES}
+ # Transform intermediate classes.
+ local transformer_args="-cp ${ASM_JAR}:transformer.jar transformer.IndyTransformer"
+ for class in intermediate-classes/*.class ; do
+ local transformed_class=classes/$(basename ${class})
+ ${JAVA:-java} ${transformer_args} $PWD/${class} ${transformed_class}
+ done
+}
-# Zip DEX to file name expected by test runner
-zip ${TEST_NAME:-classes-dex}.jar classes.dex
+export -f javac_wrapper
+export JAVAC=javac_wrapper
+
+######################################################################
+
+# Build the transformer to apply to compiled classes.
+mkdir classes
+${ORIGINAL_JAVAC:-javac} ${JAVAC_ARGS} -cp "${ASM_JAR}" -d classes $(find util-src -name '*.java')
+jar -cf transformer.jar -C classes transformer/ -C classes annotations/
+rm -rf classes
+
+# Use API level 28 for invoke-custom bytecode support.
+DESUGAR=false ./default-build "$@" --api-level 28
diff --git a/test/952-invoke-custom/src/TestReturnValues.java b/test/952-invoke-custom/src/TestReturnValues.java
new file mode 100644
index 0000000000..8450a44310
--- /dev/null
+++ b/test/952-invoke-custom/src/TestReturnValues.java
@@ -0,0 +1,330 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import annotations.BootstrapMethod;
+import annotations.CalledByIndy;
+import java.lang.invoke.CallSite;
+import java.lang.invoke.ConstantCallSite;
+import java.lang.invoke.MethodHandle;
+import java.lang.invoke.MethodHandles;
+import java.lang.invoke.MethodType;
+
+class TestReturnValues extends TestBase {
+ static CallSite bsm(MethodHandles.Lookup lookup, String name, MethodType methodType)
+ throws Throwable {
+ MethodHandle mh = lookup.findStatic(TestReturnValues.class, name, methodType);
+ return new ConstantCallSite(mh);
+ }
+
+ //
+ // Methods that pass through a single argument.
+ // Used to check return path.
+ //
+ static byte passThrough(byte value) {
+ return value;
+ }
+
+ static char passThrough(char value) {
+ return value;
+ }
+
+ static double passThrough(double value) {
+ return value;
+ }
+
+ static float passThrough(float value) {
+ return value;
+ }
+
+ static int passThrough(int value) {
+ return value;
+ }
+
+ static Object passThrough(Object value) {
+ return value;
+ }
+
+ static Object[] passThrough(Object[] value) {
+ return value;
+ }
+
+ static long passThrough(long value) {
+ return value;
+ }
+
+ static short passThrough(short value) {
+ return value;
+ }
+
+ static void passThrough() {}
+
+ static boolean passThrough(boolean value) {
+ return value;
+ }
+
+ // byte
+ @CalledByIndy(
+ bootstrapMethod =
+ @BootstrapMethod(enclosingType = TestReturnValues.class, name = "bsm"),
+ fieldOrMethodName = "passThrough",
+ returnType = byte.class,
+ parameterTypes = {byte.class})
+ private static byte passThroughCallSite(byte value) {
+ assertNotReached();
+ return (byte) 0;
+ }
+
+ // char
+ @CalledByIndy(
+ bootstrapMethod =
+ @BootstrapMethod(enclosingType = TestReturnValues.class, name = "bsm"),
+ fieldOrMethodName = "passThrough",
+ returnType = char.class,
+ parameterTypes = {char.class})
+ private static char passThroughCallSite(char value) {
+ assertNotReached();
+ return 'Z';
+ }
+
+ // double
+ @CalledByIndy(
+ bootstrapMethod =
+ @BootstrapMethod(enclosingType = TestReturnValues.class, name = "bsm"),
+ fieldOrMethodName = "passThrough",
+ returnType = double.class,
+ parameterTypes = {double.class})
+ private static double passThroughCallSite(double value) {
+ assertNotReached();
+ return Double.NaN;
+ }
+
+ // float
+ @CalledByIndy(
+ bootstrapMethod =
+ @BootstrapMethod(enclosingType = TestReturnValues.class, name = "bsm"),
+ fieldOrMethodName = "passThrough",
+ returnType = float.class,
+ parameterTypes = {float.class})
+ private static float passThroughCallSite(float value) {
+ assertNotReached();
+ return Float.NaN;
+ }
+
+ // int
+ @CalledByIndy(
+ bootstrapMethod =
+ @BootstrapMethod(enclosingType = TestReturnValues.class, name = "bsm"),
+ fieldOrMethodName = "passThrough",
+ returnType = int.class,
+ parameterTypes = {int.class})
+ private static int passThroughCallSite(int value) {
+ assertNotReached();
+ return 0;
+ }
+
+ // long
+ @CalledByIndy(
+ bootstrapMethod =
+ @BootstrapMethod(enclosingType = TestReturnValues.class, name = "bsm"),
+ fieldOrMethodName = "passThrough",
+ returnType = long.class,
+ parameterTypes = {long.class})
+ private static long passThroughCallSite(long value) {
+ assertNotReached();
+ return Long.MIN_VALUE;
+ }
+
+ // Object
+ @CalledByIndy(
+ bootstrapMethod =
+ @BootstrapMethod(enclosingType = TestReturnValues.class, name = "bsm"),
+ fieldOrMethodName = "passThrough",
+ returnType = Object.class,
+ parameterTypes = {Object.class})
+ private static Object passThroughCallSite(Object value) {
+ assertNotReached();
+ return null;
+ }
+
+ // Object[]
+ @CalledByIndy(
+ bootstrapMethod =
+ @BootstrapMethod(enclosingType = TestReturnValues.class, name = "bsm"),
+ fieldOrMethodName = "passThrough",
+ returnType = Object[].class,
+ parameterTypes = {Object[].class})
+ private static Object[] passThroughCallSite(Object[] value) {
+ assertNotReached();
+ return null;
+ }
+
+ // short
+ @CalledByIndy(
+ bootstrapMethod =
+ @BootstrapMethod(enclosingType = TestReturnValues.class, name = "bsm"),
+ fieldOrMethodName = "passThrough",
+ returnType = short.class,
+ parameterTypes = {short.class})
+ private static short passThroughCallSite(short value) {
+ assertNotReached();
+ return (short) 0;
+ }
+
+ // void
+ @CalledByIndy(
+ bootstrapMethod =
+ @BootstrapMethod(enclosingType = TestReturnValues.class, name = "bsm"),
+ fieldOrMethodName = "passThrough",
+ returnType = void.class,
+ parameterTypes = {})
+ private static void passThroughCallSite() {
+ assertNotReached();
+ }
+
+ // boolean
+ @CalledByIndy(
+ bootstrapMethod =
+ @BootstrapMethod(enclosingType = TestReturnValues.class, name = "bsm"),
+ fieldOrMethodName = "passThrough",
+ returnType = boolean.class,
+ parameterTypes = {boolean.class})
+ private static boolean passThroughCallSite(boolean value) {
+ assertNotReached();
+ return false;
+ }
+
+ private static void testByteReturnValues() {
+ byte[] values = {Byte.MIN_VALUE, Byte.MAX_VALUE};
+ for (byte value : values) {
+ assertEquals(value, (byte) passThroughCallSite(value));
+ }
+ }
+
+ private static void testCharReturnValues() {
+ char[] values = {
+ Character.MIN_VALUE,
+ Character.MAX_HIGH_SURROGATE,
+ Character.MAX_LOW_SURROGATE,
+ Character.MAX_VALUE
+ };
+ for (char value : values) {
+ assertEquals(value, (char) passThroughCallSite(value));
+ }
+ }
+
+ private static void testDoubleReturnValues() {
+ double[] values = {
+ Double.MIN_VALUE,
+ Double.MIN_NORMAL,
+ Double.NaN,
+ Double.POSITIVE_INFINITY,
+ Double.NEGATIVE_INFINITY,
+ Double.MAX_VALUE
+ };
+ for (double value : values) {
+ assertEquals(value, (double) passThroughCallSite(value));
+ }
+ }
+
+ private static void testFloatReturnValues() {
+ float[] values = {
+ Float.MIN_VALUE,
+ Float.MIN_NORMAL,
+ Float.NaN,
+ Float.POSITIVE_INFINITY,
+ Float.NEGATIVE_INFINITY,
+ Float.MAX_VALUE
+ };
+ for (float value : values) {
+ assertEquals(value, (float) passThroughCallSite(value));
+ }
+ }
+
+ private static void testIntReturnValues() {
+ int[] values = {Integer.MIN_VALUE, Integer.MAX_VALUE, Integer.SIZE, -Integer.SIZE};
+ for (int value : values) {
+ assertEquals(value, (int) passThroughCallSite(value));
+ }
+ }
+
+ private static void testLongReturnValues() {
+ long[] values = {Long.MIN_VALUE, Long.MAX_VALUE, (long) Long.SIZE, (long) -Long.SIZE};
+ for (long value : values) {
+ assertEquals(value, (long) passThroughCallSite(value));
+ }
+ }
+
+ private static void testObjectReturnValues() {
+ Object[] values = {null, "abc", Integer.valueOf(123)};
+ for (Object value : values) {
+ assertEquals(value, (Object) passThroughCallSite(value));
+ }
+
+ Object[] otherValues = (Object[]) passThroughCallSite(values);
+ assertEquals(values.length, otherValues.length);
+ for (int i = 0; i < otherValues.length; ++i) {
+ assertEquals(values[i], otherValues[i]);
+ }
+ }
+
+ private static void testShortReturnValues() {
+ short[] values = {
+ Short.MIN_VALUE, Short.MAX_VALUE, (short) Short.SIZE, (short) -Short.SIZE
+ };
+ for (short value : values) {
+ assertEquals(value, (short) passThroughCallSite(value));
+ }
+ }
+
+ private static void testVoidReturnValues() {
+ long l = Long.MIN_VALUE;
+ double d = Double.MIN_VALUE;
+ passThroughCallSite(); // Initializes call site
+ assertEquals(Long.MIN_VALUE, l);
+ assertEquals(Double.MIN_VALUE, d);
+
+ l = Long.MAX_VALUE;
+ d = Double.MAX_VALUE;
+ passThroughCallSite(); // re-uses existing call site
+ assertEquals(Long.MAX_VALUE, l);
+ assertEquals(Double.MAX_VALUE, d);
+ }
+
+ private static void testBooleanReturnValues() {
+ boolean[] values = {true, false, true, false, false};
+ for (boolean value : values) {
+ assertEquals(value, (boolean) passThroughCallSite(value));
+ }
+ }
+
+ public static void test() {
+ System.out.println(TestReturnValues.class.getName());
+ // Two passes here - the first is for the call site creation and invoke path, the second
+ // for the lookup and invoke path.
+ for (int pass = 0; pass < 2; ++pass) {
+ testByteReturnValues(); // B
+ testCharReturnValues(); // C
+ testDoubleReturnValues(); // D
+ testFloatReturnValues(); // F
+ testIntReturnValues(); // I
+ testLongReturnValues(); // J
+ testObjectReturnValues(); // L
+ testShortReturnValues(); // S
+ testVoidReturnValues(); // S
+ testBooleanReturnValues(); // Z
+ }
+ }
+}
diff --git a/test/952-invoke-custom/src/annotations/BootstrapMethod.java b/test/952-invoke-custom/util-src/annotations/BootstrapMethod.java
index c16783007f..c16783007f 100644
--- a/test/952-invoke-custom/src/annotations/BootstrapMethod.java
+++ b/test/952-invoke-custom/util-src/annotations/BootstrapMethod.java
diff --git a/test/952-invoke-custom/src/annotations/CalledByIndy.java b/test/952-invoke-custom/util-src/annotations/CalledByIndy.java
index c4d13a2af4..c4d13a2af4 100644
--- a/test/952-invoke-custom/src/annotations/CalledByIndy.java
+++ b/test/952-invoke-custom/util-src/annotations/CalledByIndy.java
diff --git a/test/952-invoke-custom/src/annotations/Constant.java b/test/952-invoke-custom/util-src/annotations/Constant.java
index 7966a524ba..7966a524ba 100644
--- a/test/952-invoke-custom/src/annotations/Constant.java
+++ b/test/952-invoke-custom/util-src/annotations/Constant.java
diff --git a/test/952-invoke-custom/src/transformer/IndyTransformer.java b/test/952-invoke-custom/util-src/transformer/IndyTransformer.java
index 45cb4760c9..d21dbbeabc 100644
--- a/test/952-invoke-custom/src/transformer/IndyTransformer.java
+++ b/test/952-invoke-custom/util-src/transformer/IndyTransformer.java
@@ -69,7 +69,7 @@ import org.objectweb.asm.Type;
*
* <p>In the example above, this results in add() being replaced by invocations of magicAdd().
*/
-class IndyTransformer {
+public class IndyTransformer {
static class BootstrapBuilder extends ClassVisitor {
@@ -164,10 +164,9 @@ class IndyTransformer {
}
private static void transform(Path inputClassPath, Path outputClassPath) throws Throwable {
+ URL url = inputClassPath.getParent().toUri().toURL();
URLClassLoader classLoader =
- new URLClassLoader(
- new URL[] {inputClassPath.toUri().toURL()},
- ClassLoader.getSystemClassLoader());
+ new URLClassLoader(new URL[] {url}, ClassLoader.getSystemClassLoader());
String inputClassName = inputClassPath.getFileName().toString().replace(".class", "");
Class<?> inputClass = classLoader.loadClass(inputClassName);
Map<String, CalledByIndy> callsiteMap = new HashMap<>();
diff --git a/test/956-methodhandles/expected.txt b/test/956-methodhandles/expected.txt
index 6954c22ccb..a8b609bd21 100644
--- a/test/956-methodhandles/expected.txt
+++ b/test/956-methodhandles/expected.txt
@@ -15,6 +15,7 @@ H.chatter()
Chatty.chatter()
Chatty.chatter()
String constructors done.
+testReturnValues done.
testReferenceReturnValueConversions done.
testPrimitiveReturnValueConversions done.
Hi
diff --git a/test/956-methodhandles/src/Main.java b/test/956-methodhandles/src/Main.java
index dee818a4ee..11d6ead683 100644
--- a/test/956-methodhandles/src/Main.java
+++ b/test/956-methodhandles/src/Main.java
@@ -102,6 +102,7 @@ public class Main {
testAsType();
testConstructors();
testStringConstructors();
+ testReturnValues();
testReturnValueConversions();
testVariableArity();
testVariableArity_MethodHandles_bind();
@@ -873,6 +874,89 @@ public class Main {
System.out.println("String constructors done.");
}
+ private static void testReturnValues() throws Throwable {
+ Lookup lookup = MethodHandles.lookup();
+
+ // byte
+ MethodHandle mhByteValue =
+ lookup.findVirtual(Byte.class, "byteValue", MethodType.methodType(byte.class));
+ assertEquals((byte) -77, (byte) mhByteValue.invokeExact(Byte.valueOf((byte) -77)));
+ assertEquals((byte) -77, (byte) mhByteValue.invoke(Byte.valueOf((byte) -77)));
+
+ // char
+ MethodHandle mhCharacterValue =
+ lookup.findStaticGetter(Character.class, "MAX_SURROGATE", char.class);
+ assertEquals(Character.MAX_SURROGATE, (char) mhCharacterValue.invokeExact());
+ assertEquals(Character.MAX_SURROGATE, (char) mhCharacterValue.invoke());
+
+ // double
+ MethodHandle mhSin =
+ lookup.findStatic(
+ Math.class, "sin", MethodType.methodType(double.class, double.class));
+ for (double i = -Math.PI; i <= Math.PI; i += Math.PI / 8) {
+ assertEquals(Math.sin(i), (double) mhSin.invokeExact(i));
+ assertEquals(Math.sin(i), (double) mhSin.invoke(i));
+ }
+
+ // float
+ MethodHandle mhAbsFloat =
+ lookup.findStatic(
+ Math.class, "abs", MethodType.methodType(float.class, float.class));
+ assertEquals(Math.abs(-3.3e6f), (float) mhAbsFloat.invokeExact(-3.3e6f));
+ assertEquals(Math.abs(-3.3e6f), (float) mhAbsFloat.invoke(-3.3e6f));
+
+ // int
+ MethodHandle mhAbsInt =
+ lookup.findStatic(Math.class, "abs", MethodType.methodType(int.class, int.class));
+ assertEquals(Math.abs(-1000), (int) mhAbsInt.invokeExact(-1000));
+ assertEquals(Math.abs(-1000), (int) mhAbsInt.invoke(-1000));
+
+ // long
+ MethodHandle mhMaxLong =
+ lookup.findStatic(
+ Math.class,
+ "max",
+ MethodType.methodType(long.class, long.class, long.class));
+ assertEquals(
+ Long.MAX_VALUE, (long) mhMaxLong.invokeExact(Long.MAX_VALUE, Long.MAX_VALUE / 2));
+ assertEquals(Long.MAX_VALUE, (long) mhMaxLong.invoke(Long.MAX_VALUE, Long.MAX_VALUE / 2));
+ assertEquals(0x0123456789abcdefL, (long) mhMaxLong.invokeExact(0x0123456789abcdefL, 0L));
+ assertEquals(0x0123456789abcdefL, (long) mhMaxLong.invoke(0x0123456789abcdefL, 0L));
+
+ // ref
+ MethodHandle mhShortValueOf =
+ lookup.findStatic(
+ Short.class, "valueOf", MethodType.methodType(Short.class, short.class));
+ assertEquals(
+ (short) -7890, ((Short) mhShortValueOf.invokeExact((short) -7890)).shortValue());
+ assertEquals((short) -7890, ((Short) mhShortValueOf.invoke((short) -7890)).shortValue());
+
+ // array
+ int [] array = {Integer.MIN_VALUE, -1, 0, +1, Integer.MAX_VALUE};
+ MethodHandle mhCopyOf =
+ lookup.findStatic(
+ Arrays.class, "copyOf", MethodType.methodType(int[].class, int[].class, int.class));
+ assertTrue(Arrays.equals(array, (int[]) mhCopyOf.invokeExact(array, array.length)));
+ assertTrue(Arrays.equals(array, (int[]) mhCopyOf.invoke(array, array.length)));
+
+ // short
+ MethodHandle mhShortValue =
+ lookup.findVirtual(Short.class, "shortValue", MethodType.methodType(short.class));
+ assertEquals((short) 12131, (short) mhShortValue.invokeExact(Short.valueOf((short) 12131)));
+ assertEquals((short) 12131, (short) mhShortValue.invoke(Short.valueOf((short) 12131)));
+
+ // boolean
+ MethodHandle mhBooleanValue =
+ lookup.findVirtual(
+ Boolean.class, "booleanValue", MethodType.methodType(boolean.class));
+ assertEquals(true, (boolean) mhBooleanValue.invokeExact(Boolean.valueOf(true)));
+ assertEquals(true, (boolean) mhBooleanValue.invoke(Boolean.valueOf(true)));
+ assertEquals(false, (boolean) mhBooleanValue.invokeExact(Boolean.valueOf(false)));
+ assertEquals(false, (boolean) mhBooleanValue.invoke(Boolean.valueOf(false)));
+
+ System.out.println("testReturnValues done.");
+ }
+
private static void testReferenceReturnValueConversions() throws Throwable {
MethodHandle mh = MethodHandles.lookup().findStatic(
Float.class, "valueOf", MethodType.methodType(Float.class, String.class));
diff --git a/test/961-default-iface-resolution-gen/build b/test/961-default-iface-resolution-gen/build
index d719a9ffe9..1d245894de 100755
--- a/test/961-default-iface-resolution-gen/build
+++ b/test/961-default-iface-resolution-gen/build
@@ -22,5 +22,4 @@ mkdir -p ./src
# Generate the smali files and expected.txt or fail
./util-src/generate_java.py ./src ./expected.txt
-# dx runs out of memory with default 256M, give it more memory.
-./default-build "$@" --experimental default-methods --dx-vm-option -JXmx1024M
+./default-build "$@" --experimental default-methods
diff --git a/test/964-default-iface-init-gen/build b/test/964-default-iface-init-gen/build
index e504690043..1d245894de 100755
--- a/test/964-default-iface-init-gen/build
+++ b/test/964-default-iface-init-gen/build
@@ -22,5 +22,4 @@ mkdir -p ./src
# Generate the smali files and expected.txt or fail
./util-src/generate_java.py ./src ./expected.txt
-# dx runs out of memory with just 256m, so increase it.
-./default-build "$@" --experimental default-methods --dx-vm-option -JXmx1024M
+./default-build "$@" --experimental default-methods
diff --git a/test/979-const-method-handle/build b/test/979-const-method-handle/build
index ce931a96d1..4d22cb608b 100755
--- a/test/979-const-method-handle/build
+++ b/test/979-const-method-handle/build
@@ -17,39 +17,41 @@
# make us exit on a failure
set -e
-ASM_JAR="${ANDROID_BUILD_TOP}/prebuilts/misc/common/asm/asm-6.0.jar"
-INTERMEDIATE_CLASSES=classes-intermediate
-TRANSFORMER_CLASSES=classes-transformer
-CLASSES=classes
-
-DEXER="${DX:-dx}"
-if [ "${USE_D8=false}" = "true" ]; then
- DEXER="${ANDROID_HOST_OUT}/bin/d8-compat-dx"
-fi
-
-# Create directories for classes
-for class_dir in "${INTERMEDIATE_CLASSES}" "${TRANSFORMER_CLASSES}" "${CLASSES}"; do
- rm -rf "${class_dir}"
- mkdir "${class_dir}"
-done
-
-# Build transformer
-${JAVAC:-javac} ${JAVAC_ARGS} -cp "${ASM_JAR}" -d ${TRANSFORMER_CLASSES} $(find util-src -name '*.java')
-
-# Generate intermediate classes that will allow transform to be applied to test classes
-JAVAC_ARGS="${JAVAC_ARGS} -source 1.8 -target 1.8"
-${JAVAC:-javac} ${JAVAC_ARGS} -cp ${TRANSFORMER_CLASSES} -d ${INTERMEDIATE_CLASSES} $(find src -name '*.java')
-
-# Run transform
-for class in ${INTERMEDIATE_CLASSES}/*.class ; do
- transformed_class=${CLASSES}/$(basename ${class})
- ${JAVA:-java} -cp "${ASM_JAR}:${TRANSFORMER_CLASSES}" \
- transformer.ConstantTransformer ${class} ${transformed_class}
-done
-
-# Create DEX
-DX_FLAGS="${DX_FLAGS} --min-sdk-version=28 --debug --dump-width=1000"
-${DEXER} -JXmx256m --dex ${DX_FLAGS} --dump-to=${CLASSES}.lst --output=classes.dex ${CLASSES} ${TRANSFORMER_CLASSES}
-
-# Zip DEX to file name expected by test runner
-zip ${TEST_NAME:-classes-dex}.jar classes.dex
+export ASM_JAR="${ANDROID_BUILD_TOP}/prebuilts/misc/common/asm/asm-6.0.jar"
+
+export ORIGINAL_JAVAC="$JAVAC"
+
+function javac_wrapper {
+ set -e
+
+ # Add annotation src files to our compiler inputs.
+ local asrcs=util-src/annotations/*.java
+
+ # Compile.
+ $ORIGINAL_JAVAC "$@" $asrcs
+
+ # Move original classes to intermediate location.
+ mv classes intermediate-classes
+ mkdir classes
+
+ # Transform intermediate classes.
+ local transformer_args="-cp ${ASM_JAR}:$PWD/transformer.jar transformer.ConstantTransformer"
+ for class in intermediate-classes/*.class ; do
+ local transformed_class=classes/$(basename ${class})
+ ${JAVA:-java} ${transformer_args} ${class} ${transformed_class}
+ done
+}
+
+export -f javac_wrapper
+export JAVAC=javac_wrapper
+
+######################################################################
+
+# Build the transformer to apply to compiled classes.
+mkdir classes
+${ORIGINAL_JAVAC:-javac} ${JAVAC_ARGS} -cp "${ASM_JAR}" -d classes $(find util-src -name '*.java')
+jar -cf transformer.jar -C classes transformer/ -C classes annotations/
+rm -rf classes
+
+# Use API level 28 for DEX file support constant method handles.
+./default-build "$@" --api-level 28
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index f8bebdd35f..53d4c372c4 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -19,13 +19,11 @@ include art/build/Android.common_test.mk
# Dependencies for actually running a run-test.
TEST_ART_RUN_TEST_DEPENDENCIES := \
- $(HOST_OUT_EXECUTABLES)/dx \
$(HOST_OUT_EXECUTABLES)/d8 \
$(HOST_OUT_EXECUTABLES)/d8-compat-dx \
$(HOST_OUT_EXECUTABLES)/hiddenapi \
$(HOST_OUT_EXECUTABLES)/jasmin \
- $(HOST_OUT_EXECUTABLES)/smali \
- $(HOST_OUT_JAVA_LIBRARIES)/desugar.jar
+ $(HOST_OUT_EXECUTABLES)/smali
# We need dex2oat and dalvikvm on the target as well as the core images (all images as we sync
# only once).
@@ -97,7 +95,7 @@ endif
# Host executables.
host_prereq_rules := $(ART_TEST_HOST_RUN_TEST_DEPENDENCIES)
-# Required for dx, jasmin, smali.
+# Required for jasmin and smali.
host_prereq_rules += $(TEST_ART_RUN_TEST_DEPENDENCIES)
# Sync test files to the target, depends upon all things that must be pushed
@@ -140,8 +138,11 @@ $(foreach target, $(TARGET_TYPES), \
$(call core-image-dependencies,$(target),$(image),$(compiler),$(address_size)))))))
test-art-host-run-test-dependencies : $(host_prereq_rules)
+.PHONY: test-art-host-run-test-dependencies
test-art-target-run-test-dependencies : $(target_prereq_rules)
+.PHONY: test-art-target-run-test-dependencies
test-art-run-test-dependencies : test-art-host-run-test-dependencies test-art-target-run-test-dependencies
+.PHONY: test-art-run-test-dependencies
# Create a rule to build and run a test group of the following form:
# test-art-{1: host target}-run-test
diff --git a/test/common/runtime_state.cc b/test/common/runtime_state.cc
index f89888bb99..f2da6febe0 100644
--- a/test/common/runtime_state.cc
+++ b/test/common/runtime_state.cc
@@ -60,7 +60,7 @@ extern "C" JNIEXPORT jboolean JNICALL Java_Main_hasOatFile(JNIEnv* env, jclass c
ObjPtr<mirror::Class> klass = soa.Decode<mirror::Class>(cls);
const DexFile& dex_file = klass->GetDexFile();
- const OatFile::OatDexFile* oat_dex_file = dex_file.GetOatDexFile();
+ const OatDexFile* oat_dex_file = dex_file.GetOatDexFile();
return (oat_dex_file != nullptr) ? JNI_TRUE : JNI_FALSE;
}
@@ -100,7 +100,7 @@ extern "C" JNIEXPORT jboolean JNICALL Java_Main_compiledWithOptimizing(JNIEnv* e
ObjPtr<mirror::Class> klass = soa.Decode<mirror::Class>(cls);
const DexFile& dex_file = klass->GetDexFile();
- const OatFile::OatDexFile* oat_dex_file = dex_file.GetOatDexFile();
+ const OatDexFile* oat_dex_file = dex_file.GetOatDexFile();
if (oat_dex_file == nullptr) {
// Could be JIT, which also uses optimizing, but conservatively say no.
return JNI_FALSE;
diff --git a/test/etc/default-build b/test/etc/default-build
index 9dbc73c6b4..8542ad0e92 100755
--- a/test/etc/default-build
+++ b/test/etc/default-build
@@ -17,9 +17,14 @@
# Stop if something fails.
set -e
+function fail() {
+ echo "$*" >&2
+ exit 1
+}
+
if [[ $# -le 0 ]]; then
echo 'Error:' '$0 should have the parameters from the "build" script forwarded to it' >&2
- echo 'Error: An example of how do it correctly is ./default-build "$@"'
+ fail 'Error: An example of how do it correctly is ./default-build "$@"'
exit 1
fi
@@ -98,7 +103,7 @@ if [ -z "${USE_HIDDENAPI}" ]; then
USE_HIDDENAPI=true
fi
-# DESUGAR=false run-test... will disable desugar.
+# DESUGAR=false run-test... will disable desugaring.
if [[ "$DESUGAR" == false ]]; then
USE_DESUGAR=false
fi
@@ -109,49 +114,25 @@ ZIP_COMPRESSION_METHOD="deflate"
WITH_ZIP_ALIGN=false
ZIP_ALIGN_BYTES="-1"
-DX_FLAGS="--min-sdk-version=26"
-DX_VM_FLAGS=""
-EXPERIMENTAL=""
-
BUILD_MODE="target"
DEV_MODE="no"
-# The key for default arguments if no experimental things are enabled.
DEFAULT_EXPERIMENT="no-experiment"
-# Setup experimental flag mappings in a bash associative array.
-declare -A SMALI_EXPERIMENTAL_ARGS
-SMALI_EXPERIMENTAL_ARGS["default-methods"]="--api 24"
-SMALI_EXPERIMENTAL_ARGS["method-handles"]="--api 26"
-SMALI_EXPERIMENTAL_ARGS["var-handles"]="--api 26"
-SMALI_EXPERIMENTAL_ARGS["agents"]="--api 26"
-
-declare -A JAVAC_EXPERIMENTAL_ARGS
-JAVAC_EXPERIMENTAL_ARGS["default-methods"]="-source 1.8 -target 1.8"
-JAVAC_EXPERIMENTAL_ARGS["lambdas"]="-source 1.8 -target 1.8"
-JAVAC_EXPERIMENTAL_ARGS["method-handles"]="-source 1.8 -target 1.8"
-JAVAC_EXPERIMENTAL_ARGS["parameter-annotations"]="-source 1.8 -target 1.8"
-JAVAC_EXPERIMENTAL_ARGS["var-handles"]="-source 1.8 -target 1.8"
-JAVAC_EXPERIMENTAL_ARGS[${DEFAULT_EXPERIMENT}]="-source 1.8 -target 1.8"
-JAVAC_EXPERIMENTAL_ARGS["agents"]="-source 1.8 -target 1.8"
-
-declare -A DX_EXPERIMENTAL_ARGS
-DX_EXPERIMENTAL_ARGS["method-handles"]="--min-sdk-version=26"
-DX_EXPERIMENTAL_ARGS["parameter-annotations"]="--min-sdk-version=25"
-DX_EXPERIMENTAL_ARGS["var-handles"]="--min-sdk-version=28"
+# The key for default arguments if no experimental things are enabled.
+EXPERIMENTAL=$DEFAULT_EXPERIMENT
+
+# Setup experimental API level mappings in a bash associative array.
+declare -A EXPERIMENTAL_API_LEVEL
+EXPERIMENTAL_API_LEVEL[${DEFAULT_EXPERIMENT}]="26"
+EXPERIMENTAL_API_LEVEL["default-methods"]="24"
+EXPERIMENTAL_API_LEVEL["parameter-annotations"]="25"
+EXPERIMENTAL_API_LEVEL["agents"]="26"
+EXPERIMENTAL_API_LEVEL["method-handles"]="26"
+EXPERIMENTAL_API_LEVEL["var-handles"]="28"
while true; do
- if [ "x$1" = "x--dx-option" ]; then
- shift
- option="$1"
- DX_FLAGS="${DX_FLAGS} $option"
- shift
- elif [ "x$1" = "x--dx-vm-option" ]; then
- shift
- option="$1"
- DX_VM_FLAGS="${DX_VM_FLAGS} $option"
- shift
- elif [ "x$1" = "x--no-src" ]; then
+ if [ "x$1" = "x--no-src" ]; then
HAS_SRC=false
shift
elif [ "x$1" = "x--no-src2" ]; then
@@ -172,11 +153,14 @@ while true; do
elif [ "x$1" = "x--no-jasmin" ]; then
HAS_JASMIN=false
shift
+ elif [ "x$1" = "x--api-level" ]; then
+ shift
+ EXPERIMENTAL_API_LEVEL[${EXPERIMENTAL}]=$1
+ shift
elif [ "x$1" = "x--experimental" ]; then
shift
# We have a specific experimental configuration so don't use the default.
- DEFAULT_EXPERIMENT=""
- EXPERIMENTAL="${EXPERIMENTAL} $1"
+ EXPERIMENTAL="$1"
shift
elif [ "x$1" = "x--zip-compression-method" ]; then
# Allow using different zip compression method, e.g. 'store'
@@ -202,29 +186,25 @@ while true; do
DEV_MODE="yes"
shift
elif expr "x$1" : "x--" >/dev/null 2>&1; then
- echo "unknown $0 option: $1" 1>&2
- exit 1
+ fail "unknown $0 option: $1"
else
break
fi
done
if [[ $BUILD_MODE == jvm ]]; then
- # Does not need desugar on jvm because it supports the latest functionality.
+ # Does not need desugaring on jvm because it supports the latest functionality.
USE_DESUGAR=false
# Do not attempt to build src-art directories on jvm, it would fail without libcore.
HAS_SRC_ART=false
fi
-# Be sure to get any default arguments if not doing any experiments.
-EXPERIMENTAL="${EXPERIMENTAL} ${DEFAULT_EXPERIMENT}"
+# Set API level for smali and d8.
+API_LEVEL="${EXPERIMENTAL_API_LEVEL[${EXPERIMENTAL}]}"
-# Add args from the experimental mappings.
-for experiment in ${EXPERIMENTAL}; do
- SMALI_ARGS="${SMALI_ARGS} ${SMALI_EXPERIMENTAL_ARGS[${experiment}]}"
- JAVAC_ARGS="${JAVAC_ARGS} ${JAVAC_EXPERIMENTAL_ARGS[${experiment}]}"
- DX_FLAGS="${DX_FLAGS} ${DX_EXPERIMENTAL_ARGS[${experiment}]}"
-done
+# Add API level arguments to smali and dx
+SMALI_ARGS="${SMALI_ARGS} --api $API_LEVEL"
+D8_FLAGS="${D8_FLAGS} --min-api $API_LEVEL"
#########################################
@@ -261,16 +241,6 @@ function make_jasmin() {
fi
}
-function desugar() {
- local desugar_args="--mode=$BUILD_MODE"
-
- if [[ $DEV_MODE == yes ]]; then
- desugar_args="$desugar_args --show-commands"
- fi
-
- "$DESUGAR" --core-only $desugar_args "$@"
-}
-
# Like regular javac but may include libcore on the bootclasspath.
function javac_with_bootclasspath {
local helper_args="--mode=$BUILD_MODE"
@@ -283,31 +253,34 @@ function javac_with_bootclasspath {
"$ANDROID_BUILD_TOP/art/tools/javac-helper.sh" --core-only $helper_args ${JAVAC_ARGS} "$@"
}
-# Make a "dex" file given a directory of classes in $1.
-# Also calls desugar on the classes first to convert lambdas.
+# Make a "dex" file given a directory of classes in $1. This will be
+# packaged in a jar file.
function make_dex() {
local name="$1"
-
- local dx_input
- if [[ "$USE_DESUGAR" == "true" ]]; then
- # Make a jar first so desugar doesn't need every .class file individually.
- jar cf "$name.before-desugar.jar" -C "$name" .
-
- dx_input="${name}.desugar.jar"
-
- # Make desugared JAR.
- desugar --input "$name.before-desugar.jar" --output "$dx_input"
+ local d8_inputs=$(find $name -name '*.class' -type f)
+ local d8_output=${name}.jar
+ local dex_output=${name}.dex
+ local d8_local_flags=""
+ if [[ "$USE_DESUGAR" = "true" ]]; then
+ local boot_class_path_list=$($ANDROID_BUILD_TOP/art/tools/bootjars.sh --$BUILD_MODE --core --path)
+ for boot_class_path_element in $boot_class_path_list; do
+ d8_local_flags="$d8_local_flags --classpath $boot_class_path_element"
+ done
else
- dx_input="${name}"
+ d8_local_flags="$d8_local_flags --no-desugaring"
fi
-
- local dexer="${DX}"
- if [[ "${USE_D8}" != "false" ]]; then
- dexer="${ANDROID_HOST_OUT}/bin/d8-compat-dx"
+ if [ "$DEV_MODE" = "yes" ]; then
+ echo ${D8} ${D8_FLAGS} $d8_local_flags --output $d8_output $d8_inputs
fi
+ ${D8} ${D8_FLAGS} $d8_local_flags --output $d8_output $d8_inputs
- # Make dex file from desugared JAR.
- ${dexer} -JXmx256m ${DX_VM_FLAGS} --debug --dex --dump-to=${name}.lst --output=${name}.dex --dump-width=1000 ${DX_FLAGS} "${dx_input}"
+ # D8 outputs to JAR files today rather than DEX files as DX used
+ # to. To compensate, we extract the DEX from d8's output to meet the
+ # expectations of make_dex callers.
+ if [ "$DEV_MODE" = "yes" ]; then
+ echo unzip -p $d8_output classes.dex \> $dex_output
+ fi
+ unzip -p $d8_output classes.dex > $dex_output
}
# Merge all the dex files in $1..$N into $1. Skip non-existing files, but at least 1 file must exist.
@@ -337,11 +310,10 @@ function make_dexmerge() {
# We assume the dexer did all the API level checks and just merge away.
mkdir d8_merge_out
- ${DXMERGER} --min-api 1000 --output ./d8_merge_out "${dex_files_to_merge[@]}"
+ ${DEXMERGER} --min-api 1000 --output ./d8_merge_out "${dex_files_to_merge[@]}"
if [[ -e "./d8_merge_out/classes2.dex" ]]; then
- echo "Cannot merge all dex files into a single dex"
- exit 1
+ fail "Cannot merge all dex files into a single dex"
fi
mv ./d8_merge_out/classes.dex "$dst_file";
@@ -349,7 +321,7 @@ function make_dexmerge() {
}
function make_hiddenapi() {
- local args=()
+ local args=( "encode" )
while [[ $# -gt 0 ]]; do
args+=("--dex=$1")
shift
@@ -421,7 +393,9 @@ else
javac_with_bootclasspath -classpath classes -d classes `find src2 -name '*.java'`
fi
- if [[ "${HAS_SRC}" == "true" || "${HAS_SRC2}" == "true" || "${HAS_SRC_ART}" == "true" ]]; then
+ # If the classes directory is not-empty, package classes in a DEX file. NB some
+ # tests provide classes rather than java files.
+ if [ "$(ls -A classes)" ]; then
if [ ${NEED_DEX} = "true" ]; then
make_dex classes
fi
@@ -432,8 +406,7 @@ if [[ "${HAS_JASMIN}" == true ]]; then
# Compile Jasmin classes as if they were part of the classes.dex file.
make_jasmin jasmin_classes $(find 'jasmin' -name '*.j')
if [[ "${NEED_DEX}" == "true" ]]; then
- # Disable desugar because it won't handle intentional linkage errors.
- USE_DESUGAR=false make_dex jasmin_classes
+ make_dex jasmin_classes
make_dexmerge classes.dex jasmin_classes.dex
else
# Move jasmin classes into classes directory so that they are picked up with -cp classes.
@@ -446,8 +419,7 @@ if [ "${HAS_SMALI}" = "true" -a ${NEED_DEX} = "true" ]; then
# Compile Smali classes
${SMALI} -JXmx512m assemble ${SMALI_ARGS} --output smali_classes.dex `find smali -name '*.smali'`
if [[ ! -s smali_classes.dex ]] ; then
- echo ${SMALI} produced no output. >&2
- exit 1
+ fail "${SMALI} produced no output."
fi
# Merge smali files into classes.dex, this takes priority over any jasmin files.
make_dexmerge classes.dex smali_classes.dex
@@ -458,10 +430,7 @@ if [[ "$HAS_JASMIN_MULTIDEX" == true ]]; then
make_jasmin jasmin_classes2 $(find 'jasmin-multidex' -name '*.j')
if [[ "${NEED_DEX}" == "true" ]]; then
- # Disable desugar because it won't handle intentional linkage errors.
- USE_DESUGAR=false make_dex jasmin_classes2
-
- # Merge jasmin_classes2.dex into classes2.dex
+ make_dex jasmin_classes2
make_dexmerge classes2.dex jasmin_classes2.dex
else
# Move jasmin classes into classes2 directory so that they are picked up with -cp classes2.
@@ -478,7 +447,6 @@ if [ "${HAS_SMALI_MULTIDEX}" = "true" -a ${NEED_DEX} = "true" ]; then
make_dexmerge classes2.dex smali_classes2.dex
fi
-
if [ ${HAS_SRC_EX} = "true" ]; then
# Build src-ex into classes-ex.
# Includes 'src', 'src-art' source when compiling classes-ex, but exclude their .class files.
@@ -523,7 +491,7 @@ fi
# Create a single dex jar with two dex files for multidex.
if [ ${NEED_DEX} = "true" ]; then
- if $(has_multidex); then
+ if [ -f classes2.dex ] ; then
zip $TEST_NAME.jar classes.dex classes2.dex
else
zip $TEST_NAME.jar classes.dex
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index 1ba433e974..713fd35523 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -377,6 +377,9 @@ CHROOT_DEX_LOCATION="$CHROOT$DEX_LOCATION"
if [ "$USE_JVM" = "n" ]; then
FLAGS="${FLAGS} ${ANDROID_FLAGS}"
+ # we don't want to be trying to get adbconnections since the plugin might
+ # not have been built.
+ FLAGS="${FLAGS} -XjdwpProvider:none"
for feature in ${EXPERIMENTAL}; do
FLAGS="${FLAGS} -Xexperimental:${feature} -Xcompiler-option --runtime-arg -Xcompiler-option -Xexperimental:${feature}"
COMPILE_FLAGS="${COMPILE_FLAGS} --runtime-arg -Xexperimental:${feature}"
diff --git a/test/knownfailures.json b/test/knownfailures.json
index ed98d233c3..9ba2b50cba 100644
--- a/test/knownfailures.json
+++ b/test/knownfailures.json
@@ -454,6 +454,14 @@
},
{
"tests": [
+ "638-no-line-number"
+ ],
+ "description": ["Tests that fail on redefine stress due to branch instruction selection"],
+ "bug": "b/110869946",
+ "variant": "redefine-stress"
+ },
+ {
+ "tests": [
"097-duplicate-method",
"138-duplicate-classes-check2",
"159-app-image-fields",
@@ -532,13 +540,6 @@
"bug": "b/33650497"
},
{
- "tests": "640-checker-integer-valueof",
- "description": [
- "The java.lang.Integer.valueOf intrinsic is not supported in PIC mode."
- ],
- "variant": "optimizing & pictest | speed-profile & pictest"
- },
- {
"tests": "202-thread-oome",
"description": "ASAN aborts when large thread stacks are requested.",
"variant": "host",
@@ -991,5 +992,11 @@
"variant": "jit",
"bug": "b/77567088",
"description": ["Test throws exception before or during OOME."]
+ },
+ {
+ "tests": ["021-string2"],
+ "variant": "jit & debuggable",
+ "bug": "b/109791792",
+ "description": ["Stack too big."]
}
]
diff --git a/test/run-test b/test/run-test
index 5bd8b3b348..d90eccdf75 100755
--- a/test/run-test
+++ b/test/run-test
@@ -41,7 +41,7 @@ else
fi
checker="${progdir}/../tools/checker/checker.py"
export JAVA="java"
-export JAVAC="javac -g -Xlint:-options"
+export JAVAC="javac -g -Xlint:-options -source 1.8 -target 1.8"
export RUN="${progdir}/etc/run-test-jar"
export DEX_LOCATION=/data/run-test/${test_dir}
export NEED_DEX="true"
@@ -56,10 +56,10 @@ fi
# If dx was not set by the environment variable, assume it is in the path.
if [ -z "$DX" ]; then
- export DX="dx"
+ export DX="d8-compat-dx"
fi
-export DXMERGER="$D8"
+export DEXMERGER="$D8"
# If jasmin was not set by the environment variable, assume it is in the path.
if [ -z "$JASMIN" ]; then
@@ -529,12 +529,27 @@ fi
# Most interesting target architecture variables are Makefile variables, not environment variables.
# Try to map the suffix64 flag and what we find in ${ANDROID_PRODUCT_OUT}/data/art-test to an architecture name.
function guess_target_arch_name() {
- grep32bit=`ls ${ANDROID_PRODUCT_OUT}/data/art-test | grep -E '^(arm|x86|mips)$'`
- grep64bit=`ls ${ANDROID_PRODUCT_OUT}/data/art-test | grep -E '^(arm64|x86_64|mips64)$'`
- if [ "x${suffix64}" = "x64" ]; then
- target_arch_name=${grep64bit}
+ # Check whether this is a device with native bridge. Currently this is hardcoded
+ # to x86 + arm.
+ x86_arm=`ls ${ANDROID_PRODUCT_OUT}/data/art-test | sort | grep -E '^(arm|x86)$'`
+ # Collapse line-breaks into spaces
+ x86_arm=$(echo $x86_arm)
+ if [ "x$x86_arm" = "xarm x86" ] ; then
+ err_echo "Native-bridge configuration detected."
+ # We only support the main arch for tests.
+ if [ "x${suffix64}" = "x64" ]; then
+ target_arch_name=""
+ else
+ target_arch_name=x86
+ fi
else
- target_arch_name=${grep32bit}
+ grep32bit=`ls ${ANDROID_PRODUCT_OUT}/data/art-test | grep -E '^(arm|x86|mips)$'`
+ grep64bit=`ls ${ANDROID_PRODUCT_OUT}/data/art-test | grep -E '^(arm64|x86_64|mips64)$'`
+ if [ "x${suffix64}" = "x64" ]; then
+ target_arch_name=${grep64bit}
+ else
+ target_arch_name=${grep32bit}
+ fi
fi
}
@@ -660,13 +675,6 @@ if [ "$usage" = "no" ]; then
shift
fi
-# For building with javac and dx always use Java 7. The dx compiler
-# only support byte codes from Java 7 or earlier (class file major
-# version 51 or lower).
-if [ "$NEED_DEX" = "true" ]; then
- export JAVAC="${JAVAC} -source 1.7 -target 1.7"
-fi
-
if [ "$usage" = "yes" ]; then
prog=`basename $prog`
(
diff --git a/test/testrunner/env.py b/test/testrunner/env.py
index 66ed0d0004..1f4b829989 100644
--- a/test/testrunner/env.py
+++ b/test/testrunner/env.py
@@ -140,3 +140,6 @@ ANDROID_JAVA_TOOLCHAIN = os.path.join(ANDROID_BUILD_TOP,
# include platform prebuilt java, javac, etc in $PATH.
os.environ['PATH'] = ANDROID_JAVA_TOOLCHAIN + ':' + os.environ['PATH']
+
+DIST_DIR = _get_build_var('DIST_DIR')
+SOONG_OUT_DIR = _get_build_var('SOONG_OUT_DIR')
diff --git a/test/testrunner/target_config.py b/test/testrunner/target_config.py
index 71f4cc0731..1924cee310 100644
--- a/test/testrunner/target_config.py
+++ b/test/testrunner/target_config.py
@@ -265,15 +265,11 @@ target_config = {
'ART_USE_READ_BARRIER' : 'false'
}
},
- 'art-gtest-valgrind32': {
- # Disabled: x86 valgrind does not understand SSE4.x
- # 'make' : 'valgrind-test-art-host32',
- 'env': {
- 'ART_USE_READ_BARRIER' : 'false'
- }
- },
+ # TODO: Remove this configuration, when the ART Buildbot is no
+ # longer using it for 'host-x86_64-valgrind'.
'art-gtest-valgrind64': {
- 'make' : 'valgrind-test-art-host64',
+ # Disabled: Valgrind is no longer supported.
+ # 'make' : 'valgrind-test-art-host64',
'env': {
'ART_USE_READ_BARRIER' : 'false'
}
@@ -291,10 +287,12 @@ target_config = {
'ASAN_OPTIONS' : 'detect_leaks=0'
}
},
+ # TODO: Also exercise '--interp-ac' in 'art-asan', when b/109813469 is addressed.
'art-asan': {
'run-test' : ['--interpreter',
'--optimizing',
- '--jit'],
+ '--jit',
+ '--speed-profile'],
'env': {
'SANITIZE_HOST' : 'address',
'ASAN_OPTIONS' : 'detect_leaks=0'
diff --git a/test/testrunner/testrunner.py b/test/testrunner/testrunner.py
index 2d1398e3fe..e8d4290d28 100755
--- a/test/testrunner/testrunner.py
+++ b/test/testrunner/testrunner.py
@@ -52,6 +52,8 @@ import json
import multiprocessing
import os
import re
+import shlex
+import shutil
import subprocess
import sys
import tempfile
@@ -115,6 +117,7 @@ build = False
gdb = False
gdb_arg = ''
runtime_option = ''
+run_test_option = []
stop_testrunner = False
dex2oat_jobs = -1 # -1 corresponds to default threads for dex2oat
run_all_configs = False
@@ -325,6 +328,8 @@ def run_tests(tests):
if gdb_arg:
options_all += ' --gdb-arg ' + gdb_arg
+ options_all += ' ' + ' '.join(run_test_option)
+
if runtime_option:
for opt in runtime_option:
options_all += ' --runtime-option ' + opt
@@ -904,6 +909,7 @@ def parse_option():
global gdb
global gdb_arg
global runtime_option
+ global run_test_option
global timeout
global dex2oat_jobs
global run_all_configs
@@ -933,6 +939,12 @@ def parse_option():
global_group.set_defaults(build = env.ART_TEST_RUN_TEST_BUILD)
global_group.add_argument('--gdb', action='store_true', dest='gdb')
global_group.add_argument('--gdb-arg', dest='gdb_arg')
+ global_group.add_argument('--run-test-option', action='append', dest='run_test_option',
+ default=[],
+ help="""Pass an option, unaltered, to the run-test script.
+ This should be enclosed in single-quotes to allow for spaces. The option
+ will be split using shlex.split() prior to invoking run-test.
+ Example \"--run-test-option='--with-agent libtifast.so=MethodExit'\"""")
global_group.add_argument('--runtime-option', action='append', dest='runtime_option',
help="""Pass an option to the runtime. Runtime options
starting with a '-' must be separated by a '=', for
@@ -981,6 +993,8 @@ def parse_option():
if options['gdb_arg']:
gdb_arg = options['gdb_arg']
runtime_option = options['runtime_option'];
+ run_test_option = sum(map(shlex.split, options['run_test_option']), [])
+
timeout = options['timeout']
if options['dex2oat_jobs']:
dex2oat_jobs = options['dex2oat_jobs']
@@ -996,17 +1010,20 @@ def main():
if build:
build_targets = ''
if 'host' in _user_input_variants['target']:
- build_targets += 'test-art-host-run-test-dependencies'
+ build_targets += 'test-art-host-run-test-dependencies '
if 'target' in _user_input_variants['target']:
- build_targets += 'test-art-target-run-test-dependencies'
+ build_targets += 'test-art-target-run-test-dependencies '
if 'jvm' in _user_input_variants['target']:
- build_targets += 'test-art-host-run-test-dependencies'
+ build_targets += 'test-art-host-run-test-dependencies '
build_command = 'make'
build_command += ' DX='
build_command += ' -j'
build_command += ' -C ' + env.ANDROID_BUILD_TOP
build_command += ' ' + build_targets
if subprocess.call(build_command.split()):
+ # Debugging for b/62653020
+ if env.DIST_DIR:
+ shutil.copyfile(env.SOONG_OUT_DIR + '/build.ninja', env.DIST_DIR + '/soong.ninja')
sys.exit(1)
if user_requested_tests:
test_runner_thread = threading.Thread(target=run_tests, args=(user_requested_tests,))
diff --git a/test/ti-agent/breakpoint_helper.cc b/test/ti-agent/breakpoint_helper.cc
index db4ea61f1c..83ba0a6342 100644
--- a/test/ti-agent/breakpoint_helper.cc
+++ b/test/ti-agent/breakpoint_helper.cc
@@ -114,7 +114,7 @@ extern "C" JNIEXPORT jlong JNICALL Java_art_Breakpoint_getStartLocation(JNIEnv*
return 0;
}
jlong start = 0;
- jlong end = end;
+ jlong end;
JvmtiErrorToException(env, jvmti_env, jvmti_env->GetMethodLocation(method, &start, &end));
return start;
}
diff --git a/test/ti-stress/stress.cc b/test/ti-stress/stress.cc
index 0eba7426c0..bd320c66cc 100644
--- a/test/ti-stress/stress.cc
+++ b/test/ti-stress/stress.cc
@@ -56,6 +56,12 @@ struct StressData {
bool step_stress;
};
+static void DeleteLocalRef(JNIEnv* env, jobject obj) {
+ if (obj != nullptr) {
+ env->DeleteLocalRef(obj);
+ }
+}
+
static bool DoExtractClassFromData(jvmtiEnv* env,
const std::string& descriptor,
jint in_len,
@@ -130,8 +136,8 @@ class ScopedThreadInfo {
if (free_name_) {
jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(info_.name));
}
- env_->DeleteLocalRef(info_.thread_group);
- env_->DeleteLocalRef(info_.context_class_loader);
+ DeleteLocalRef(env_, info_.thread_group);
+ DeleteLocalRef(env_, info_.context_class_loader);
}
const char* GetName() const {
@@ -227,7 +233,7 @@ class ScopedMethodInfo {
first_line_(-1) {}
~ScopedMethodInfo() {
- env_->DeleteLocalRef(declaring_class_);
+ DeleteLocalRef(env_, declaring_class_);
jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(name_));
jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(signature_));
jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(generic_));
@@ -389,7 +395,7 @@ static std::string GetName(jvmtiEnv* jvmtienv, JNIEnv* jnienv, jobject obj) {
char *cname, *cgen;
if (jvmtienv->GetClassSignature(klass, &cname, &cgen) != JVMTI_ERROR_NONE) {
LOG(ERROR) << "Unable to get class name!";
- jnienv->DeleteLocalRef(klass);
+ DeleteLocalRef(jnienv, klass);
return "<UNKNOWN>";
}
std::string name(cname);
@@ -407,7 +413,7 @@ static std::string GetName(jvmtiEnv* jvmtienv, JNIEnv* jnienv, jobject obj) {
}
jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(cname));
jvmtienv->Deallocate(reinterpret_cast<unsigned char*>(cgen));
- jnienv->DeleteLocalRef(klass);
+ DeleteLocalRef(jnienv, klass);
return name;
}
@@ -468,7 +474,7 @@ void JNICALL FieldAccessHook(jvmtiEnv* jvmtienv,
<< "type \"" << obj_class_info.GetName() << "\" in method \"" << method_info
<< "\" at location 0x" << std::hex << location << ". Thread is \""
<< info.GetName() << "\".";
- env->DeleteLocalRef(oklass);
+ DeleteLocalRef(env, oklass);
}
static std::string PrintJValue(jvmtiEnv* jvmtienv, JNIEnv* env, char type, jvalue new_value) {
@@ -486,7 +492,7 @@ static std::string PrintJValue(jvmtiEnv* jvmtienv, JNIEnv* env, char type, jvalu
} else {
oss << "of type \"" << nv_class_info.GetName() << "\"";
}
- env->DeleteLocalRef(nv_klass);
+ DeleteLocalRef(env, nv_klass);
}
break;
}
@@ -539,7 +545,7 @@ void JNICALL FieldModificationHook(jvmtiEnv* jvmtienv,
<< "\" at location 0x" << std::hex << location << std::dec << ". New value is "
<< PrintJValue(jvmtienv, env, type, new_value) << ". Thread is \""
<< info.GetName() << "\".";
- env->DeleteLocalRef(oklass);
+ DeleteLocalRef(env, oklass);
}
void JNICALL MethodExitHook(jvmtiEnv* jvmtienv,
JNIEnv* env,
@@ -714,7 +720,7 @@ static void JNICALL PerformFinalSetupVMInit(jvmtiEnv *jvmti_env,
} else {
// GetMethodID is spec'd to cause the class to be initialized.
jni_env->GetMethodID(klass, "hashCode", "()I");
- jni_env->DeleteLocalRef(klass);
+ DeleteLocalRef(jni_env, klass);
data->vm_class_loader_initialized = true;
}
}
@@ -761,7 +767,7 @@ static bool WatchAllFields(JavaVM* vm, jvmtiEnv* jvmti) {
return false;
}
jvmti->Deallocate(reinterpret_cast<unsigned char*>(fields));
- jni->DeleteLocalRef(k);
+ DeleteLocalRef(jni, k);
}
jvmti->Deallocate(reinterpret_cast<unsigned char*>(klasses));
return true;
diff --git a/test/valgrind-suppressions.txt b/test/valgrind-suppressions.txt
deleted file mode 100644
index a97d03c2d4..0000000000
--- a/test/valgrind-suppressions.txt
+++ /dev/null
@@ -1,87 +0,0 @@
-{
- b/27596582
- Memcheck:Cond
- fun:index
- fun:expand_dynamic_string_token
- fun:_dl_map_object
- fun:map_doit
- fun:_dl_catch_error
- fun:do_preload
- fun:dl_main
- fun:_dl_sysdep_start
- fun:_dl_start_final
- fun:_dl_start
- obj:/lib/x86_64-linux-gnu/ld-2.19.so
-}
-
-{
- b/31275764
- Memcheck:Leak
- match-leak-kinds: definite
- fun:malloc
- ...
- fun:_ZN3art7Runtime17InitNativeMethodsEv
-}
-
-# SigQuit runs libbacktrace
-{
- BackTraceReading64
- Memcheck:Addr8
- fun:access_mem_unrestricted
- fun:_Uelf64_memory_read
- fun:_Uelf64_valid_object_memory
- fun:map_create_list
- fun:unw_map_local_create
- fun:_ZN14UnwindMapLocal5BuildEv
- fun:_ZN12BacktraceMap6CreateEib
-}
-{
- BackTraceReading32
- Memcheck:Addr4
- fun:access_mem_unrestricted
- fun:_Uelf32_memory_read
- fun:_Uelf32_valid_object_memory
- fun:map_create_list
- fun:unw_map_local_create
- fun:_ZN14UnwindMapLocal5BuildEv
- fun:_ZN12BacktraceMap6CreateEib
-}
-{
- BackTraceReading64
- Memcheck:Addr8
- fun:access_mem_unrestricted
- fun:_Uelf64_memory_read
- fun:_Uelf64_get_load_base
- fun:map_create_list
- fun:unw_map_local_create
- fun:_ZN14UnwindMapLocal5BuildEv
- fun:_ZN12BacktraceMap6CreateEib
-}
-{
- BackTraceReading32
- Memcheck:Addr4
- fun:access_mem_unrestricted
- fun:_Uelf32_memory_read
- fun:_Uelf32_get_load_base
- fun:map_create_list
- fun:unw_map_local_create
- fun:_ZN14UnwindMapLocal5BuildEv
- fun:_ZN12BacktraceMap6CreateEib
-}
-
-{
- process_vm_readv
- Memcheck:Param
- process_vm_readv(lvec[...])
- fun:process_vm_readv
-}
-
-# Suppressions for IsAddressMapped check in MemMapTest
-{
- MemMapTest_IsAddressMapped
- Memcheck:Param
- msync(start)
- ...
- fun:_ZN3art10MemMapTest15IsAddressMappedEPv
- ...
-}
diff --git a/test/valgrind-target-suppressions.txt b/test/valgrind-target-suppressions.txt
deleted file mode 100644
index 0d63a1c7aa..0000000000
--- a/test/valgrind-target-suppressions.txt
+++ /dev/null
@@ -1,76 +0,0 @@
-# Valgrind does not recognize the ashmen ioctl() calls on ARM64, so it assumes that a size
-# parameter is a pointer.
-{
- ashmem ioctl
- Memcheck:Param
- ioctl(generic)
- ...
- fun:ioctl
- fun:ashmem_create_region
-}
-
-# It seems that on ARM64 Valgrind considers the canary value used by the Clang stack protector to
-# be an uninitialized value.
-{
- jemalloc chunk_alloc_cache
- Memcheck:Cond
- fun:je_chunk_alloc_cache
-}
-
-# The VectorImpl class does not hold a pointer to the allocated SharedBuffer structure, but to the
-# beginning of the data, which is effectively an interior pointer. Valgrind has limitations when
-# dealing with interior pointers.
-{
- VectorImpl
- Memcheck:Leak
- match-leak-kinds:possible
- fun:malloc
- # The wildcards make this rule work both for 32-bit and 64-bit environments.
- fun:_ZN7android12SharedBuffer5allocE?
- fun:_ZN7android10VectorImpl5_growE??
-}
-
-# Clang/LLVM uses memcpy for *x = *y, even though x == y (which is undefined behavior). Ignore.
-# b/29279679, https://llvm.org/bugs/show_bug.cgi?id=11763
-{
- MemCpySelfAssign
- Memcheck:Overlap
- fun:memcpy
- ...
- fun:je_malloc_tsd_boot0
-}
-
-# Setenv is known-leaking when overwriting mappings. This is triggered by re-initializing
-# ANDROID_DATA. Ignore all setenv leaks.
-{
- SetenvAndroidDataReinit
- Memcheck:Leak
- match-leak-kinds: definite
- fun:malloc
- fun:setenv
-}
-
-{
- b/31275764
- Memcheck:Leak
- match-leak-kinds: definite
- fun:malloc
- ...
- fun:_ZN3art7Runtime17InitNativeMethodsEv
-}
-
-# art::MemMap::MapInternal() uses msync() to check for the existence of memory mappings.
-{
- art::MemMap::MapInternal()
- Memcheck:Param
- msync(start)
- fun:msync
- fun:_ZN3art6MemMap11MapInternalEPvmiiilb
-}
-
-{
- process_vm_readv
- Memcheck:Param
- process_vm_readv(lvec[...])
- fun:process_vm_readv
-}
diff --git a/tools/ahat/Android.mk b/tools/ahat/Android.mk
index ad33233159..2741a9247d 100644
--- a/tools/ahat/Android.mk
+++ b/tools/ahat/Android.mk
@@ -28,9 +28,6 @@ LOCAL_IS_HOST_MODULE := true
LOCAL_MODULE_TAGS := optional
LOCAL_MODULE := ahat
-# Let users with Java 7 run ahat (b/28303627)
-LOCAL_JAVA_LANGUAGE_VERSION := 1.7
-
# Make this available on the classpath of the general-tests tradefed suite.
# It is used by libcore tests that run there.
LOCAL_COMPATIBILITY_SUITE := general-tests
@@ -147,6 +144,7 @@ LOCAL_STATIC_JAVA_LIBRARIES := ahat junit-host
LOCAL_IS_HOST_MODULE := true
LOCAL_MODULE_TAGS := tests
LOCAL_MODULE := ahat-tests
+LOCAL_COMPATIBILITY_SUITE := general-tests
include $(BUILD_HOST_JAVA_LIBRARY)
AHAT_TEST_JAR := $(LOCAL_BUILT_MODULE)
diff --git a/tools/ahat/AndroidTest.xml b/tools/ahat/AndroidTest.xml
new file mode 100644
index 0000000000..b07905a9a7
--- /dev/null
+++ b/tools/ahat/AndroidTest.xml
@@ -0,0 +1,23 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<configuration description="Runs the ahat unit tests">
+ <option name="test-suite-tag" value="ahat" />
+ <option name="test-suite-tag" value="art-tools" />
+ <option name="null-device" value="true" />
+ <test class="com.android.tradefed.testtype.HostTest" >
+ <option name="class" value="com.android.ahat.AhatTestSuite" />
+ </test>
+</configuration>
diff --git a/tools/ahat/etc/ahat-tests.mf b/tools/ahat/etc/ahat-tests.mf
index af17fadded..48fdeb3a01 100644
--- a/tools/ahat/etc/ahat-tests.mf
+++ b/tools/ahat/etc/ahat-tests.mf
@@ -1 +1 @@
-Main-Class: com.android.ahat.Tests
+Main-Class: com.android.ahat.AhatTestSuite
diff --git a/tools/ahat/etc/ahat_api.txt b/tools/ahat/etc/ahat_api.txt
index 93fe46bf8b..f60c1a84fa 100644
--- a/tools/ahat/etc/ahat_api.txt
+++ b/tools/ahat/etc/ahat_api.txt
@@ -10,6 +10,7 @@ package com.android.ahat.dominators {
public class DominatorsComputation {
method public static void computeDominators(com.android.ahat.dominators.DominatorsComputation.Node);
+ method public static void computeDominators(com.android.ahat.dominators.DominatorsComputation.Node, com.android.ahat.progress.Progress, long);
}
public static abstract interface DominatorsComputation.Node {
@@ -27,12 +28,10 @@ package com.android.ahat.heapdump {
method public int getLength();
method public com.android.ahat.heapdump.Value getValue(int);
method public java.util.List<com.android.ahat.heapdump.Value> getValues();
- method public java.lang.String toString();
}
public class AhatClassInstance extends com.android.ahat.heapdump.AhatInstance {
method public java.lang.Iterable<com.android.ahat.heapdump.FieldValue> getInstanceFields();
- method public java.lang.String toString();
}
public class AhatClassObj extends com.android.ahat.heapdump.AhatInstance {
@@ -42,7 +41,6 @@ package com.android.ahat.heapdump {
method public java.lang.String getName();
method public java.util.List<com.android.ahat.heapdump.FieldValue> getStaticFieldValues();
method public com.android.ahat.heapdump.AhatClassObj getSuperClassObj();
- method public java.lang.String toString();
}
public class AhatHeap implements com.android.ahat.heapdump.Diffable {
@@ -60,6 +58,7 @@ package com.android.ahat.heapdump {
method public java.lang.String asString(int);
method public java.lang.String asString();
method public com.android.ahat.heapdump.AhatInstance getAssociatedBitmapInstance();
+ method public com.android.ahat.heapdump.AhatClassObj getAssociatedClassForOverhead();
method public com.android.ahat.heapdump.AhatInstance getBaseline();
method public java.lang.String getClassName();
method public com.android.ahat.heapdump.AhatClassObj getClassObj();
@@ -67,19 +66,21 @@ package com.android.ahat.heapdump {
method public java.util.List<com.android.ahat.heapdump.AhatInstance> getDominated();
method public java.lang.Object getDominatorsComputationState();
method public com.android.ahat.heapdump.Value getField(java.lang.String);
- method public java.util.List<com.android.ahat.heapdump.AhatInstance> getHardReverseReferences();
+ method public deprecated java.util.List<com.android.ahat.heapdump.AhatInstance> getHardReverseReferences();
method public com.android.ahat.heapdump.AhatHeap getHeap();
method public long getId();
method public com.android.ahat.heapdump.AhatInstance getImmediateDominator();
method public java.util.List<com.android.ahat.heapdump.PathElement> getPathFromGcRoot();
+ method public com.android.ahat.heapdump.Reachability getReachability();
method public com.android.ahat.heapdump.AhatInstance getRefField(java.lang.String);
method public java.lang.Iterable<? extends com.android.ahat.dominators.DominatorsComputation.Node> getReferencesForDominators();
method public com.android.ahat.heapdump.AhatInstance getReferent();
method public com.android.ahat.heapdump.Size getRetainedSize(com.android.ahat.heapdump.AhatHeap);
+ method public java.util.List<com.android.ahat.heapdump.AhatInstance> getReverseReferences();
method public java.util.Collection<com.android.ahat.heapdump.RootType> getRootTypes();
method public com.android.ahat.heapdump.Site getSite();
method public com.android.ahat.heapdump.Size getSize();
- method public java.util.List<com.android.ahat.heapdump.AhatInstance> getSoftReverseReferences();
+ method public deprecated java.util.List<com.android.ahat.heapdump.AhatInstance> getSoftReverseReferences();
method public com.android.ahat.heapdump.Size getTotalRetainedSize();
method public boolean isArrayInstance();
method public boolean isClassInstance();
@@ -88,7 +89,7 @@ package com.android.ahat.heapdump {
method public boolean isRoot();
method public boolean isStronglyReachable();
method public boolean isUnreachable();
- method public boolean isWeaklyReachable();
+ method public deprecated boolean isWeaklyReachable();
method public void setDominator(com.android.ahat.dominators.DominatorsComputation.Node);
method public void setDominatorsComputationState(java.lang.Object);
method public abstract java.lang.String toString();
@@ -157,8 +158,13 @@ package com.android.ahat.heapdump {
}
public class Parser {
+ ctor public Parser(java.nio.ByteBuffer);
+ ctor public Parser(java.io.File) throws java.io.IOException;
+ method public com.android.ahat.heapdump.Parser map(com.android.ahat.proguard.ProguardMap);
+ method public com.android.ahat.heapdump.AhatSnapshot parse() throws com.android.ahat.heapdump.HprofFormatException, java.io.IOException;
method public static com.android.ahat.heapdump.AhatSnapshot parseHeapDump(java.io.File, com.android.ahat.proguard.ProguardMap) throws com.android.ahat.heapdump.HprofFormatException, java.io.IOException;
method public static com.android.ahat.heapdump.AhatSnapshot parseHeapDump(java.nio.ByteBuffer, com.android.ahat.proguard.ProguardMap) throws com.android.ahat.heapdump.HprofFormatException, java.io.IOException;
+ method public com.android.ahat.heapdump.Parser progress(com.android.ahat.progress.Progress);
}
public class PathElement implements com.android.ahat.heapdump.Diffable {
@@ -170,6 +176,17 @@ package com.android.ahat.heapdump {
field public boolean isDominator;
}
+ public final class Reachability extends java.lang.Enum {
+ method public static com.android.ahat.heapdump.Reachability valueOf(java.lang.String);
+ method public static final com.android.ahat.heapdump.Reachability[] values();
+ enum_constant public static final com.android.ahat.heapdump.Reachability FINALIZER;
+ enum_constant public static final com.android.ahat.heapdump.Reachability PHANTOM;
+ enum_constant public static final com.android.ahat.heapdump.Reachability SOFT;
+ enum_constant public static final com.android.ahat.heapdump.Reachability STRONG;
+ enum_constant public static final com.android.ahat.heapdump.Reachability UNREACHABLE;
+ enum_constant public static final com.android.ahat.heapdump.Reachability WEAK;
+ }
+
public final class RootType extends java.lang.Enum {
method public static com.android.ahat.heapdump.RootType valueOf(java.lang.String);
method public static final com.android.ahat.heapdump.RootType[] values();
@@ -284,6 +301,26 @@ package com.android.ahat.heapdump {
}
+package com.android.ahat.progress {
+
+ public class NullProgress implements com.android.ahat.progress.Progress {
+ ctor public NullProgress();
+ method public void advance(long);
+ method public void done();
+ method public void start(java.lang.String, long);
+ method public void update(long);
+ }
+
+ public abstract interface Progress {
+ method public default void advance();
+ method public abstract void advance(long);
+ method public abstract void done();
+ method public abstract void start(java.lang.String, long);
+ method public abstract void update(long);
+ }
+
+}
+
package com.android.ahat.proguard {
public class ProguardMap {
diff --git a/tools/ahat/src/main/com/android/ahat/AsciiProgress.java b/tools/ahat/src/main/com/android/ahat/AsciiProgress.java
new file mode 100644
index 0000000000..3ac98a443a
--- /dev/null
+++ b/tools/ahat/src/main/com/android/ahat/AsciiProgress.java
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.ahat;
+
+import com.android.ahat.progress.Progress;
+
+/**
+ * A progress bar that prints ascii to System.out.
+ * <p>
+ * For best results, have System.out positioned at a new line before using
+ * this progress indicator.
+ */
+class AsciiProgress implements Progress {
+ private String description;
+ private long duration;
+ private long progress;
+
+ private static void display(String description, long percent) {
+ System.out.print(String.format("\r[ %3d%% ] %s ...", percent, description));
+ System.out.flush();
+ }
+
+ @Override
+ public void start(String description, long duration) {
+ assert this.description == null;
+ this.description = description;
+ this.duration = duration;
+ this.progress = 0;
+ display(description, 0);
+ }
+
+ @Override
+ public void advance(long n) {
+ update(progress + n);
+ }
+
+ @Override
+ public void update(long current) {
+ assert description != null;
+ long oldPercent = progress * 100 / duration;
+ long newPercent = current * 100 / duration;
+ progress = current;
+
+ if (newPercent > oldPercent) {
+ display(description, newPercent);
+ }
+ }
+
+ @Override
+ public void done() {
+ update(duration);
+ System.out.println();
+ this.description = null;
+ }
+}
diff --git a/tools/ahat/src/main/com/android/ahat/Main.java b/tools/ahat/src/main/com/android/ahat/Main.java
index 04a6012a61..d3cfcf9e94 100644
--- a/tools/ahat/src/main/com/android/ahat/Main.java
+++ b/tools/ahat/src/main/com/android/ahat/Main.java
@@ -20,6 +20,7 @@ import com.android.ahat.heapdump.AhatSnapshot;
import com.android.ahat.heapdump.Diff;
import com.android.ahat.heapdump.HprofFormatException;
import com.android.ahat.heapdump.Parser;
+import com.android.ahat.progress.Progress;
import com.android.ahat.proguard.ProguardMap;
import com.sun.net.httpserver.HttpServer;
import java.io.File;
@@ -58,10 +59,10 @@ public class Main {
* Prints an error message and exits the application on failure to load the
* heap dump.
*/
- private static AhatSnapshot loadHeapDump(File hprof, ProguardMap map) {
+ private static AhatSnapshot loadHeapDump(File hprof, ProguardMap map, Progress progress) {
System.out.println("Processing '" + hprof + "' ...");
try {
- return Parser.parseHeapDump(hprof, map);
+ return new Parser(hprof).map(map).progress(progress).parse();
} catch (IOException e) {
System.err.println("Unable to load '" + hprof + "':");
e.printStackTrace();
@@ -102,7 +103,7 @@ public class Main {
i++;
try {
map.readFromFile(new File(args[i]));
- } catch (IOException|ParseException ex) {
+ } catch (IOException | ParseException ex) {
System.out.println("Unable to read proguard map: " + ex);
System.out.println("The proguard map will not be used.");
}
@@ -110,7 +111,7 @@ public class Main {
i++;
try {
mapbase.readFromFile(new File(args[i]));
- } catch (IOException|ParseException ex) {
+ } catch (IOException | ParseException ex) {
System.out.println("Unable to read baseline proguard map: " + ex);
System.out.println("The proguard map will not be used.");
}
@@ -152,9 +153,9 @@ public class Main {
System.exit(1);
}
- AhatSnapshot ahat = loadHeapDump(hprof, map);
+ AhatSnapshot ahat = loadHeapDump(hprof, map, new AsciiProgress());
if (hprofbase != null) {
- AhatSnapshot base = loadHeapDump(hprofbase, mapbase);
+ AhatSnapshot base = loadHeapDump(hprofbase, mapbase, new AsciiProgress());
System.out.println("Diffing heap dumps ...");
Diff.snapshots(ahat, base);
diff --git a/tools/ahat/src/main/com/android/ahat/ObjectHandler.java b/tools/ahat/src/main/com/android/ahat/ObjectHandler.java
index bfd5d5cacd..c099da8ceb 100644
--- a/tools/ahat/src/main/com/android/ahat/ObjectHandler.java
+++ b/tools/ahat/src/main/com/android/ahat/ObjectHandler.java
@@ -44,8 +44,7 @@ class ObjectHandler implements AhatHandler {
private static final String DOMINATED_OBJECTS_ID = "dominated";
private static final String INSTANCE_FIELDS_ID = "ifields";
private static final String STATIC_FIELDS_ID = "sfields";
- private static final String HARD_REFS_ID = "refs";
- private static final String SOFT_REFS_ID = "srefs";
+ private static final String REFS_ID = "refs";
private AhatSnapshot mSnapshot;
@@ -223,24 +222,12 @@ class ObjectHandler implements AhatHandler {
private static void printReferences(Doc doc, Query query, AhatInstance inst) {
doc.section("Objects with References to this Object");
- if (inst.getHardReverseReferences().isEmpty()) {
+ if (inst.getReverseReferences().isEmpty()) {
doc.println(DocString.text("(none)"));
} else {
doc.table(new Column("Object"));
- List<AhatInstance> references = inst.getHardReverseReferences();
- SubsetSelector<AhatInstance> selector = new SubsetSelector(query, HARD_REFS_ID, references);
- for (AhatInstance ref : selector.selected()) {
- doc.row(Summarizer.summarize(ref));
- }
- doc.end();
- selector.render(doc);
- }
-
- if (!inst.getSoftReverseReferences().isEmpty()) {
- doc.section("Objects with Soft References to this Object");
- doc.table(new Column("Object"));
- List<AhatInstance> references = inst.getSoftReverseReferences();
- SubsetSelector<AhatInstance> selector = new SubsetSelector(query, SOFT_REFS_ID, references);
+ List<AhatInstance> references = inst.getReverseReferences();
+ SubsetSelector<AhatInstance> selector = new SubsetSelector(query, REFS_ID, references);
for (AhatInstance ref : selector.selected()) {
doc.row(Summarizer.summarize(ref));
}
diff --git a/tools/ahat/src/main/com/android/ahat/Summarizer.java b/tools/ahat/src/main/com/android/ahat/Summarizer.java
index ae0776ab0b..ab88c04d32 100644
--- a/tools/ahat/src/main/com/android/ahat/Summarizer.java
+++ b/tools/ahat/src/main/com/android/ahat/Summarizer.java
@@ -16,7 +16,9 @@
package com.android.ahat;
+import com.android.ahat.heapdump.AhatClassObj;
import com.android.ahat.heapdump.AhatInstance;
+import com.android.ahat.heapdump.Reachability;
import com.android.ahat.heapdump.Site;
import com.android.ahat.heapdump.Value;
import java.net.URI;
@@ -50,11 +52,10 @@ class Summarizer {
formatted.append(DocString.removed("del "));
}
- // Annotate unreachable objects as such.
- if (inst.isWeaklyReachable()) {
- formatted.append("weak ");
- } else if (inst.isUnreachable()) {
- formatted.append("unreachable ");
+ // Annotate non-strongly reachable objects as such.
+ Reachability reachability = inst.getReachability();
+ if (reachability != Reachability.STRONG) {
+ formatted.append(reachability.toString() + " ");
}
// Annotate roots as roots.
@@ -100,11 +101,17 @@ class Summarizer {
// Annotate bitmaps with a thumbnail.
AhatInstance bitmap = inst.getAssociatedBitmapInstance();
- String thumbnail = "";
if (bitmap != null) {
URI uri = DocString.formattedUri("bitmap?id=0x%x", bitmap.getId());
formatted.appendThumbnail(uri, "bitmap image");
}
+
+ // Annotate $classOverhead arrays
+ AhatClassObj cls = inst.getAssociatedClassForOverhead();
+ if (cls != null) {
+ formatted.append(" overhead for ");
+ formatted.append(summarize(cls));
+ }
return formatted;
}
diff --git a/tools/ahat/src/main/com/android/ahat/dominators/DominatorsComputation.java b/tools/ahat/src/main/com/android/ahat/dominators/DominatorsComputation.java
index d3fea4869a..903211eb50 100644
--- a/tools/ahat/src/main/com/android/ahat/dominators/DominatorsComputation.java
+++ b/tools/ahat/src/main/com/android/ahat/dominators/DominatorsComputation.java
@@ -16,10 +16,11 @@
package com.android.ahat.dominators;
+import com.android.ahat.progress.NullProgress;
+import com.android.ahat.progress.Progress;
import java.util.ArrayDeque;
-import java.util.ArrayList;
+import java.util.Arrays;
import java.util.Deque;
-import java.util.List;
import java.util.Queue;
/**
@@ -106,43 +107,137 @@ public class DominatorsComputation {
// dominator of B.
public long id;
- // Upper bound on the id of this node's dominator.
- // The true immediate dominator of this node must have id <= domid.
- // This upper bound is slowly tightened as part of the dominators
- // computation.
- public long domid;
+ // The largest id of all nodes reachable from this node.
+ // If foo.id > this.maxReachableId, then foo is not reachable from this
+ // node.
+ public long maxReachableId;
+
+ // The set of ids of nodes that have references to this node.
+ public IdSet inRefIds = new IdSet();
// The current candidate dominator for this node.
- // Invariant: (domid < domS.id) implies this node is on the queue of
- // nodes to be revisited.
+ // The true immediate dominator of this node must have id <= domS.id.
public NodeS domS;
- // A node with a reference to this node that is one step closer to the
- // root than this node.
- // Invariant: srcS.id < this.id
- public NodeS srcS;
-
- // The largest id of the nodes we have seen so far on a path from the root
- // to this node. Used to keep track of which nodes we have already seen
- // and avoid processing them again.
- public long seenid;
-
- // The set of nodes X reachable by 'this' on a path of nodes from the
- // root with increasing ids (possibly excluding X) that this node does not
- // dominate (this.id > X.domid).
- // We can use a List instead of a Set for this because we guarentee based
- // on seenid that we don't add the same node more than once to the list.
- public List<NodeS> undom = new ArrayList<NodeS>();
+ // The previous candidate dominator for this node.
+ // Invariant:
+ // * There are no nodes xS reachable from this node on a path of nodes
+ // with increasing ids (not counting xS.id) for which
+ // this.id > xS.domS.id > this.oldDomS.id.
+ // This ensures that when all nodes xS satisfy xS.domS == xS.oldDomS, we
+ // have found the true immediate dominator of each node.
+ //
+ // Note: We only use this field to tell if this node is scheduled to be
+ // revisited. We could replace it with a boolean to save space, but it
+ // probably doesn't save that much space and it's easier to explain the
+ // algorithm if we can refer to this field.
+ public NodeS oldDomS;
+
+ // The set of nodes that this node is the candidate immediate dominator
+ // of. More precisely, the set of nodes xS such that xS.domS == this.
+ public NodeSet dominated = new NodeSet();
+
+ // The set of nodes that this node is the old candidate immediate
+ // dominator of that need to be revisited. Specifically, the set of nodes
+ // xS such that:
+ // xS.oldDomS == this && xS.oldDomS != xS.domS.
+ //
+ // The empty set is represented as null instead of an empty NodeSet to
+ // save memory.
+ // Invariant:
+ // If revisit != null, this node is on the global list of nodes to be
+ // revisited.
+ public NodeSet revisit = null;
+
+ // Distance from the root to this node. Used for purposes of tracking
+ // progress only.
+ public long depth;
+ }
+
+ // A collection of node ids.
+ private static class IdSet {
+ private int size = 0;
+ private long[] ids = new long[4];
+
+ // Adds an id to the set.
+ public void add(long id) {
+ if (size == ids.length) {
+ ids = Arrays.copyOf(ids, size * 2);
+ }
+ ids[size++] = id;
+ }
+
+ // Returns the most recent id added to the set. Behavior is undefined if
+ // the set is empty.
+ public long last() {
+ assert size != 0;
+ return ids[size - 1];
+ }
+
+ // Returns true if the set contains an id in the range [low, high]
+ // inclusive, false otherwise.
+ public boolean hasIdInRange(long low, long high) {
+ for (int i = 0; i < size; ++i) {
+ if (low <= ids[i] && ids[i] <= high) {
+ return true;
+ }
+ }
+ return false;
+ }
}
+ // An unordered set of nodes data structure supporting efficient iteration
+ // over elements. The bulk of the time spent in the dominators algorithm is
+ // iterating over these sets. Using an array to store the set provides
+ // noticable performance improvements over ArrayList or a linked list.
+ private static class NodeSet {
+ public int size = 0;
+ public NodeS[] nodes = new NodeS[4];
+
+ public void add(NodeS nodeS) {
+ if (size == nodes.length) {
+ nodes = Arrays.copyOf(nodes, size * 2);
+ }
+ nodes[size++] = nodeS;
+ }
+
+ public void remove(NodeS nodeS) {
+ for (int i = 0; i < size; ++i) {
+ if (nodes[i] == nodeS) {
+ remove(i);
+ break;
+ }
+ }
+ }
+
+ public void remove(int index) {
+ nodes[index] = nodes[--size];
+ nodes[size] = null;
+ }
+ }
+
+ // A reference from a source node to a destination node to be processed
+ // during the initial depth-first traversal of nodes.
+ //
+ // Also used as a marker to indicate when the depth-first traversal has been
+ // completed for a node. In that case, srcS is the node depth-first
+ // traversal has been completed for, and dst will be set to null.
private static class Link {
- public NodeS srcS;
- public Node dst;
+ public final NodeS srcS;
+ public final Node dst;
+ // Constructor for a reference from srcS to dst.
public Link(NodeS srcS, Node dst) {
this.srcS = srcS;
this.dst = dst;
}
+
+ // Constructor for a marker indicating depth-first traversal has been
+ // completed for srcS.
+ public Link(NodeS srcS) {
+ this.srcS = srcS;
+ this.dst = null;
+ }
}
/**
@@ -156,127 +251,215 @@ public class DominatorsComputation {
* @see Node
*/
public static void computeDominators(Node root) {
- long id = 0;
+ computeDominators(root, new NullProgress(), 0);
+ }
- // List of all nodes seen. We keep track of this here to update all the
- // dominators once we are done.
- List<NodeS> nodes = new ArrayList<NodeS>();
+ /**
+ * Computes the immediate dominators of all nodes reachable from the <code>root</code> node.
+ * There must not be any incoming references to the <code>root</code> node.
+ * <p>
+ * The result of this function is to call the {@link Node#setDominator}
+ * function on every node reachable from the root node.
+ *
+ * @param root the root node of the dominators computation
+ * @param progress progress tracker.
+ * @param numNodes upper bound on the number of reachable nodes in the
+ * graph, for progress tracking purposes only.
+ * @see Node
+ */
+ public static void computeDominators(Node root, Progress progress, long numNodes) {
+ long id = 0;
- // The set of nodes N such that N.domid < N.domS.id. These nodes need
- // to be revisisted because their dominator is clearly wrong.
+ // The set of nodes xS such that xS.revisit != null.
// Use a Queue instead of a Set because performance will be better. We
- // avoid adding nodes already on the queue by checking whether it was
- // already true that N.domid < N.domS.id, in which case the node is
- // already on the queue.
+ // avoid adding nodes already on the queue by checking
+ // xS == null before adding the node to the queue.
Queue<NodeS> revisit = new ArrayDeque<NodeS>();
// Set up the root node specially.
NodeS rootS = new NodeS();
rootS.node = root;
rootS.id = id++;
+ rootS.depth = 0;
root.setDominatorsComputationState(rootS);
- // 1. Do a depth first search of the nodes, label them with ids and come
- // up with intial candidate dominators for them.
Deque<Link> dfs = new ArrayDeque<Link>();
+ dfs.push(new Link(rootS));
for (Node child : root.getReferencesForDominators()) {
dfs.push(new Link(rootS, child));
}
+ // workBound is an upper bound on the amount of work required in the
+ // second phase of dominators computation, used solely for the purposes of
+ // tracking progress.
+ long workBound = 0;
+
+ // 1. Do a depth first search of the nodes, label them with ids and come
+ // up with initial candidate dominators for them.
+ progress.start("Initializing dominators", numNodes);
while (!dfs.isEmpty()) {
Link link = dfs.pop();
- NodeS dstS = (NodeS)link.dst.getDominatorsComputationState();
- if (dstS == null) {
- // This is the first time we have seen the node. The candidate
- // dominator is link src.
- dstS = new NodeS();
- dstS.node = link.dst;
- dstS.id = id++;
- dstS.domid = link.srcS.id;
- dstS.domS = link.srcS;
- dstS.srcS = link.srcS;
- dstS.seenid = dstS.domid;
- nodes.add(dstS);
- link.dst.setDominatorsComputationState(dstS);
-
- for (Node child : link.dst.getReferencesForDominators()) {
- dfs.push(new Link(dstS, child));
- }
+
+ if (link.dst == null) {
+ // This is the marker link indicating we have now visited all
+ // nodes reachable from link.srcS.
+ link.srcS.maxReachableId = id - 1;
+ progress.advance();
} else {
- // We have seen the node already. Update the state based on the new
- // potential dominator.
- NodeS srcS = link.srcS;
- boolean revisiting = dstS.domid < dstS.domS.id;
-
- while (srcS.id > dstS.seenid) {
- srcS.undom.add(dstS);
- srcS = srcS.srcS;
- }
- dstS.seenid = link.srcS.id;
-
- if (srcS.id < dstS.domid) {
- // In this case, dstS.domid must be wrong, because we just found a
- // path to dstS that does not go through dstS.domid:
- // All nodes from root to srcS have id < domid, and all nodes from
- // srcS to dstS had id > domid, so dstS.domid cannot be on this path
- // from root to dstS.
- dstS.domid = srcS.id;
- if (!revisiting) {
- revisit.add(dstS);
+ NodeS dstS = (NodeS)link.dst.getDominatorsComputationState();
+ if (dstS == null) {
+ // We are seeing the destination node for the first time.
+ // The candidate dominator is the source node.
+ dstS = new NodeS();
+ link.dst.setDominatorsComputationState(dstS);
+
+ dstS.node = link.dst;
+ dstS.id = id++;
+ dstS.inRefIds.add(link.srcS.id);
+ dstS.domS = link.srcS;
+ dstS.domS.dominated.add(dstS);
+ dstS.oldDomS = link.srcS;
+ dstS.depth = link.srcS.depth + 1;
+
+ dfs.push(new Link(dstS));
+ for (Node child : link.dst.getReferencesForDominators()) {
+ dfs.push(new Link(dstS, child));
+ }
+ } else {
+ // We have seen the destination node before. Update the state based
+ // on the new potential dominator.
+ if (dstS.inRefIds.size == 1) {
+ workBound += dstS.oldDomS.depth;
+ }
+
+ long seenid = dstS.inRefIds.last();
+ dstS.inRefIds.add(link.srcS.id);
+
+ // Go up the dominator chain until we reach a node we haven't already
+ // seen with a path to dstS.
+ NodeS xS = link.srcS;
+ while (xS.id > seenid) {
+ xS = xS.domS;
+ }
+
+ // The new dominator for dstS must have an id less than the node we
+ // just reached. Pull the dominator for dstS up its dominator
+ // chain until we find a suitable new dominator for dstS.
+ long domid = xS.id;
+ if (dstS.domS.id > domid) {
+ // Mark the node as needing to be revisited.
+ if (dstS.domS == dstS.oldDomS) {
+ if (dstS.oldDomS.revisit == null) {
+ dstS.oldDomS.revisit = new NodeSet();
+ revisit.add(dstS.oldDomS);
+ }
+ dstS.oldDomS.revisit.add(dstS);
+ }
+
+ // Update the node's candidate dominator.
+ dstS.domS.dominated.remove(dstS);
+ do {
+ dstS.domS = dstS.domS.domS;
+ } while (dstS.domS.id > domid);
+ dstS.domS.dominated.add(dstS);
}
}
}
}
+ progress.done();
- // 2. Continue revisiting nodes until they all satisfy the requirement
- // that domS.id <= domid.
+ // 2. Continue revisiting nodes until every node satisfies the requirement
+ // that domS.id == oldDomS.id.
+ progress.start("Resolving dominators", workBound);
while (!revisit.isEmpty()) {
- NodeS nodeS = revisit.poll();
- NodeS domS = nodeS.domS;
- assert nodeS.domid < domS.id;
- while (domS.id > nodeS.domid) {
- if (domS.domS.id < nodeS.domid) {
- // In this case, nodeS.domid must be wrong, because there is a path
- // from root to nodeS that does not go through nodeS.domid:
- // * We can go from root to domS without going through nodeS.domid,
- // because otherwise nodeS.domid would dominate domS, not
- // domS.domS.
- // * We can go from domS to nodeS without going through nodeS.domid
- // because we know nodeS is reachable from domS on a path of nodes
- // with increases ids, which cannot include nodeS.domid, which
- // has a smaller id than domS.
- nodeS.domid = domS.domS.id;
+ NodeS oldDomS = revisit.poll();
+ assert oldDomS.revisit != null;
+
+ NodeSet nodes = oldDomS.revisit;
+ oldDomS.revisit = null;
+
+ // Search for pairs of nodes nodeS, xS for which
+ // nodeS.id > xS.domS.id > nodeS.oldDomS.id
+ // and there is a path of nodes with increasing ids from nodeS to xS.
+ // In that case, xS.domS must be wrong, because there is a path to xS
+ // from the root that does not go through xS.domS:
+ // * There is a path from the root to nodeS.oldDomS that doesn't go
+ // through xS.domS. Otherwise xS.domS would be a dominator of
+ // nodeS.oldDomS, but it can't be because xS.domS.id > nodeS.oldDomS.id.
+ // * There is a path from nodeS.oldDomS to nodeS that doesn't go through
+ // xS.domS, because xS.domS is not a dominator of nodeS.
+ // * There is a path from nodeS to xS that doesn't go through xS.domS,
+ // because we have a path of increasing ids from nodeS to xS, none of
+ // which can have an id smaller than nodeS as xS.domS does.
+ for (int i = 0; i < oldDomS.dominated.size; ++i) {
+ NodeS xS = oldDomS.dominated.nodes[i];
+ for (int j = 0; j < nodes.size; ++j) {
+ NodeS nodeS = nodes.nodes[j];
+ assert nodeS.oldDomS == oldDomS;
+ if (isReachableAscending(nodeS, xS)) {
+ // Update the dominator for xS.
+ if (xS.domS == xS.oldDomS) {
+ if (xS.oldDomS.revisit == null) {
+ xS.oldDomS.revisit = new NodeSet();
+ revisit.add(xS.oldDomS);
+ }
+ xS.oldDomS.revisit.add(xS);
+ }
+ oldDomS.dominated.remove(i--);
+ xS.domS = nodeS.domS;
+ xS.domS.dominated.add(xS);
+ break;
+ }
}
- domS.undom.add(nodeS);
- domS = domS.srcS;
}
- nodeS.domS = domS;
- nodeS.domid = domS.id;
-
- for (NodeS xS : nodeS.undom) {
- if (domS.id < xS.domid) {
- // In this case, xS.domid must be wrong, because there is a path
- // from the root to xX that does not go through xS.domid:
- // * We can go from root to nodeS without going through xS.domid,
- // because otherwise xS.domid would dominate nodeS, not domS.
- // * We can go from nodeS to xS without going through xS.domid
- // because we know xS is reachable from nodeS on a path of nodes
- // with increasing ids, which cannot include xS.domid, which has
- // a smaller id than nodeS.
- boolean revisiting = xS.domid < xS.domS.id;
- xS.domid = domS.id;
- if (!revisiting) {
- revisit.add(xS);
+
+ // We can now safely update oldDomS for each of the nodes nodeS while
+ // preserving the oldDomS invariant.
+ for (int i = 0; i < nodes.size; ++i) {
+ NodeS nodeS = nodes.nodes[i];
+ nodeS.oldDomS = oldDomS.oldDomS;
+ if (nodeS.oldDomS != nodeS.domS) {
+ if (nodeS.oldDomS.revisit == null) {
+ nodeS.oldDomS.revisit = new NodeSet();
+ revisit.add(nodeS.oldDomS);
}
+ nodeS.oldDomS.revisit.add(nodeS);
}
}
+ progress.advance((oldDomS.depth - oldDomS.oldDomS.depth) * nodes.size);
}
+ progress.done();
- // 3. Update the dominators of the nodes.
- root.setDominatorsComputationState(null);
- for (NodeS nodeS : nodes) {
- nodeS.node.setDominator(nodeS.domS.node);
+
+ // 3. We have figured out the correct dominator for each node. Notify the
+ // user of the results by doing one last traversal of the nodes.
+ assert revisit.isEmpty();
+ revisit.add(rootS);
+ while (!revisit.isEmpty()) {
+ NodeS nodeS = revisit.poll();
+ assert nodeS.domS == nodeS.oldDomS;
+ assert nodeS.revisit == null;
nodeS.node.setDominatorsComputationState(null);
+ for (int i = 0; i < nodeS.dominated.size; ++i) {
+ NodeS xS = nodeS.dominated.nodes[i];
+ xS.node.setDominator(nodeS.node);
+ revisit.add(xS);
+ }
}
}
+
+ // Returns true if there is a path from srcS to dstS of nodes with ascending
+ // ids (not including dstS.id).
+ private static boolean isReachableAscending(NodeS srcS, NodeS dstS) {
+ if (dstS.id < srcS.id) {
+ // The first time we saw dstS was before we saw srcS. See if srcS is on
+ // the source chain for any nodes with direct references to dstS.
+ return dstS.inRefIds.hasIdInRange(srcS.id, srcS.maxReachableId);
+ }
+
+ // Otherwise dstS is only reachable from srcS on a node with ascending ids
+ // if it was visited for the first time while performing the depth-first
+ // traversal of srcS.
+ return dstS.id <= srcS.maxReachableId;
+ }
}
diff --git a/tools/ahat/src/main/com/android/ahat/heapdump/AhatArrayInstance.java b/tools/ahat/src/main/com/android/ahat/heapdump/AhatArrayInstance.java
index 9c80802673..cf48d6d459 100644
--- a/tools/ahat/src/main/com/android/ahat/heapdump/AhatArrayInstance.java
+++ b/tools/ahat/src/main/com/android/ahat/heapdump/AhatArrayInstance.java
@@ -240,7 +240,10 @@ public class AhatArrayInstance extends AhatInstance {
if (value != null) {
assert value.isAhatInstance();
String field = "[" + Integer.toString(index) + "]";
- return new Reference(AhatArrayInstance.this, field, value.asAhatInstance(), true);
+ return new Reference(AhatArrayInstance.this,
+ field,
+ value.asAhatInstance(),
+ Reachability.STRONG);
}
return null;
}
@@ -324,7 +327,7 @@ public class AhatArrayInstance extends AhatInstance {
@Override public AhatInstance getAssociatedBitmapInstance() {
if (mByteArray != null) {
- List<AhatInstance> refs = getHardReverseReferences();
+ List<AhatInstance> refs = getReverseReferences();
if (refs.size() == 1) {
AhatInstance ref = refs.get(0);
return ref.getAssociatedBitmapInstance();
@@ -333,6 +336,26 @@ public class AhatArrayInstance extends AhatInstance {
return null;
}
+ @Override public AhatClassObj getAssociatedClassForOverhead() {
+ if (mByteArray != null) {
+ List<AhatInstance> refs = getHardReverseReferences();
+ if (refs.size() == 1) {
+ AhatClassObj ref = refs.get(0).asClassObj();
+ if (ref != null) {
+ for (FieldValue field : ref.getStaticFieldValues()) {
+ if (field.name.equals("$classOverhead")) {
+ if (field.value.asAhatInstance() == this) {
+ return ref;
+ }
+ return null;
+ }
+ }
+ }
+ }
+ }
+ return null;
+ }
+
@Override public String toString() {
String className = getClassName();
if (className.endsWith("[]")) {
diff --git a/tools/ahat/src/main/com/android/ahat/heapdump/AhatClassInstance.java b/tools/ahat/src/main/com/android/ahat/heapdump/AhatClassInstance.java
index c82ef20e9b..f377ae37bc 100644
--- a/tools/ahat/src/main/com/android/ahat/heapdump/AhatClassInstance.java
+++ b/tools/ahat/src/main/com/android/ahat/heapdump/AhatClassInstance.java
@@ -104,10 +104,7 @@ public class AhatClassInstance extends AhatInstance {
@Override
Iterable<Reference> getReferences() {
- if (isInstanceOfClass("java.lang.ref.Reference")) {
- return new WeakReferentReferenceIterator();
- }
- return new StrongReferenceIterator();
+ return new ReferenceIterator();
}
/**
@@ -352,59 +349,48 @@ public class AhatClassInstance extends AhatInstance {
}
/**
- * A Reference iterator that iterates over the fields of this instance
- * assuming all field references are strong references.
+ * Returns the reachability type associated with this instance.
+ * For example, returns Reachability.WEAK for an instance of
+ * java.lang.ref.WeakReference.
*/
- private class StrongReferenceIterator implements Iterable<Reference>,
- Iterator<Reference> {
- private Iterator<FieldValue> mIter = getInstanceFields().iterator();
- private Reference mNext = null;
-
- @Override
- public boolean hasNext() {
- while (mNext == null && mIter.hasNext()) {
- FieldValue field = mIter.next();
- if (field.value != null && field.value.isAhatInstance()) {
- AhatInstance ref = field.value.asAhatInstance();
- mNext = new Reference(AhatClassInstance.this, "." + field.name, ref, true);
- }
- }
- return mNext != null;
- }
-
- @Override
- public Reference next() {
- if (!hasNext()) {
- throw new NoSuchElementException();
+ private Reachability getJavaLangRefType() {
+ AhatClassObj cls = getClassObj();
+ while (cls != null) {
+ switch (cls.getName()) {
+ case "java.lang.ref.PhantomReference": return Reachability.PHANTOM;
+ case "java.lang.ref.WeakReference": return Reachability.WEAK;
+ case "java.lang.ref.FinalizerReference": return Reachability.FINALIZER;
+ case "java.lang.ref.SoftReference": return Reachability.SOFT;
}
- Reference next = mNext;
- mNext = null;
- return next;
- }
-
- @Override
- public Iterator<Reference> iterator() {
- return this;
+ cls = cls.getSuperClassObj();
}
+ return Reachability.STRONG;
}
/**
- * A Reference iterator that iterates over the fields of a subclass of
- * java.lang.ref.Reference, where the 'referent' field is considered weak.
+ * A Reference iterator that iterates over the fields of this instance.
*/
- private class WeakReferentReferenceIterator implements Iterable<Reference>,
- Iterator<Reference> {
- private Iterator<FieldValue> mIter = getInstanceFields().iterator();
+ private class ReferenceIterator implements Iterable<Reference>,
+ Iterator<Reference> {
+ private final Iterator<FieldValue> mIter = getInstanceFields().iterator();
private Reference mNext = null;
+ // If we are iterating over a subclass of java.lang.ref.Reference, the
+ // 'referent' field doesn't have strong reachability. mJavaLangRefType
+ // describes what type of java.lang.ref.Reference subinstance this is.
+ private final Reachability mJavaLangRefType = getJavaLangRefType();
+
@Override
public boolean hasNext() {
while (mNext == null && mIter.hasNext()) {
FieldValue field = mIter.next();
if (field.value != null && field.value.isAhatInstance()) {
- boolean strong = !field.name.equals("referent");
+ Reachability reachability = Reachability.STRONG;
+ if (mJavaLangRefType != Reachability.STRONG && "referent".equals(field.name)) {
+ reachability = mJavaLangRefType;
+ }
AhatInstance ref = field.value.asAhatInstance();
- mNext = new Reference(AhatClassInstance.this, "." + field.name, ref, strong);
+ mNext = new Reference(AhatClassInstance.this, "." + field.name, ref, reachability);
}
}
return mNext != null;
diff --git a/tools/ahat/src/main/com/android/ahat/heapdump/AhatClassObj.java b/tools/ahat/src/main/com/android/ahat/heapdump/AhatClassObj.java
index 36ada2857c..765a411e41 100644
--- a/tools/ahat/src/main/com/android/ahat/heapdump/AhatClassObj.java
+++ b/tools/ahat/src/main/com/android/ahat/heapdump/AhatClassObj.java
@@ -131,7 +131,10 @@ public class AhatClassObj extends AhatInstance {
FieldValue field = mStaticFieldValues[index];
Value value = field.value;
if (value != null && value.isAhatInstance()) {
- return new Reference(AhatClassObj.this, "." + field.name, value.asAhatInstance(), true);
+ return new Reference(AhatClassObj.this,
+ "." + field.name,
+ value.asAhatInstance(),
+ Reachability.STRONG);
}
return null;
}
diff --git a/tools/ahat/src/main/com/android/ahat/heapdump/AhatInstance.java b/tools/ahat/src/main/com/android/ahat/heapdump/AhatInstance.java
index 67253bf0e7..20f368f4ff 100644
--- a/tools/ahat/src/main/com/android/ahat/heapdump/AhatInstance.java
+++ b/tools/ahat/src/main/com/android/ahat/heapdump/AhatInstance.java
@@ -17,12 +17,14 @@
package com.android.ahat.heapdump;
import com.android.ahat.dominators.DominatorsComputation;
+import com.android.ahat.progress.Progress;
import java.awt.image.BufferedImage;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Deque;
+import java.util.EnumMap;
import java.util.List;
import java.util.Queue;
@@ -47,11 +49,11 @@ public abstract class AhatInstance implements Diffable<AhatInstance>,
// Field initialized via addRegisterednativeSize.
private long mRegisteredNativeSize = 0;
- // Fields initialized in computeReverseReferences().
+ // Fields initialized in computeReachability().
+ private Reachability mReachability = Reachability.UNREACHABLE;
private AhatInstance mNextInstanceToGcRoot;
private String mNextInstanceToGcRootField;
- private ArrayList<AhatInstance> mHardReverseReferences;
- private ArrayList<AhatInstance> mSoftReverseReferences;
+ private ArrayList<AhatInstance> mReverseReferences;
// Fields initialized in DominatorsComputation.computeDominators().
// mDominated - the list of instances immediately dominated by this instance.
@@ -156,6 +158,15 @@ public abstract class AhatInstance implements Diffable<AhatInstance>,
}
/**
+ * Returns the reachability of the instance.
+ *
+ * @return the reachability of the instance.
+ */
+ public Reachability getReachability() {
+ return mReachability;
+ }
+
+ /**
* Returns true if this object is strongly reachable. An object is strongly
* reachable if there exists a path of (strong) references from some root
* object to this object.
@@ -163,7 +174,7 @@ public abstract class AhatInstance implements Diffable<AhatInstance>,
* @return true if the object is strongly reachable
*/
public boolean isStronglyReachable() {
- return mImmediateDominator != null;
+ return mReachability == Reachability.STRONG;
}
/**
@@ -177,10 +188,13 @@ public abstract class AhatInstance implements Diffable<AhatInstance>,
* Unlike a strongly reachable object, a weakly reachable object is allowed
* to be garbage collected.
*
+ * @deprecated Use {@link #getReachability()} instead, which can distinguish
+ * among soft, weak, phantom, and other kinds of references.
+ *
* @return true if the object is weakly reachable
*/
- public boolean isWeaklyReachable() {
- return !isStronglyReachable() && mNextInstanceToGcRoot != null;
+ @Deprecated public boolean isWeaklyReachable() {
+ return !isStronglyReachable() && !isUnreachable();
}
/**
@@ -192,7 +206,7 @@ public abstract class AhatInstance implements Diffable<AhatInstance>,
* @return true if the object is completely unreachable
*/
public boolean isUnreachable() {
- return !isStronglyReachable() && !isWeaklyReachable();
+ return mReachability == Reachability.UNREACHABLE;
}
/**
@@ -214,7 +228,6 @@ public abstract class AhatInstance implements Diffable<AhatInstance>,
* Returns true if this instance is a GC root.
*
* @return true if this instance is a GC root.
- * @see getRootTypes
*/
public boolean isRoot() {
return mRootTypes != 0;
@@ -373,28 +386,50 @@ public abstract class AhatInstance implements Diffable<AhatInstance>,
}
/**
- * Returns a list of objects with (strong) references to this object.
+ * Returns a list of objects with any kind of reference to this object.
*
* @return the objects referencing this object
*/
- public List<AhatInstance> getHardReverseReferences() {
- if (mHardReverseReferences != null) {
- return mHardReverseReferences;
+ public List<AhatInstance> getReverseReferences() {
+ if (mReverseReferences != null) {
+ return mReverseReferences;
}
return Collections.emptyList();
}
/**
+ * Returns a list of objects with (strong) references to this object.
+ *
+ * @deprecated Use {@link #getReverseReferences()} instead.
+ *
+ * @return the objects referencing this object
+ */
+ @Deprecated public List<AhatInstance> getHardReverseReferences() {
+ List<AhatInstance> refs = new ArrayList<AhatInstance>();
+ for (AhatInstance ref : getReverseReferences()) {
+ if (ref.getReachability() == Reachability.STRONG && ref.getReferent() != this) {
+ refs.add(ref);
+ }
+ }
+ return refs;
+ }
+
+ /**
* Returns a list of objects with soft/weak/phantom/finalizer references to
* this object.
*
+ * @deprecated Use {@link #getReverseReferences()} instead.
+ *
* @return the objects weakly referencing this object
*/
- public List<AhatInstance> getSoftReverseReferences() {
- if (mSoftReverseReferences != null) {
- return mSoftReverseReferences;
+ @Deprecated public List<AhatInstance> getSoftReverseReferences() {
+ List<AhatInstance> refs = new ArrayList<AhatInstance>();
+ for (AhatInstance ref : getReverseReferences()) {
+ if (ref.getReachability() != Reachability.STRONG || ref.getReferent() == this) {
+ refs.add(ref);
+ }
}
- return Collections.emptyList();
+ return refs;
}
/**
@@ -452,6 +487,18 @@ public abstract class AhatInstance implements Diffable<AhatInstance>,
}
/**
+ * Returns the class object that this object represents the overhead for.
+ * ART adds a fake byte[] $classOverhead static field to classes to show the
+ * overheads associated with the class. If this is one such byte[] instance,
+ * returns the class it is associated with. Otherwise null is returned.
+ *
+ * @return the class instance that this is the overhead for
+ */
+ public AhatClassObj getAssociatedClassForOverhead() {
+ return null;
+ }
+
+ /**
* Returns the (bounded-length) string associated with this instance.
* Applies to instances of java.lang.String, char[], and in some cases
* byte[]. Returns null if this object cannot be interpreted as a string.
@@ -597,73 +644,61 @@ public abstract class AhatInstance implements Diffable<AhatInstance>,
}
/**
- * Initialize the reverse reference fields of this instance and all other
- * instances reachable from it. Initializes the following fields:
+ * Determine the reachability of the all instances reachable from the given
+ * root instance. Initializes the following fields:
+ * mReachability
* mNextInstanceToGcRoot
* mNextInstanceToGcRootField
- * mHardReverseReferences
- * mSoftReverseReferences
+ * mReverseReferences
+ *
+ * @param progress used to track progress of the traversal.
+ * @param numInsts upper bound on the total number of instances reachable
+ * from the root, solely used for the purposes of tracking
+ * progress.
*/
- static void computeReverseReferences(SuperRoot root) {
+ static void computeReachability(SuperRoot root, Progress progress, long numInsts) {
// Start by doing a breadth first search through strong references.
- // Then continue the breadth first search through weak references.
- Queue<Reference> strong = new ArrayDeque<Reference>();
- Queue<Reference> weak = new ArrayDeque<Reference>();
+ // Then continue the breadth first through each weaker kind of reference.
+ progress.start("Computing reachability", numInsts);
+ EnumMap<Reachability, Queue<Reference>> queues = new EnumMap<>(Reachability.class);
+ for (Reachability reachability : Reachability.values()) {
+ queues.put(reachability, new ArrayDeque<Reference>());
+ }
for (Reference ref : root.getReferences()) {
- strong.add(ref);
+ queues.get(Reachability.STRONG).add(ref);
}
- while (!strong.isEmpty()) {
- Reference ref = strong.poll();
- assert ref.strong;
-
- if (ref.ref.mNextInstanceToGcRoot == null) {
- // This is the first time we have seen ref.ref.
- ref.ref.mNextInstanceToGcRoot = ref.src;
- ref.ref.mNextInstanceToGcRootField = ref.field;
- ref.ref.mHardReverseReferences = new ArrayList<AhatInstance>();
-
- for (Reference childRef : ref.ref.getReferences()) {
- if (childRef.strong) {
- strong.add(childRef);
- } else {
- weak.add(childRef);
+ for (Reachability reachability : Reachability.values()) {
+ Queue<Reference> queue = queues.get(reachability);
+ while (!queue.isEmpty()) {
+ Reference ref = queue.poll();
+ if (ref.ref.mReachability == Reachability.UNREACHABLE) {
+ // This is the first time we have seen ref.ref.
+ progress.advance();
+ ref.ref.mReachability = reachability;
+ ref.ref.mNextInstanceToGcRoot = ref.src;
+ ref.ref.mNextInstanceToGcRootField = ref.field;
+ ref.ref.mReverseReferences = new ArrayList<AhatInstance>();
+
+ for (Reference childRef : ref.ref.getReferences()) {
+ if (childRef.reachability.ordinal() <= reachability.ordinal()) {
+ queue.add(childRef);
+ } else {
+ queues.get(childRef.reachability).add(childRef);
+ }
}
}
- }
- // Note: We specifically exclude 'root' from the reverse references
- // because it is a fake SuperRoot instance not present in the original
- // heap dump.
- if (ref.src != root) {
- ref.ref.mHardReverseReferences.add(ref.src);
- }
- }
-
- while (!weak.isEmpty()) {
- Reference ref = weak.poll();
-
- if (ref.ref.mNextInstanceToGcRoot == null) {
- // This is the first time we have seen ref.ref.
- ref.ref.mNextInstanceToGcRoot = ref.src;
- ref.ref.mNextInstanceToGcRootField = ref.field;
- ref.ref.mHardReverseReferences = new ArrayList<AhatInstance>();
-
- for (Reference childRef : ref.ref.getReferences()) {
- weak.add(childRef);
- }
- }
-
- if (ref.strong) {
- ref.ref.mHardReverseReferences.add(ref.src);
- } else {
- if (ref.ref.mSoftReverseReferences == null) {
- ref.ref.mSoftReverseReferences = new ArrayList<AhatInstance>();
+ // Note: We specifically exclude 'root' from the reverse references
+ // because it is a fake SuperRoot instance not present in the original
+ // heap dump.
+ if (ref.src != root) {
+ ref.ref.mReverseReferences.add(ref.src);
}
- ref.ref.mSoftReverseReferences.add(ref.src);
}
}
+ progress.done();
}
/**
diff --git a/tools/ahat/src/main/com/android/ahat/heapdump/AhatSnapshot.java b/tools/ahat/src/main/com/android/ahat/heapdump/AhatSnapshot.java
index 535db082c1..d9c7a19431 100644
--- a/tools/ahat/src/main/com/android/ahat/heapdump/AhatSnapshot.java
+++ b/tools/ahat/src/main/com/android/ahat/heapdump/AhatSnapshot.java
@@ -17,6 +17,7 @@
package com.android.ahat.heapdump;
import com.android.ahat.dominators.DominatorsComputation;
+import com.android.ahat.progress.Progress;
import java.util.List;
/**
@@ -39,7 +40,8 @@ public class AhatSnapshot implements Diffable<AhatSnapshot> {
AhatSnapshot(SuperRoot root,
Instances<AhatInstance> instances,
List<AhatHeap> heaps,
- Site rootSite) {
+ Site rootSite,
+ Progress progress) {
mSuperRoot = root;
mInstances = instances;
mHeaps = heaps;
@@ -53,8 +55,8 @@ public class AhatSnapshot implements Diffable<AhatSnapshot> {
}
}
- AhatInstance.computeReverseReferences(mSuperRoot);
- DominatorsComputation.computeDominators(mSuperRoot);
+ AhatInstance.computeReachability(mSuperRoot, progress, mInstances.size());
+ DominatorsComputation.computeDominators(mSuperRoot, progress, mInstances.size());
AhatInstance.computeRetainedSize(mSuperRoot, mHeaps.size());
for (AhatHeap heap : mHeaps) {
diff --git a/tools/ahat/src/main/com/android/ahat/heapdump/DominatorReferenceIterator.java b/tools/ahat/src/main/com/android/ahat/heapdump/DominatorReferenceIterator.java
index 0b99e496cc..8c8de2383b 100644
--- a/tools/ahat/src/main/com/android/ahat/heapdump/DominatorReferenceIterator.java
+++ b/tools/ahat/src/main/com/android/ahat/heapdump/DominatorReferenceIterator.java
@@ -37,7 +37,7 @@ class DominatorReferenceIterator implements Iterator<AhatInstance>,
public boolean hasNext() {
while (mNext == null && mIter.hasNext()) {
Reference ref = mIter.next();
- if (ref.strong) {
+ if (ref.reachability == Reachability.STRONG) {
mNext = ref.ref;
}
}
diff --git a/tools/ahat/src/main/com/android/ahat/heapdump/Instances.java b/tools/ahat/src/main/com/android/ahat/heapdump/Instances.java
index 085144650f..7bb19a244b 100644
--- a/tools/ahat/src/main/com/android/ahat/heapdump/Instances.java
+++ b/tools/ahat/src/main/com/android/ahat/heapdump/Instances.java
@@ -67,6 +67,10 @@ class Instances<T extends AhatInstance> implements Iterable<T> {
return null;
}
+ public int size() {
+ return mInstances.size();
+ }
+
@Override
public Iterator<T> iterator() {
return mInstances.iterator();
diff --git a/tools/ahat/src/main/com/android/ahat/heapdump/Parser.java b/tools/ahat/src/main/com/android/ahat/heapdump/Parser.java
index 13be57d415..597a260628 100644
--- a/tools/ahat/src/main/com/android/ahat/heapdump/Parser.java
+++ b/tools/ahat/src/main/com/android/ahat/heapdump/Parser.java
@@ -16,6 +16,8 @@
package com.android.ahat.heapdump;
+import com.android.ahat.progress.NullProgress;
+import com.android.ahat.progress.Progress;
import com.android.ahat.proguard.ProguardMap;
import java.io.File;
import java.io.IOException;
@@ -33,35 +35,95 @@ import java.util.Map;
/**
* Provides methods for parsing heap dumps.
+ * <p>
+ * The heap dump should be a heap dump in the J2SE HPROF format optionally
+ * with Android extensions and satisfying the following additional
+ * constraints:
+ * <ul>
+ * <li>
+ * Class serial numbers, stack frames, and stack traces individually satisfy
+ * the following:
+ * <ul>
+ * <li> All elements are defined before they are referenced.
+ * <li> Ids are densely packed in some range [a, b] where a is not necessarily 0.
+ * <li> There are not more than 2^31 elements defined.
+ * </ul>
+ * <li> All classes are defined via a LOAD CLASS record before the first
+ * heap dump segment.
+ * <li> The ID size used in the heap dump is 4 bytes.
+ * </ul>
*/
public class Parser {
private static final int ID_SIZE = 4;
- private Parser() {
+ private HprofBuffer hprof = null;
+ private ProguardMap map = new ProguardMap();
+ private Progress progress = new NullProgress();
+
+ /**
+ * Creates an hprof Parser that parses a heap dump from a byte buffer.
+ *
+ * @param hprof byte buffer to parse the heap dump from.
+ */
+ public Parser(ByteBuffer hprof) {
+ this.hprof = new HprofBuffer(hprof);
+ }
+
+ /**
+ * Creates an hprof Parser that parses a heap dump from a file.
+ *
+ * @param hprof file to parse the heap dump from.
+ * @throws IOException if the file cannot be accessed.
+ */
+ public Parser(File hprof) throws IOException {
+ this.hprof = new HprofBuffer(hprof);
}
/**
- * Parses a heap dump from a File.
- * <p>
- * The heap dump should be a heap dump in the J2SE HPROF format optionally
- * with Android extensions and satisfying the following additional
- * constraints:
- * <ul>
- * <li>
- * Class serial numbers, stack frames, and stack traces individually satisfy
- * the following:
- * <ul>
- * <li> All elements are defined before they are referenced.
- * <li> Ids are densely packed in some range [a, b] where a is not necessarily 0.
- * <li> There are not more than 2^31 elements defined.
- * </ul>
- * <li> All classes are defined via a LOAD CLASS record before the first
- * heap dump segment.
- * <li> The ID size used in the heap dump is 4 bytes.
- * </ul>
- * <p>
- * The given proguard map will be used to deobfuscate class names, field
- * names, and stack traces in the heap dump.
+ * Sets the proguard map to use for deobfuscating the heap.
+ *
+ * @param map proguard map to use to deobfuscate the heap.
+ * @return this Parser instance.
+ */
+ public Parser map(ProguardMap map) {
+ if (map == null) {
+ throw new NullPointerException("map == null");
+ }
+ this.map = map;
+ return this;
+ }
+
+ /**
+ * Sets the progress indicator to use when parsing the heap.
+ *
+ * @param progress progress indicator to use when parsing the heap.
+ * @return this Parser instance.
+ */
+ public Parser progress(Progress progress) {
+ if (progress == null) {
+ throw new NullPointerException("progress == null");
+ }
+ this.progress = progress;
+ return this;
+ }
+
+ /**
+ * Parse the heap dump.
+ *
+ * @throws IOException if the heap dump could not be read
+ * @throws HprofFormatException if the heap dump is not properly formatted
+ * @return the parsed heap dump
+ */
+ public AhatSnapshot parse() throws IOException, HprofFormatException {
+ try {
+ return parseInternal();
+ } catch (BufferUnderflowException e) {
+ throw new HprofFormatException("Unexpected end of file", e);
+ }
+ }
+
+ /**
+ * Parses a heap dump from a File with given proguard map.
*
* @param hprof the hprof file to parse
* @param map the proguard map for deobfuscation
@@ -71,35 +133,11 @@ public class Parser {
*/
public static AhatSnapshot parseHeapDump(File hprof, ProguardMap map)
throws IOException, HprofFormatException {
- try {
- return parseHeapDump(new HprofBuffer(hprof), map);
- } catch (BufferUnderflowException e) {
- throw new HprofFormatException("Unexpected end of file", e);
- }
+ return new Parser(hprof).map(map).parse();
}
/**
- * Parses a heap dump from a byte buffer.
- * <p>
- * The heap dump should be a heap dump in the J2SE HPROF format optionally
- * with Android extensions and satisfying the following additional
- * constraints:
- * <ul>
- * <li>
- * Class serial numbers, stack frames, and stack traces individually satisfy
- * the following:
- * <ul>
- * <li> All elements are defined before they are referenced.
- * <li> Ids are densely packed in some range [a, b] where a is not necessarily 0.
- * <li> There are not more than 2^31 elements defined.
- * </ul>
- * <li> All classes are defined via a LOAD CLASS record before the first
- * heap dump segment.
- * <li> The ID size used in the heap dump is 4 bytes.
- * </ul>
- * <p>
- * The given proguard map will be used to deobfuscate class names, field
- * names, and stack traces in the heap dump.
+ * Parses a heap dump from a byte buffer with given proguard map.
*
* @param hprof the bytes of the hprof file to parse
* @param map the proguard map for deobfuscation
@@ -109,15 +147,10 @@ public class Parser {
*/
public static AhatSnapshot parseHeapDump(ByteBuffer hprof, ProguardMap map)
throws IOException, HprofFormatException {
- try {
- return parseHeapDump(new HprofBuffer(hprof), map);
- } catch (BufferUnderflowException e) {
- throw new HprofFormatException("Unexpected end of file", e);
- }
+ return new Parser(hprof).map(map).parse();
}
- private static AhatSnapshot parseHeapDump(HprofBuffer hprof, ProguardMap map)
- throws IOException, HprofFormatException, BufferUnderflowException {
+ private AhatSnapshot parseInternal() throws IOException, HprofFormatException {
// Read, and mostly ignore, the hprof header info.
{
StringBuilder format = new StringBuilder();
@@ -154,7 +187,9 @@ public class Parser {
ArrayList<AhatClassObj> classes = new ArrayList<AhatClassObj>();
Instances<AhatClassObj> classById = null;
+ progress.start("Reading hprof", hprof.size());
while (hprof.hasRemaining()) {
+ progress.update(hprof.tell());
int tag = hprof.getU1();
int time = hprof.getU4();
int recordLength = hprof.getU4();
@@ -230,6 +265,7 @@ public class Parser {
}
int subtag;
while (!isEndOfHeapDumpSegment(subtag = hprof.getU1())) {
+ progress.update(hprof.tell());
switch (subtag) {
case 0x01: { // ROOT JNI GLOBAL
long objectId = hprof.getId();
@@ -524,6 +560,7 @@ public class Parser {
break;
}
}
+ progress.done();
instances.addAll(classes);
}
@@ -542,9 +579,11 @@ public class Parser {
// that we couldn't previously resolve.
SuperRoot superRoot = new SuperRoot();
{
+ progress.start("Resolving references", mInstances.size());
Iterator<RootData> ri = roots.iterator();
RootData root = ri.next();
for (AhatInstance inst : mInstances) {
+ progress.advance();
long id = inst.getId();
// Skip past any roots that don't have associated instances.
@@ -613,11 +652,12 @@ public class Parser {
((AhatArrayInstance)inst).initialize(array);
}
}
+ progress.done();
}
hprof = null;
roots = null;
- return new AhatSnapshot(superRoot, mInstances, heaps.heaps, rootSite);
+ return new AhatSnapshot(superRoot, mInstances, heaps.heaps, rootSite, progress);
}
private static boolean isEndOfHeapDumpSegment(int subtag) {
@@ -867,6 +907,13 @@ public class Parser {
}
/**
+ * Returns the size of the file in bytes.
+ */
+ public int size() {
+ return mBuffer.capacity();
+ }
+
+ /**
* Return the current absolution position in the file.
*/
public int tell() {
diff --git a/tools/ahat/src/main/com/android/ahat/heapdump/Reachability.java b/tools/ahat/src/main/com/android/ahat/heapdump/Reachability.java
new file mode 100644
index 0000000000..8df6c8ca23
--- /dev/null
+++ b/tools/ahat/src/main/com/android/ahat/heapdump/Reachability.java
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.ahat.heapdump;
+
+/**
+ * Enum corresponding to the reachability of an instance.
+ * See {@link java.lang.ref} for a specification of the various kinds of
+ * reachibility. The enum constants are specified in decreasing order of
+ * strength.
+ */
+public enum Reachability {
+ /**
+ * The instance is strongly reachable.
+ */
+ STRONG("strong"),
+
+ /**
+ * The instance is softly reachable.
+ */
+ SOFT("soft"),
+
+ /**
+ * The instance is finalizer reachable, but is neither strongly nor softly
+ * reachable.
+ */
+ FINALIZER("finalizer"),
+
+ /**
+ * The instance is weakly reachable.
+ */
+ WEAK("weak"),
+
+ /**
+ * The instance is phantom reachable.
+ */
+ PHANTOM("phantom"),
+
+ /**
+ * The instance is unreachable.
+ */
+ UNREACHABLE("unreachable");
+
+ /**
+ * The name of the reachibility.
+ */
+ private final String name;
+
+ Reachability(String name) {
+ this.name = name;
+ }
+
+ @Override
+ public String toString() {
+ return name;
+ }
+}
diff --git a/tools/ahat/src/main/com/android/ahat/heapdump/Reference.java b/tools/ahat/src/main/com/android/ahat/heapdump/Reference.java
index f1340bd07b..2de76fdc87 100644
--- a/tools/ahat/src/main/com/android/ahat/heapdump/Reference.java
+++ b/tools/ahat/src/main/com/android/ahat/heapdump/Reference.java
@@ -20,19 +20,18 @@ package com.android.ahat.heapdump;
* Reference represents a reference from 'src' to 'ref' through 'field'.
* Field is a string description for human consumption. This is typically
* either "." followed by the field name or an array subscript such as "[4]".
- * 'strong' is true if this is a strong reference, false if it is a
- * weak/soft/other reference.
+ * reachability describes whether the reference is strong/soft/weak/etc.
*/
class Reference {
public final AhatInstance src;
public final String field;
public final AhatInstance ref;
- public final boolean strong;
+ public final Reachability reachability;
- public Reference(AhatInstance src, String field, AhatInstance ref, boolean strong) {
+ public Reference(AhatInstance src, String field, AhatInstance ref, Reachability reachability) {
this.src = src;
this.field = field;
this.ref = ref;
- this.strong = strong;
+ this.reachability = reachability;
}
}
diff --git a/tools/ahat/src/main/com/android/ahat/heapdump/SuperRoot.java b/tools/ahat/src/main/com/android/ahat/heapdump/SuperRoot.java
index b01cffff72..d06df900fb 100644
--- a/tools/ahat/src/main/com/android/ahat/heapdump/SuperRoot.java
+++ b/tools/ahat/src/main/com/android/ahat/heapdump/SuperRoot.java
@@ -54,7 +54,7 @@ class SuperRoot extends AhatInstance implements DominatorsComputation.Node {
@Override
public Reference get(int index) {
String field = ".roots[" + Integer.toString(index) + "]";
- return new Reference(SuperRoot.this, field, mRoots.get(index), true);
+ return new Reference(SuperRoot.this, field, mRoots.get(index), Reachability.STRONG);
}
};
}
diff --git a/tools/ahat/src/main/com/android/ahat/heapdump/Value.java b/tools/ahat/src/main/com/android/ahat/heapdump/Value.java
index b219bf1564..d78f95b548 100644
--- a/tools/ahat/src/main/com/android/ahat/heapdump/Value.java
+++ b/tools/ahat/src/main/com/android/ahat/heapdump/Value.java
@@ -209,7 +209,7 @@ public abstract class Value {
@Override
public abstract String toString();
- private Value getBaseline() {
+ Value getBaseline() {
return this;
}
@@ -396,7 +396,8 @@ public abstract class Value {
return mInstance.toString();
}
- public Value getBaseline() {
+ @Override
+ Value getBaseline() {
return InstanceValue.pack(mInstance.getBaseline());
}
diff --git a/tools/ahat/src/main/com/android/ahat/progress/NullProgress.java b/tools/ahat/src/main/com/android/ahat/progress/NullProgress.java
new file mode 100644
index 0000000000..a0ca08487b
--- /dev/null
+++ b/tools/ahat/src/main/com/android/ahat/progress/NullProgress.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.ahat.progress;
+
+/**
+ * Null progress tracker that ignores all updates.
+ */
+public class NullProgress implements Progress {
+ @Override public void start(String description, long duration) { }
+ @Override public void advance() { }
+ @Override public void advance(long n) { }
+ @Override public void update(long current) { }
+ @Override public void done() { }
+}
diff --git a/tools/ahat/src/main/com/android/ahat/progress/Progress.java b/tools/ahat/src/main/com/android/ahat/progress/Progress.java
new file mode 100644
index 0000000000..a10379da7a
--- /dev/null
+++ b/tools/ahat/src/main/com/android/ahat/progress/Progress.java
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.ahat.progress;
+
+/**
+ * Interface for notifying users of progress during long operations.
+ */
+public interface Progress {
+ /**
+ * Called to indicate the start of a new phase of work with the given
+ * duration. Behavior is undefined if there is a current phase in progress.
+ *
+ * @param description human readable description of the work to be done.
+ * @param duration the maximum duration of the phase, in arbitrary units
+ * appropriate for the work in question.
+ */
+ void start(String description, long duration);
+
+ /**
+ * Called to indicate the current phase has advanced a single unit of its
+ * overall duration towards completion. Behavior is undefined if there is no
+ * current phase in progress.
+ */
+ default void advance() {
+ advance(1);
+ }
+
+ /**
+ * Called to indicate the current phase has advanced <code>n</code> units of
+ * its overall duration towards completion. Behavior is undefined if there
+ * is no current phase in progress.
+ *
+ * @param n number of units of progress that have advanced.
+ */
+ void advance(long n);
+
+ /**
+ * Called to indicate the current phase has completed <code>current</code>
+ * absolute units of its overall duration. Behavior is undefined if there is
+ * no current phase in progress.
+ *
+ * @param current progress towards duration
+ */
+ void update(long current);
+
+ /**
+ * Called to indicates that the current phase has been completed. Behavior
+ * is undefined if there is no current phase in progress.
+ */
+ void done();
+}
diff --git a/tools/ahat/src/test-dump/DumpedStuff.java b/tools/ahat/src/test-dump/DumpedStuff.java
index 98ead07492..804a3a37d4 100644
--- a/tools/ahat/src/test-dump/DumpedStuff.java
+++ b/tools/ahat/src/test-dump/DumpedStuff.java
@@ -136,6 +136,7 @@ public class DumpedStuff extends SuperDumpedStuff {
public WeakReference aWeakReference = new WeakReference(anObject, referenceQueue);
public WeakReference aNullReferentReference = new WeakReference(null, referenceQueue);
public SoftReference aSoftReference = new SoftReference(new Object());
+ public Reference reachabilityReferenceChain;
public byte[] bigArray;
public ObjectTree[] gcPathArray = new ObjectTree[]{null, null,
new ObjectTree(
@@ -145,7 +146,7 @@ public class DumpedStuff extends SuperDumpedStuff {
public Reference aLongStrongPathToSamplePathObject;
public WeakReference aShortWeakPathToSamplePathObject;
public WeakReference aWeakRefToGcRoot = new WeakReference(Main.class);
- public SoftReference aWeakChain = new SoftReference(new Reference(new Reference(new Object())));
+ public SoftReference aSoftChain = new SoftReference(new Reference(new Reference(new Object())));
public Object[] basicStringRef;
public AddedObject addedObject;
public UnchangedObject unchangedObject = new UnchangedObject();
@@ -157,4 +158,15 @@ public class DumpedStuff extends SuperDumpedStuff {
public int[] modifiedArray;
public Object objectAllocatedAtKnownSite;
public Object objectAllocatedAtKnownSubSite;
+
+ // Allocate those objects that we need to not be GC'd before taking the heap
+ // dump.
+ public void shouldNotGc() {
+ reachabilityReferenceChain = new Reference(
+ new SoftReference(
+ new Reference(
+ new WeakReference(
+ new SoftReference(
+ new PhantomReference(new Object(), referenceQueue))))));
+ }
}
diff --git a/tools/ahat/src/test-dump/Main.java b/tools/ahat/src/test-dump/Main.java
index de3674846b..ca18fd8cec 100644
--- a/tools/ahat/src/test-dump/Main.java
+++ b/tools/ahat/src/test-dump/Main.java
@@ -49,6 +49,8 @@ public class Main {
stuff.basicStringRef = new Object[]{stuff.basicString};
}
+ stuff.shouldNotGc();
+
// Take a heap dump that will include that instance of DumpedStuff.
System.err.println("Dumping hprof data to " + file);
VMDebug.dumpHprofData(file);
diff --git a/tools/ahat/src/test/com/android/ahat/Tests.java b/tools/ahat/src/test/com/android/ahat/AhatTestSuite.java
index 0e7043291d..bce1f053be 100644
--- a/tools/ahat/src/test/com/android/ahat/Tests.java
+++ b/tools/ahat/src/test/com/android/ahat/AhatTestSuite.java
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2015 The Android Open Source Project
+ * Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,28 +17,32 @@
package com.android.ahat;
import org.junit.runner.JUnitCore;
+import org.junit.runner.RunWith;
+import org.junit.runners.Suite;
-public class Tests {
+@RunWith(Suite.class)
+@Suite.SuiteClasses({
+ DiffFieldsTest.class,
+ DiffTest.class,
+ DominatorsTest.class,
+ HtmlEscaperTest.class,
+ InstanceTest.class,
+ NativeAllocationTest.class,
+ ObjectHandlerTest.class,
+ OverviewHandlerTest.class,
+ PerformanceTest.class,
+ ProguardMapTest.class,
+ RootedHandlerTest.class,
+ QueryTest.class,
+ SiteHandlerTest.class,
+ SiteTest.class
+})
+
+public class AhatTestSuite {
public static void main(String[] args) {
if (args.length == 0) {
- args = new String[]{
- "com.android.ahat.DiffFieldsTest",
- "com.android.ahat.DiffTest",
- "com.android.ahat.DominatorsTest",
- "com.android.ahat.HtmlEscaperTest",
- "com.android.ahat.InstanceTest",
- "com.android.ahat.NativeAllocationTest",
- "com.android.ahat.ObjectHandlerTest",
- "com.android.ahat.OverviewHandlerTest",
- "com.android.ahat.PerformanceTest",
- "com.android.ahat.ProguardMapTest",
- "com.android.ahat.RootedHandlerTest",
- "com.android.ahat.QueryTest",
- "com.android.ahat.SiteHandlerTest",
- "com.android.ahat.SiteTest",
- };
+ args = new String[]{"com.android.ahat.AhatTestSuite"};
}
JUnitCore.main(args);
}
}
-
diff --git a/tools/ahat/src/test/com/android/ahat/DiffTest.java b/tools/ahat/src/test/com/android/ahat/DiffTest.java
index 585f29ae61..b1952b28b0 100644
--- a/tools/ahat/src/test/com/android/ahat/DiffTest.java
+++ b/tools/ahat/src/test/com/android/ahat/DiffTest.java
@@ -18,6 +18,7 @@ package com.android.ahat;
import com.android.ahat.heapdump.AhatHeap;
import com.android.ahat.heapdump.AhatInstance;
+import com.android.ahat.heapdump.Value;
import java.io.IOException;
import org.junit.Test;
@@ -51,6 +52,9 @@ public class DiffTest {
assertEquals(b, a.getBaseline());
assertEquals(a.getSite(), b.getSite().getBaseline());
assertEquals(b.getSite(), a.getSite().getBaseline());
+
+ Value va = Value.pack(a);
+ assertEquals(b, Value.getBaseline(va).asAhatInstance());
}
@Test
diff --git a/tools/ahat/src/test/com/android/ahat/DominatorsTest.java b/tools/ahat/src/test/com/android/ahat/DominatorsTest.java
index 0424e10dc8..d9af363659 100644
--- a/tools/ahat/src/test/com/android/ahat/DominatorsTest.java
+++ b/tools/ahat/src/test/com/android/ahat/DominatorsTest.java
@@ -295,4 +295,35 @@ public class DominatorsTest {
assertEquals(p, d.dominator);
assertEquals(p, e.dominator);
}
+
+ @Test
+ public void twiceRevisit() {
+ // /---->---\
+ // / /--> f -->-\
+ // --> a --> b -->--x---> c --> d
+ // \----------->----/
+ // A regression test for a bug where we failed to ever revisit a node more
+ // than once. The node c is revisited a first time to bring its dominator
+ // up to b. c needs to be revisited again after the dominator for f is
+ // pulled up to a, and that revisit of c is necessary to ensure the
+ // dominator for d is pulled up to a.
+ Node a = new Node("a");
+ Node b = new Node("b");
+ Node x = new Node("x");
+ Node c = new Node("c");
+ Node d = new Node("d");
+ Node f = new Node("f");
+ a.depends = Arrays.asList(f, b);
+ b.depends = Arrays.asList(f, d, x);
+ x.depends = Arrays.asList(c);
+ c.depends = Arrays.asList(d);
+ f.depends = Arrays.asList(c);
+
+ a.computeDominators();
+ assertEquals(a, b.dominator);
+ assertEquals(b, x.dominator);
+ assertEquals(a, c.dominator);
+ assertEquals(a, d.dominator);
+ assertEquals(a, f.dominator);
+ }
}
diff --git a/tools/ahat/src/test/com/android/ahat/InstanceTest.java b/tools/ahat/src/test/com/android/ahat/InstanceTest.java
index 8fbb8849f0..f886e9df5f 100644
--- a/tools/ahat/src/test/com/android/ahat/InstanceTest.java
+++ b/tools/ahat/src/test/com/android/ahat/InstanceTest.java
@@ -21,6 +21,7 @@ import com.android.ahat.heapdump.AhatHeap;
import com.android.ahat.heapdump.AhatInstance;
import com.android.ahat.heapdump.AhatSnapshot;
import com.android.ahat.heapdump.PathElement;
+import com.android.ahat.heapdump.Reachability;
import com.android.ahat.heapdump.Size;
import com.android.ahat.heapdump.Value;
import java.io.IOException;
@@ -216,10 +217,31 @@ public class InstanceTest {
AhatInstance ref = dump.getDumpedAhatInstance("aSoftReference");
AhatInstance referent = ref.getReferent();
assertNotNull(referent);
+ assertEquals(Reachability.SOFT, referent.getReachability());
assertTrue(referent.isWeaklyReachable());
}
@Test
+ public void reachability() throws IOException {
+ TestDump dump = TestDump.getTestDump();
+ AhatInstance strong1 = dump.getDumpedAhatInstance("reachabilityReferenceChain");
+ AhatInstance soft1 = strong1.getField("referent").asAhatInstance();
+ AhatInstance strong2 = soft1.getField("referent").asAhatInstance();
+ AhatInstance weak1 = strong2.getField("referent").asAhatInstance();
+ AhatInstance soft2 = weak1.getField("referent").asAhatInstance();
+ AhatInstance phantom1 = soft2.getField("referent").asAhatInstance();
+ AhatInstance obj = phantom1.getField("referent").asAhatInstance();
+
+ assertEquals(Reachability.STRONG, strong1.getReachability());
+ assertEquals(Reachability.STRONG, soft1.getReachability());
+ assertEquals(Reachability.SOFT, strong2.getReachability());
+ assertEquals(Reachability.SOFT, weak1.getReachability());
+ assertEquals(Reachability.WEAK, soft2.getReachability());
+ assertEquals(Reachability.WEAK, phantom1.getReachability());
+ assertEquals(Reachability.PHANTOM, obj.getReachability());
+ }
+
+ @Test
public void gcRootPath() throws IOException {
TestDump dump = TestDump.getTestDump();
@@ -388,24 +410,31 @@ public class InstanceTest {
// We had a bug in the past where weak references to GC roots caused the
// roots to be incorrectly be considered weakly reachable.
+ assertEquals(Reachability.STRONG, root.getReachability());
assertTrue(root.isStronglyReachable());
assertFalse(root.isWeaklyReachable());
}
@Test
- public void weakReferenceChain() throws IOException {
+ public void softReferenceChain() throws IOException {
// If the only reference to a chain of strongly referenced objects is a
- // weak reference, then all of the objects should be considered weakly
+ // soft reference, then all of the objects should be considered softly
// reachable.
TestDump dump = TestDump.getTestDump();
- AhatInstance ref = dump.getDumpedAhatInstance("aWeakChain");
- AhatInstance weak1 = ref.getField("referent").asAhatInstance();
- AhatInstance weak2 = weak1.getField("referent").asAhatInstance();
- AhatInstance weak3 = weak2.getField("referent").asAhatInstance();
+ AhatInstance ref = dump.getDumpedAhatInstance("aSoftChain");
+ AhatInstance soft1 = ref.getField("referent").asAhatInstance();
+ AhatInstance soft2 = soft1.getField("referent").asAhatInstance();
+ AhatInstance soft3 = soft2.getField("referent").asAhatInstance();
assertTrue(ref.isStronglyReachable());
- assertTrue(weak1.isWeaklyReachable());
- assertTrue(weak2.isWeaklyReachable());
- assertTrue(weak3.isWeaklyReachable());
+ assertEquals(Reachability.SOFT, soft1.getReachability());
+ assertEquals(Reachability.SOFT, soft2.getReachability());
+ assertEquals(Reachability.SOFT, soft3.getReachability());
+
+ // Test the deprecated isWeaklyReachable API, which interprets weak as any
+ // kind of phantom/finalizer/weak/soft reference.
+ assertTrue(soft1.isWeaklyReachable());
+ assertTrue(soft2.isWeaklyReachable());
+ assertTrue(soft3.isWeaklyReachable());
}
@Test
@@ -414,6 +443,8 @@ public class InstanceTest {
AhatInstance obj = dump.getDumpedAhatInstance("anObject");
AhatInstance ref = dump.getDumpedAhatInstance("aReference");
AhatInstance weak = dump.getDumpedAhatInstance("aWeakReference");
+ assertTrue(obj.getReverseReferences().contains(ref));
+ assertTrue(obj.getReverseReferences().contains(weak));
assertTrue(obj.getHardReverseReferences().contains(ref));
assertFalse(obj.getHardReverseReferences().contains(weak));
assertFalse(obj.getSoftReverseReferences().contains(ref));
@@ -481,4 +512,19 @@ public class InstanceTest {
assertEquals("java.lang.String", str.getClassName());
assertNull(str.asString());
}
+
+ @Test
+ public void classOverhead() throws IOException {
+ TestDump dump = TestDump.getTestDump("O.hprof", null, null);
+ AhatSnapshot snapshot = dump.getAhatSnapshot();
+
+ // class libore.io.IoTracker has byte[124]@12c028d1 as its class overhead.
+ AhatInstance overhead = snapshot.findInstance(0x12c028d1);
+ AhatClassObj cls = overhead.getAssociatedClassForOverhead();
+ assertEquals(0x12c028d0, cls.getId());
+ assertEquals("libcore.io.IoTracker", cls.getName());
+
+ // Other kinds of objects should not have associated classes for overhead.
+ assertNull(cls.getAssociatedClassForOverhead());
+ }
}
diff --git a/tools/art b/tools/art
index 1c603d4fa7..62df7eb328 100644
--- a/tools/art
+++ b/tools/art
@@ -22,6 +22,7 @@ DELETE_ANDROID_DATA="no"
LAUNCH_WRAPPER=
LIBART=libart.so
JIT_PROFILE="no"
+ALLOW_DEFAULT_JDWP="no"
VERBOSE="no"
CLEAN_OAT_FILES="yes"
EXTRA_OPTIONS=()
@@ -77,7 +78,6 @@ Usage: art [OPTIONS] [--] [ART_OPTIONS] CLASS
Supported OPTIONS include:
--32 Use the 32-bit Android Runtime.
--64 Use the 64-bit Android Runtime.
- --callgrind Launch the Android Runtime in callgrind.
-d Use the debug ART library (libartd.so).
--debug Equivalent to -d.
--gdb Launch the Android Runtime in gdb.
@@ -91,6 +91,8 @@ Supported OPTIONS include:
--profile Run with profiling, then run using profile data.
--verbose Run script verbosely.
--no-clean Don't cleanup oat directories.
+ --allow-default-jdwp Don't automatically put in -XjdwpProvider:none.
+ You probably do not want this.
The ART_OPTIONS are passed directly to the Android Runtime.
@@ -269,9 +271,6 @@ while [[ "$1" = "-"* ]]; do
--64)
ART_BINARY=dalvikvm64
;;
- --callgrind)
- LAUNCH_WRAPPER="valgrind --tool=callgrind"
- ;;
-d)
;& # Fallthrough
--debug)
@@ -310,6 +309,9 @@ while [[ "$1" = "-"* ]]; do
--no-clean)
CLEAN_OAT_FILES="no"
;;
+ --allow-default-jdwp)
+ ALLOW_DEFAULT_JDWP="yes"
+ ;;
--*)
echo "unknown option: $1" 1>&2
usage
@@ -361,6 +363,10 @@ if [ "$PERF" != "" ]; then
EXTRA_OPTIONS+=(-Xcompiler-option --generate-debug-info)
fi
+if [ "$ALLOW_DEFAULT_JDWP" = "no" ]; then
+ EXTRA_OPTIONS+=(-XjdwpProvider:none)
+fi
+
if [ "$JIT_PROFILE" = "yes" ]; then
# Create the profile. The runtime expects profiles to be created before
# execution.
diff --git a/tools/build/var_list b/tools/build/var_list
index bb005cf77c..98a54725da 100644
--- a/tools/build/var_list
+++ b/tools/build/var_list
@@ -34,3 +34,6 @@ HOST_PREFER_32_BIT
HOST_OUT_EXECUTABLES
ANDROID_JAVA_TOOLCHAIN
+# b/62653020
+DIST_DIR
+SOONG_OUT_DIR
diff --git a/tools/cleanup-buildbot-device.sh b/tools/cleanup-buildbot-device.sh
index ca5219aa25..694c739e07 100755
--- a/tools/cleanup-buildbot-device.sh
+++ b/tools/cleanup-buildbot-device.sh
@@ -28,33 +28,16 @@ if [[ -n "$ART_TEST_CHROOT" ]]; then
exit 1
fi
- echo -e "${green}Clean up /system in chroot${nc}"
- # Remove all files under /system except the potential property_contexts file.
- #
- # The current ART Buildbot set-up runs the "setup device" step
- # (performed by script tools/setup-buildbot-device.sh) before the
- # "device cleanup" step (implemented by this script). As
- # property_contexts file aliases are created during the former step,
- # we need this exception to prevent the property_contexts file under
- # /system in the chroot from being removed by the latter step.
- #
- # TODO: Reorder ART Buildbot steps so that "device cleanup" happens
- # before "setup device" and remove this special case.
- adb shell test -d "$ART_TEST_CHROOT/system" \
- "&&" find "$ART_TEST_CHROOT/system" \
- ! -path "$ART_TEST_CHROOT/system/etc/selinux/plat_property_contexts" \
- ! -type d \
- -exec rm -f \{\} +
+ if adb shell test -d "$ART_TEST_CHROOT"; then
+ echo -e "${green}Remove entire /system directory from chroot directory${nc}"
+ adb shell rm -rf "$ART_TEST_CHROOT/system"
- echo -e "${green}Clean up some subdirs in /data in chroot${nc}"
- adb shell rm -rf \
- "$ART_TEST_CHROOT/data/local/tmp/*" \
- "$ART_TEST_CHROOT/data/art-test" \
- "$ART_TEST_CHROOT/data/nativetest" \
- "$ART_TEST_CHROOT/data/nativetest64" \
- "$ART_TEST_CHROOT/data/run-test" \
- "$ART_TEST_CHROOT/data/dalvik-cache/*" \
- "$ART_TEST_CHROOT/data/misc/trace/*"
+ echo -e "${green}Remove entire /data directory from chroot directory${nc}"
+ adb shell rm -rf "$ART_TEST_CHROOT/data"
+
+ echo -e "${green}Remove entire chroot directory${nc}"
+ adb shell rmdir "$ART_TEST_CHROOT" || adb shell ls -la "$ART_TEST_CHROOT"
+ fi
else
adb shell rm -rf \
/data/local/tmp /data/art-test /data/nativetest /data/nativetest64 '/data/misc/trace/*'
diff --git a/tools/desugar.sh b/tools/desugar.sh
deleted file mode 100755
index 7f73852ee5..0000000000
--- a/tools/desugar.sh
+++ /dev/null
@@ -1,95 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2017 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# Calls desugar.jar with the --bootclasspath_entry values passed in automatically.
-# (This avoids having to manually set a boot class path).
-#
-#
-# Script-specific args:
-# --mode=[host|target]: Select between host or target bootclasspath (default target).
-# --core-only: Use only "core" bootclasspath (e.g. do not include framework).
-# --show-commands: Print the desugar command being executed.
-# --help: Print above list of args.
-#
-# All other args are forwarded to desugar.jar
-#
-
-DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
-TOP=$DIR/../..
-
-pushd "$TOP" >/dev/null # back to android root.
-
-out=${OUT_DIR:-out}
-desugar_jar=$out/host/linux-x86/framework/desugar.jar
-
-if ! [[ -f $desugar_jar ]]; then
- echo "Error: Missing $desugar_jar; did you do a build?" >&2
- exit 1
-fi
-
-desugar_jar=$(readlink -f "$desugar_jar") # absolute path to desugar jar
-popd >/dev/null
-
-bootjars_args=
-mode=target
-showcommands=n
-while true; do
- case $1 in
- --help)
- echo "Usage: $0 [--mode=host|target] [--core-only] [--show-commands] <desugar args>"
- exit 0
- ;;
- --mode=host)
- bootjars_args="$bootjars_args --host"
- ;;
- --mode=target)
- bootjars_args="$bootjars_args --target"
- ;;
- --mode=*)
- echo "Unsupported $0 usage with --mode=$1" >&2
- exit 1
- ;;
- --core-only)
- bootjars_args="$bootjars_args --core"
- ;;
- --show-commands)
- showcommands=y
- ;;
- *)
- break
- ;;
- esac
- shift
-done
-
-desugar_args=(--min_sdk_version=10000)
-boot_class_path_list=$($TOP/art/tools/bootjars.sh $bootjars_args --path)
-
-for path in $boot_class_path_list; do
- desugar_args+=(--bootclasspath_entry="$path")
-done
-
-if [[ ${#desugar_args[@]} -eq 0 ]]; then
- echo "FATAL: Missing bootjars.sh file path list" >&2
- exit 1
-fi
-
-if [[ $showcommands == y ]]; then
- echo java -jar "$desugar_jar" "${desugar_args[@]}" "$@"
-fi
-
-java -jar "$desugar_jar" "${desugar_args[@]}" "$@"
diff --git a/tools/dexanalyze/Android.bp b/tools/dexanalyze/Android.bp
index a229d73d01..9515ca5c50 100644
--- a/tools/dexanalyze/Android.bp
+++ b/tools/dexanalyze/Android.bp
@@ -20,6 +20,7 @@ cc_defaults {
host_supported: true,
srcs: [
"dexanalyze.cc",
+ "dexanalyze_bytecode.cc",
"dexanalyze_experiments.cc",
],
target: {
diff --git a/tools/dexanalyze/dexanalyze.cc b/tools/dexanalyze/dexanalyze.cc
index 7a9b8fb018..c90bb9cf6a 100644
--- a/tools/dexanalyze/dexanalyze.cc
+++ b/tools/dexanalyze/dexanalyze.cc
@@ -21,6 +21,7 @@
#include <android-base/file.h>
+#include "dexanalyze_bytecode.h"
#include "dexanalyze_experiments.h"
#include "dex/code_item_accessors-inl.h"
#include "dex/dex_file.h"
@@ -28,6 +29,7 @@
#include "dex/dex_instruction-inl.h"
namespace art {
+namespace dexanalyze {
class DexAnalyze {
static constexpr int kExitCodeUsageError = 1;
@@ -51,9 +53,12 @@ class DexAnalyze {
<< " -count_indices (Count dex indices accessed from code items)\n"
<< " -analyze-strings (Analyze string data)\n"
<< " -analyze-debug-info (Analyze debug info)\n"
+ << " -new-bytecode (Bytecode optimizations)\n"
<< " -i (Ignore Dex checksum and verification failures)\n"
<< " -a (Run all experiments)\n"
- << " -d (Dump on per DEX basis)\n";
+ << " -n <int> (run experiment with 1 .. n as argument)\n"
+ << " -d (Dump on per Dex basis)\n"
+ << " -v (Verbose dumping)\n";
return kExitCodeUsageError;
}
@@ -65,14 +70,25 @@ class DexAnalyze {
if (arg == "-i") {
verify_checksum_ = false;
run_dex_file_verifier_ = false;
+ } else if (arg == "-v") {
+ verbose_ = true;
} else if (arg == "-a") {
run_all_experiments_ = true;
+ } else if (arg == "-n") {
+ if (i + 1 >= argc) {
+ return Usage(argv);
+ }
+ std::istringstream iss(argv[i + 1]);
+ iss >> experiment_max_;
+ ++i;
} else if (arg == "-count-indices") {
exp_count_indices_ = true;
} else if (arg == "-analyze-strings") {
exp_analyze_strings_ = true;
} else if (arg == "-analyze-debug-info") {
exp_debug_info_ = true;
+ } else if (arg == "-new-bytecode") {
+ exp_bytecode_ = true;
} else if (arg == "-d") {
dump_per_input_dex_ = true;
} else if (!arg.empty() && arg[0] == '-') {
@@ -88,6 +104,7 @@ class DexAnalyze {
return 0;
}
+ bool verbose_ = false;
bool verify_checksum_ = true;
bool run_dex_file_verifier_ = true;
bool dump_per_input_dex_ = false;
@@ -95,7 +112,9 @@ class DexAnalyze {
bool exp_code_metrics_ = false;
bool exp_analyze_strings_ = false;
bool exp_debug_info_ = false;
+ bool exp_bytecode_ = false;
bool run_all_experiments_ = false;
+ uint64_t experiment_max_ = 1u;
std::vector<std::string> filenames_;
};
@@ -114,6 +133,22 @@ class DexAnalyze {
if (options->run_all_experiments_ || options->exp_debug_info_) {
experiments_.emplace_back(new AnalyzeDebugInfo);
}
+ if (options->run_all_experiments_ || options->exp_bytecode_) {
+ for (size_t i = 0; i < options->experiment_max_; ++i) {
+ uint64_t exp_value = 0u;
+ if (i == 0) {
+ exp_value = std::numeric_limits<uint64_t>::max();
+ } else if (i == 1) {
+ exp_value = 0u;
+ } else {
+ exp_value = 1u << (i - 2);
+ }
+ experiments_.emplace_back(new NewRegisterInstructions(exp_value));
+ }
+ }
+ for (const std::unique_ptr<Experiment>& experiment : experiments_) {
+ experiment->dump_ = options->verbose_;
+ }
}
bool ProcessDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files) {
@@ -188,9 +223,10 @@ class DexAnalyze {
}
};
+} // namespace dexanalyze
} // namespace art
int main(int argc, char** argv) {
- return art::DexAnalyze::Run(argc, argv);
+ return art::dexanalyze::DexAnalyze::Run(argc, argv);
}
diff --git a/tools/dexanalyze/dexanalyze_bytecode.cc b/tools/dexanalyze/dexanalyze_bytecode.cc
new file mode 100644
index 0000000000..6bc892164d
--- /dev/null
+++ b/tools/dexanalyze/dexanalyze_bytecode.cc
@@ -0,0 +1,547 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "dexanalyze_bytecode.h"
+
+#include <algorithm>
+#include <iomanip>
+#include <iostream>
+
+#include "dex/class_accessor-inl.h"
+#include "dex/code_item_accessors-inl.h"
+#include "dex/dex_instruction-inl.h"
+
+namespace art {
+namespace dexanalyze {
+
+// Given a map of <key, usage count>, sort by most used and assign index <key, index in most used>
+enum class Order {
+ kMostUsed,
+ kNormal,
+};
+
+template <typename T, typename U>
+static inline SafeMap<T, U> SortByOrder(const SafeMap<T, U>& usage, Order order) {
+ std::vector<std::pair<U, T>> most_used;
+ for (const auto& pair : usage) {
+ most_used.emplace_back(pair.second, pair.first);
+ }
+ if (order == Order::kMostUsed) {
+ std::sort(most_used.rbegin(), most_used.rend());
+ }
+ U current_index = 0u;
+ SafeMap<T, U> ret;
+ for (auto&& pair : most_used) {
+ CHECK(ret.emplace(pair.second, current_index++).second);
+ }
+ return ret;
+}
+
+static inline std::ostream& operator<<(std::ostream& os, const std::vector<uint8_t>& bytes) {
+ os << std::hex;
+ for (const uint8_t& c : bytes) {
+ os << std::setw(2) << std::setfill('0') << static_cast<uint32_t>(c)
+ << (&c != &bytes.back() ? " " : "");
+ }
+ os << std::dec;
+ return os;
+}
+
+void NewRegisterInstructions::ProcessDexFiles(
+ const std::vector<std::unique_ptr<const DexFile>>& dex_files) {
+ std::set<std::vector<uint8_t>> deduped;
+ for (const std::unique_ptr<const DexFile>& dex_file : dex_files) {
+ std::map<size_t, TypeLinkage> types;
+ std::set<const void*> visited;
+ for (ClassAccessor accessor : dex_file->GetClasses()) {
+ InstructionBuilder inst_builder(types,
+ /*count_types*/ true,
+ /*dump*/ false,
+ experiments_,
+ instruction_freq_);
+ for (const ClassAccessor::Method& method : accessor.GetMethods()) {
+ inst_builder.Process(*dex_file, method.GetInstructionsAndData(), accessor.GetClassIdx());
+ }
+ }
+ // Reorder to get an index for each map instead of a count.
+ for (auto&& pair : types) {
+ pair.second.types_ = SortByOrder(pair.second.types_, Order::kMostUsed);
+ pair.second.fields_ = SortByOrder(pair.second.fields_, Order::kMostUsed);
+ pair.second.methods_ = SortByOrder(pair.second.methods_, Order::kMostUsed);
+ pair.second.strings_ = SortByOrder(pair.second.strings_, Order::kMostUsed);
+ }
+ // Visit classes and convert code items.
+ for (ClassAccessor accessor : dex_file->GetClasses()) {
+ InstructionBuilder inst_builder(types,
+ /*count_types*/ false,
+ dump_,
+ experiments_,
+ instruction_freq_);
+ for (const ClassAccessor::Method& method : accessor.GetMethods()) {
+ if (method.GetCodeItem() == nullptr || !visited.insert(method.GetCodeItem()).second) {
+ continue;
+ }
+ if (dump_) {
+ std::cout << std::endl
+ << "Processing " << dex_file->PrettyMethod(method.GetIndex(), true);
+ }
+ CodeItemDataAccessor data = method.GetInstructionsAndData();
+ inst_builder.Process(*dex_file, data, accessor.GetClassIdx());
+ std::vector<uint8_t> buffer = std::move(inst_builder.buffer_);
+ const size_t buffer_size = buffer.size();
+ dex_code_bytes_ += data.InsnsSizeInBytes();
+ output_size_ += buffer_size;
+ // Add extra data at the end to have fair dedupe.
+ EncodeUnsignedLeb128(&buffer, data.RegistersSize());
+ EncodeUnsignedLeb128(&buffer, data.InsSize());
+ EncodeUnsignedLeb128(&buffer, data.OutsSize());
+ EncodeUnsignedLeb128(&buffer, data.TriesSize());
+ EncodeUnsignedLeb128(&buffer, data.InsnsSizeInCodeUnits());
+ if (deduped.insert(buffer).second) {
+ deduped_size_ += buffer_size;
+ }
+ }
+ missing_field_idx_count_ += inst_builder.missing_field_idx_count_;
+ missing_method_idx_count_ += inst_builder.missing_method_idx_count_;
+ }
+ }
+}
+
+void NewRegisterInstructions::Dump(std::ostream& os, uint64_t total_size) const {
+ os << "Enabled experiments " << experiments_ << std::endl;
+ os << "Total Dex code bytes: " << Percent(dex_code_bytes_, total_size) << "\n";
+ os << "Total output code bytes: " << Percent(output_size_, total_size) << "\n";
+ os << "Total deduped code bytes: " << Percent(deduped_size_, total_size) << "\n";
+ os << "Missing field idx count: " << missing_field_idx_count_ << "\n";
+ os << "Missing method idx count: " << missing_method_idx_count_ << "\n";
+ std::vector<std::pair<size_t, std::vector<uint8_t>>> pairs;
+ for (auto&& pair : instruction_freq_) {
+ if (pair.second > 0 && !pair.first.empty()) {
+ // Savings exclude one byte per occurrence and one occurence from having the macro
+ // dictionary.
+ pairs.emplace_back((pair.second - 1) * (pair.first.size() - 1), pair.first);
+ }
+ }
+ std::sort(pairs.rbegin(), pairs.rend());
+ os << "Top instruction bytecode sizes and hex dump" << "\n";
+ uint64_t top_instructions_savings = 0u;
+ for (size_t i = 0; i < 128 && i < pairs.size(); ++i) {
+ top_instructions_savings += pairs[i].first;
+ if (dump_ || (true)) {
+ auto bytes = pairs[i].second;
+ // Remove opcode bytes.
+ bytes.erase(bytes.begin());
+ os << Percent(pairs[i].first, total_size) << " "
+ << Instruction::Name(static_cast<Instruction::Code>(pairs[i].second[0]))
+ << "(" << bytes << ")\n";
+ }
+ }
+ os << "Top instructions 1b macro savings "
+ << Percent(top_instructions_savings, total_size) << "\n";
+}
+
+InstructionBuilder::InstructionBuilder(std::map<size_t, TypeLinkage>& types,
+ bool count_types,
+ bool dump,
+ uint64_t experiments,
+ std::map<std::vector<uint8_t>, size_t>& instruction_freq)
+ : types_(types),
+ count_types_(count_types),
+ dump_(dump),
+ experiments_(experiments),
+ instruction_freq_(instruction_freq) {}
+
+void InstructionBuilder::Process(const DexFile& dex_file,
+ const CodeItemDataAccessor& code_item,
+ dex::TypeIndex current_class_type) {
+ TypeLinkage& current_type = types_[current_class_type.index_];
+ bool skip_next = false;
+ size_t last_start = 0u;
+ for (auto inst = code_item.begin(); ; ++inst) {
+ if (!count_types_ && last_start != buffer_.size()) {
+ // Register the instruction blob.
+ ++instruction_freq_[std::vector<uint8_t>(buffer_.begin() + last_start, buffer_.end())];
+ last_start = buffer_.size();
+ }
+ if (inst == code_item.end()) {
+ break;
+ }
+ if (dump_) {
+ std::cout << std::endl;
+ std::cout << inst->DumpString(nullptr);
+ if (skip_next) {
+ std::cout << " (SKIPPED)";
+ }
+ }
+ if (skip_next) {
+ skip_next = false;
+ continue;
+ }
+ bool is_iget = false;
+ const Instruction::Code opcode = inst->Opcode();
+ Instruction::Code new_opcode = opcode;
+ switch (opcode) {
+ case Instruction::IGET:
+ case Instruction::IGET_WIDE:
+ case Instruction::IGET_OBJECT:
+ case Instruction::IGET_BOOLEAN:
+ case Instruction::IGET_BYTE:
+ case Instruction::IGET_CHAR:
+ case Instruction::IGET_SHORT:
+ is_iget = true;
+ FALLTHROUGH_INTENDED;
+ case Instruction::IPUT:
+ case Instruction::IPUT_WIDE:
+ case Instruction::IPUT_OBJECT:
+ case Instruction::IPUT_BOOLEAN:
+ case Instruction::IPUT_BYTE:
+ case Instruction::IPUT_CHAR:
+ case Instruction::IPUT_SHORT: {
+ const uint32_t dex_field_idx = inst->VRegC_22c();
+ if (Enabled(kExperimentSingleGetSet)) {
+ // Test deduplication improvements from replacing all iget/set with the same opcode.
+ new_opcode = is_iget ? Instruction::IGET : Instruction::IPUT;
+ }
+ CHECK_LT(dex_field_idx, dex_file.NumFieldIds());
+ dex::TypeIndex holder_type = dex_file.GetFieldId(dex_field_idx).class_idx_;
+ uint32_t receiver = inst->VRegB_22c();
+ uint32_t first_arg_reg = code_item.RegistersSize() - code_item.InsSize();
+ uint32_t out_reg = inst->VRegA_22c();
+ if (Enabled(kExperimentInstanceFieldSelf) &&
+ first_arg_reg == receiver &&
+ holder_type == current_class_type) {
+ if (count_types_) {
+ ++current_type.fields_.FindOrAdd(dex_field_idx)->second;
+ } else {
+ uint32_t field_idx = types_[holder_type.index_].fields_.Get(dex_field_idx);
+ ExtendPrefix(&out_reg, &field_idx);
+ CHECK(InstNibbles(new_opcode, {out_reg, field_idx}));
+ continue;
+ }
+ } else if (Enabled(kExperimentInstanceField)) {
+ if (count_types_) {
+ ++current_type.types_.FindOrAdd(holder_type.index_)->second;
+ ++types_[holder_type.index_].fields_.FindOrAdd(dex_field_idx)->second;
+ } else {
+ uint32_t type_idx = current_type.types_.Get(holder_type.index_);
+ uint32_t field_idx = types_[holder_type.index_].fields_.Get(dex_field_idx);
+ ExtendPrefix(&type_idx, &field_idx);
+ CHECK(InstNibbles(new_opcode, {out_reg, receiver, type_idx, field_idx}));
+ continue;
+ }
+ }
+ break;
+ }
+ case Instruction::CONST_STRING:
+ case Instruction::CONST_STRING_JUMBO: {
+ const bool is_jumbo = opcode == Instruction::CONST_STRING_JUMBO;
+ const uint16_t str_idx = is_jumbo ? inst->VRegB_31c() : inst->VRegB_21c();
+ uint32_t out_reg = is_jumbo ? inst->VRegA_31c() : inst->VRegA_21c();
+ if (Enabled(kExperimentString)) {
+ new_opcode = Instruction::CONST_STRING;
+ if (count_types_) {
+ ++current_type.strings_.FindOrAdd(str_idx)->second;
+ } else {
+ uint32_t idx = current_type.strings_.Get(str_idx);
+ ExtendPrefix(&out_reg, &idx);
+ CHECK(InstNibbles(opcode, {out_reg, idx}));
+ continue;
+ }
+ }
+ break;
+ }
+ case Instruction::SGET:
+ case Instruction::SGET_WIDE:
+ case Instruction::SGET_OBJECT:
+ case Instruction::SGET_BOOLEAN:
+ case Instruction::SGET_BYTE:
+ case Instruction::SGET_CHAR:
+ case Instruction::SGET_SHORT:
+ case Instruction::SPUT:
+ case Instruction::SPUT_WIDE:
+ case Instruction::SPUT_OBJECT:
+ case Instruction::SPUT_BOOLEAN:
+ case Instruction::SPUT_BYTE:
+ case Instruction::SPUT_CHAR:
+ case Instruction::SPUT_SHORT: {
+ uint32_t out_reg = inst->VRegA_21c();
+ const uint32_t dex_field_idx = inst->VRegB_21c();
+ CHECK_LT(dex_field_idx, dex_file.NumFieldIds());
+ dex::TypeIndex holder_type = dex_file.GetFieldId(dex_field_idx).class_idx_;
+ if (Enabled(kExperimentStaticField)) {
+ if (holder_type == current_class_type) {
+ if (count_types_) {
+ ++types_[holder_type.index_].fields_.FindOrAdd(dex_field_idx)->second;
+ } else {
+ uint32_t field_idx = types_[holder_type.index_].fields_.Get(dex_field_idx);
+ ExtendPrefix(&out_reg, &field_idx);
+ if (InstNibbles(new_opcode, {out_reg, field_idx})) {
+ continue;
+ }
+ }
+ } else {
+ if (count_types_) {
+ ++types_[current_class_type.index_].types_.FindOrAdd(holder_type.index_)->second;
+ ++types_[holder_type.index_].fields_.FindOrAdd(dex_field_idx)->second;
+ } else {
+ uint32_t type_idx = current_type.types_.Get(holder_type.index_);
+ uint32_t field_idx = types_[holder_type.index_].fields_.Get(dex_field_idx);
+ ExtendPrefix(&type_idx, &field_idx);
+ if (InstNibbles(new_opcode, {out_reg >> 4, out_reg & 0xF, type_idx, field_idx})) {
+ continue;
+ }
+ }
+ }
+ }
+ break;
+ }
+ // Invoke cases.
+ case Instruction::INVOKE_VIRTUAL:
+ case Instruction::INVOKE_DIRECT:
+ case Instruction::INVOKE_STATIC:
+ case Instruction::INVOKE_INTERFACE:
+ case Instruction::INVOKE_SUPER: {
+ const uint32_t method_idx = DexMethodIndex(inst.Inst());
+ const DexFile::MethodId& method = dex_file.GetMethodId(method_idx);
+ const dex::TypeIndex receiver_type = method.class_idx_;
+ if (Enabled(kExperimentInvoke)) {
+ if (count_types_) {
+ ++current_type.types_.FindOrAdd(receiver_type.index_)->second;
+ ++types_[receiver_type.index_].methods_.FindOrAdd(method_idx)->second;
+ } else {
+ uint32_t args[6] = {};
+ uint32_t arg_count = inst->GetVarArgs(args);
+
+ bool next_move_result = false;
+ uint32_t dest_reg = 0;
+ auto next = std::next(inst);
+ if (next != code_item.end()) {
+ next_move_result =
+ next->Opcode() == Instruction::MOVE_RESULT ||
+ next->Opcode() == Instruction::MOVE_RESULT_WIDE ||
+ next->Opcode() == Instruction::MOVE_RESULT_OBJECT;
+ if (next_move_result) {
+ dest_reg = next->VRegA_11x();
+ }
+ }
+
+ bool result = false;
+ uint32_t type_idx = current_type.types_.Get(receiver_type.index_);
+ uint32_t local_idx = types_[receiver_type.index_].methods_.Get(method_idx);
+ ExtendPrefix(&type_idx, &local_idx);
+ ExtendPrefix(&dest_reg, &local_idx);
+ if (arg_count == 0) {
+ result = InstNibbles(opcode, {dest_reg, type_idx, local_idx});
+ } else if (arg_count == 1) {
+ result = InstNibbles(opcode, {dest_reg, type_idx, local_idx, args[0]});
+ } else if (arg_count == 2) {
+ result = InstNibbles(opcode, {dest_reg, type_idx, local_idx, args[0],
+ args[1]});
+ } else if (arg_count == 3) {
+ result = InstNibbles(opcode, {dest_reg, type_idx, local_idx, args[0],
+ args[1], args[2]});
+ } else if (arg_count == 4) {
+ result = InstNibbles(opcode, {dest_reg, type_idx, local_idx, args[0],
+ args[1], args[2], args[3]});
+ } else if (arg_count == 5) {
+ result = InstNibbles(opcode, {dest_reg, type_idx, local_idx, args[0],
+ args[1], args[2], args[3], args[4]});
+ }
+
+ if (result) {
+ skip_next = next_move_result;
+ continue;
+ }
+ }
+ }
+ break;
+ }
+ case Instruction::IF_EQZ:
+ case Instruction::IF_NEZ: {
+ uint32_t reg = inst->VRegA_21t();
+ int16_t offset = inst->VRegB_21t();
+ if (!count_types_ &&
+ Enabled(kExperimentSmallIf) &&
+ InstNibbles(opcode, {reg, static_cast<uint16_t>(offset)})) {
+ continue;
+ }
+ break;
+ }
+ case Instruction::INSTANCE_OF: {
+ uint32_t type_idx = inst->VRegC_22c();
+ uint32_t in_reg = inst->VRegB_22c();
+ uint32_t out_reg = inst->VRegB_22c();
+ if (count_types_) {
+ ++current_type.types_.FindOrAdd(type_idx)->second;
+ } else {
+ uint32_t local_type = current_type.types_.Get(type_idx);
+ ExtendPrefix(&in_reg, &local_type);
+ CHECK(InstNibbles(new_opcode, {in_reg, out_reg, local_type}));
+ continue;
+ }
+ break;
+ }
+ case Instruction::NEW_ARRAY: {
+ uint32_t len_reg = inst->VRegB_22c();
+ uint32_t type_idx = inst->VRegC_22c();
+ uint32_t out_reg = inst->VRegA_22c();
+ if (count_types_) {
+ ++current_type.types_.FindOrAdd(type_idx)->second;
+ } else {
+ uint32_t local_type = current_type.types_.Get(type_idx);
+ ExtendPrefix(&out_reg, &local_type);
+ CHECK(InstNibbles(new_opcode, {len_reg, out_reg, local_type}));
+ continue;
+ }
+ break;
+ }
+ case Instruction::CONST_CLASS:
+ case Instruction::CHECK_CAST:
+ case Instruction::NEW_INSTANCE: {
+ uint32_t type_idx = inst->VRegB_21c();
+ uint32_t out_reg = inst->VRegA_21c();
+ if (Enabled(kExperimentLocalType)) {
+ if (count_types_) {
+ ++current_type.types_.FindOrAdd(type_idx)->second;
+ } else {
+ bool next_is_init = false;
+ if (opcode == Instruction::NEW_INSTANCE && inst != code_item.end()) {
+ auto next = std::next(inst);
+ if (next->Opcode() == Instruction::INVOKE_DIRECT) {
+ uint32_t args[6] = {};
+ uint32_t arg_count = next->GetVarArgs(args);
+ uint32_t method_idx = DexMethodIndex(next.Inst());
+ if (arg_count == 1u &&
+ args[0] == out_reg &&
+ dex_file.GetMethodName(dex_file.GetMethodId(method_idx)) ==
+ std::string("<init>")) {
+ next_is_init = true;
+ }
+ }
+ }
+ uint32_t local_type = current_type.types_.Get(type_idx);
+ ExtendPrefix(&out_reg, &local_type);
+ CHECK(InstNibbles(opcode, {out_reg, local_type}));
+ skip_next = next_is_init;
+ continue;
+ }
+ }
+ break;
+ }
+ case Instruction::RETURN:
+ case Instruction::RETURN_OBJECT:
+ case Instruction::RETURN_WIDE:
+ case Instruction::RETURN_VOID: {
+ if (!count_types_ && Enabled(kExperimentReturn)) {
+ if (opcode == Instruction::RETURN_VOID || inst->VRegA_11x() == 0) {
+ if (InstNibbles(opcode, {})) {
+ continue;
+ }
+ }
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ if (!count_types_) {
+ Add(new_opcode, inst.Inst());
+ }
+ }
+ if (dump_) {
+ std::cout << std::endl
+ << "Bytecode size " << code_item.InsnsSizeInBytes() << " -> " << buffer_.size();
+ std::cout << std::endl;
+ }
+}
+
+void InstructionBuilder::Add(Instruction::Code opcode, const Instruction& inst) {
+ const uint8_t* start = reinterpret_cast<const uint8_t*>(&inst);
+ buffer_.push_back(opcode);
+ buffer_.insert(buffer_.end(), start + 1, start + 2 * inst.SizeInCodeUnits());
+}
+
+void InstructionBuilder::ExtendPrefix(uint32_t* value1, uint32_t* value2) {
+ if (*value1 < 16 && *value2 < 16) {
+ return;
+ }
+ if ((*value1 >> 4) == 1 && *value2 < 16) {
+ InstNibbles(0xE5, {});
+ *value1 ^= 1u << 4;
+ return;
+ } else if ((*value2 >> 4) == 1 && *value1 < 16) {
+ InstNibbles(0xE6, {});
+ *value2 ^= 1u << 4;
+ return;
+ }
+ if (*value1 < 256 && *value2 < 256) {
+ // Extend each value by 4 bits.
+ CHECK(InstNibbles(0xE3, {*value1 >> 4, *value2 >> 4}));
+ } else {
+ // Extend each value by 12 bits.
+ CHECK(InstNibbles(0xE4, {
+ (*value1 >> 12) & 0xF,
+ (*value1 >> 8) & 0xF,
+ (*value1 >> 4) & 0xF,
+ (*value2 >> 12) & 0xF,
+ (*value2 >> 8) & 0xF,
+ (*value2 >> 4) & 0xF}));
+ }
+ *value1 &= 0xF;
+ *value2 &= 0XF;
+}
+
+bool InstructionBuilder::InstNibblesAndIndex(uint8_t opcode,
+ uint16_t idx,
+ const std::vector<uint32_t>& args) {
+ if (!InstNibbles(opcode, args)) {
+ return false;
+ }
+ buffer_.push_back(static_cast<uint8_t>(idx >> 8));
+ buffer_.push_back(static_cast<uint8_t>(idx));
+ return true;
+}
+
+bool InstructionBuilder::InstNibbles(uint8_t opcode, const std::vector<uint32_t>& args) {
+ if (dump_) {
+ std::cout << " ==> " << Instruction::Name(static_cast<Instruction::Code>(opcode)) << " ";
+ for (int v : args) {
+ std::cout << v << ", ";
+ }
+ }
+ for (int v : args) {
+ if (v >= 16) {
+ if (dump_) {
+ std::cout << "(OUT_OF_RANGE)";
+ }
+ return false;
+ }
+ }
+ buffer_.push_back(opcode);
+ for (size_t i = 0; i < args.size(); i += 2) {
+ buffer_.push_back(args[i] << 4);
+ if (i + 1 < args.size()) {
+ buffer_.back() |= args[i + 1];
+ }
+ }
+ while (buffer_.size() % alignment_ != 0) {
+ buffer_.push_back(0);
+ }
+ return true;
+}
+
+} // namespace dexanalyze
+} // namespace art
diff --git a/tools/dexanalyze/dexanalyze_bytecode.h b/tools/dexanalyze/dexanalyze_bytecode.h
new file mode 100644
index 0000000000..e7c5e7b572
--- /dev/null
+++ b/tools/dexanalyze/dexanalyze_bytecode.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_TOOLS_DEXANALYZE_DEXANALYZE_BYTECODE_H_
+#define ART_TOOLS_DEXANALYZE_DEXANALYZE_BYTECODE_H_
+
+#include <vector>
+#include <map>
+
+#include "base/safe_map.h"
+#include "dexanalyze_experiments.h"
+#include "dex/code_item_accessors.h"
+
+namespace art {
+namespace dexanalyze {
+
+enum BytecodeExperiment {
+ kExperimentInvoke,
+ kExperimentInstanceField,
+ kExperimentInstanceFieldSelf,
+ kExperimentStaticField,
+ kExperimentLocalType,
+ kExperimentReturn,
+ kExperimentSmallIf,
+ kExperimentString,
+ kExperimentSingleGetSet,
+};
+
+// Maps from global index to local index.
+struct TypeLinkage {
+ // Referenced types.
+ SafeMap<size_t, size_t> types_;
+ // Owned fields.
+ SafeMap<size_t, size_t> fields_;
+ // Owned methods.
+ SafeMap<size_t, size_t> methods_;
+ // Referenced strings.
+ SafeMap<size_t, size_t> strings_;
+};
+
+class InstructionBuilder {
+ public:
+ InstructionBuilder(std::map<size_t, TypeLinkage>& types,
+ bool count_types,
+ bool dump,
+ uint64_t experiments,
+ std::map<std::vector<uint8_t>, size_t>& instruction_freq);
+ void Process(const DexFile& dex_file,
+ const CodeItemDataAccessor& code_item,
+ dex::TypeIndex current_class_type);
+ void Add(Instruction::Code opcode, const Instruction& inst);
+ bool InstNibblesAndIndex(uint8_t opcode, uint16_t idx, const std::vector<uint32_t>& args);
+ bool InstNibbles(uint8_t opcode, const std::vector<uint32_t>& args);
+ void ExtendPrefix(uint32_t* value1, uint32_t* value2);
+ bool Enabled(BytecodeExperiment experiment) const {
+ return experiments_ & (1u << static_cast<uint64_t>(experiment));
+ }
+
+ size_t alignment_ = 1u;
+ std::vector<uint8_t> buffer_;
+ // Global index -> local index maps.
+ std::map<size_t, TypeLinkage>& types_;
+ uint64_t missing_field_idx_count_ = 0u;
+ uint64_t missing_method_idx_count_ = 0u;
+ const bool count_types_;
+ const bool dump_;
+ uint64_t experiments_ = std::numeric_limits<uint64_t>::max();
+ std::map<std::vector<uint8_t>, size_t>& instruction_freq_;
+};
+
+class NewRegisterInstructions : public Experiment {
+ public:
+ explicit NewRegisterInstructions(uint64_t experiments) : experiments_(experiments) {}
+ void ProcessDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files);
+ void Dump(std::ostream& os, uint64_t total_size) const;
+
+ private:
+ uint64_t output_size_ = 0u;
+ uint64_t deduped_size_ = 0u;
+ uint64_t dex_code_bytes_ = 0u;
+ uint64_t missing_field_idx_count_ = 0u;
+ uint64_t missing_method_idx_count_ = 0u;
+ uint64_t experiments_ = std::numeric_limits<uint64_t>::max();
+ std::map<std::vector<uint8_t>, size_t> instruction_freq_;
+};
+
+} // namespace dexanalyze
+} // namespace art
+
+#endif // ART_TOOLS_DEXANALYZE_DEXANALYZE_BYTECODE_H_
diff --git a/tools/dexanalyze/dexanalyze_experiments.cc b/tools/dexanalyze/dexanalyze_experiments.cc
index 244f45bbe6..b9a2ede97e 100644
--- a/tools/dexanalyze/dexanalyze_experiments.cc
+++ b/tools/dexanalyze/dexanalyze_experiments.cc
@@ -16,6 +16,7 @@
#include "dexanalyze_experiments.h"
+#include <algorithm>
#include <stdint.h>
#include <inttypes.h>
#include <iostream>
@@ -31,8 +32,9 @@
#include "dex/utf-inl.h"
namespace art {
+namespace dexanalyze {
-static inline bool IsRange(Instruction::Code code) {
+bool IsRange(Instruction::Code code) {
return code == Instruction::INVOKE_VIRTUAL_RANGE ||
code == Instruction::INVOKE_DIRECT_RANGE ||
code == Instruction::INVOKE_SUPER_RANGE ||
@@ -40,11 +42,11 @@ static inline bool IsRange(Instruction::Code code) {
code == Instruction::INVOKE_INTERFACE_RANGE;
}
-static inline uint16_t NumberOfArgs(const Instruction& inst) {
+uint16_t NumberOfArgs(const Instruction& inst) {
return IsRange(inst.Opcode()) ? inst.VRegA_3rc() : inst.VRegA_35c();
}
-static inline uint16_t DexMethodIndex(const Instruction& inst) {
+uint16_t DexMethodIndex(const Instruction& inst) {
return IsRange(inst.Opcode()) ? inst.VRegB_3rc() : inst.VRegB_35c();
}
@@ -69,7 +71,7 @@ std::string PercentDivide(uint64_t value, uint64_t max) {
static_cast<double>(value * 100) / static_cast<double>(max));
}
-static size_t PrefixLen(const std::string& a, const std::string& b) {
+size_t PrefixLen(const std::string& a, const std::string& b) {
size_t len = 0;
for (; len < a.length() && len < b.length() && a[len] == b[len]; ++len) {}
return len;
@@ -279,6 +281,33 @@ void AnalyzeStrings::Dump(std::ostream& os, uint64_t total_size) const {
os << "Prefix dictionary elements " << total_num_prefixes_ << "\n";
}
+void CountDexIndices::ProcessDexFiles(
+ const std::vector<std::unique_ptr<const DexFile>>& dex_files) {
+ std::set<std::string> unique_field_names;
+ std::set<std::string> unique_method_names;
+ std::set<std::string> unique_type_names;
+ std::set<std::string> unique_mf_names;
+ for (const std::unique_ptr<const DexFile>& dex_file : dex_files) {
+ for (size_t i = 0; i < dex_file->NumTypeIds(); ++i) {
+ unique_type_names.insert(
+ dex_file->StringDataByIdx(dex_file->GetTypeId(dex::TypeIndex(i)).descriptor_idx_));
+ }
+ for (size_t i = 0; i < dex_file->NumFieldIds(); ++i) {
+ unique_field_names.insert(dex_file->StringDataByIdx(dex_file->GetFieldId(i).name_idx_));
+ }
+ for (size_t i = 0; i < dex_file->NumMethodIds(); ++i) {
+ unique_method_names.insert(dex_file->StringDataByIdx(dex_file->GetMethodId(i).name_idx_));
+ }
+ ProcessDexFile(*dex_file);
+ }
+ total_unique_method_names_ += unique_method_names.size();
+ total_unique_field_names_ += unique_field_names.size();
+ total_unique_type_names_ += unique_type_names.size();
+ unique_mf_names = unique_field_names;
+ unique_mf_names.insert(unique_method_names.begin(), unique_method_names.end());
+ total_unique_mf_names_ += unique_mf_names.size();
+}
+
void CountDexIndices::ProcessDexFile(const DexFile& dex_file) {
num_string_ids_ += dex_file.NumStringIds();
num_method_ids_ += dex_file.NumMethodIds();
@@ -286,13 +315,83 @@ void CountDexIndices::ProcessDexFile(const DexFile& dex_file) {
num_type_ids_ += dex_file.NumTypeIds();
num_class_defs_ += dex_file.NumClassDefs();
std::set<size_t> unique_code_items;
+
for (ClassAccessor accessor : dex_file.GetClasses()) {
std::set<size_t> unique_method_ids;
std::set<size_t> unique_string_ids;
+ // Types accessed and count.
+ std::map<size_t, size_t> types_accessed;
+
+ // Maps from dex field index -> class field index (static or instance).
+ std::map<uint32_t, uint32_t> static_field_index_map_;
+ size_t current_idx = 0u;
+ for (const ClassAccessor::Field& field : accessor.GetStaticFields()) {
+ static_field_index_map_[field.GetIndex()] = current_idx++;
+ }
+ std::map<uint32_t, uint32_t> instance_field_index_map_;
+ current_idx = 0u;
+ for (const ClassAccessor::Field& field : accessor.GetInstanceFields()) {
+ instance_field_index_map_[field.GetIndex()] = current_idx++;
+ }
+ auto ProcessFieldIndex = [&](uint32_t dex_field_idx,
+ uint32_t inout,
+ const std::map<uint32_t, uint32_t>& index_map,
+ /*inout*/ FieldAccessStats* stats) {
+ auto it = index_map.find(dex_field_idx);
+ if (it != index_map.end()) {
+ if (it->second < FieldAccessStats::kMaxFieldIndex) {
+ ++stats->field_index_[it->second];
+ } else {
+ ++stats->field_index_other_;
+ }
+ } else {
+ ++stats->field_index_other_class_;
+ }
+ if (it != index_map.end() &&
+ it->second < FieldAccessStats::kShortBytecodeFieldIndexOutCutOff &&
+ inout < FieldAccessStats::kShortBytecodeInOutCutOff) {
+ ++stats->short_bytecode_;
+ }
+ };
+ auto ProcessInstanceField = [&](const Instruction& inst,
+ uint32_t first_arg_reg,
+ const std::map<uint32_t, uint32_t>& index_map,
+ /*inout*/ InstanceFieldAccessStats* stats) {
+ const uint32_t dex_field_idx = inst.VRegC_22c();
+ ++types_accessed[dex_file.GetFieldId(dex_field_idx).class_idx_.index_];
+ uint32_t input = inst.VRegA_22c();
+ ++stats->inout_[input];
+ const uint32_t receiver = inst.VRegB_22c();
+ // FIXME: This is weird if receiver < first_arg_reg.
+ ++stats->receiver_[(receiver - first_arg_reg) & 0xF];
+ if (first_arg_reg == receiver) {
+ ProcessFieldIndex(dex_field_idx, input, index_map, stats);
+ }
+ };
+ auto ProcessStaticField = [&](const Instruction& inst,
+ const std::map<uint32_t, uint32_t>& index_map,
+ /*inout*/ StaticFieldAccessStats* stats) {
+ const uint32_t dex_field_idx = inst.VRegB_21c();
+ ++types_accessed[dex_file.GetFieldId(dex_field_idx).class_idx_.index_];
+ uint8_t output = inst.VRegA_21c();
+ if (output < 16u) {
+ ++stats->inout_[output];
+ } else {
+ ++stats->inout_other_;
+ }
+ ProcessFieldIndex(dex_field_idx, output, index_map, stats);
+ };
+
for (const ClassAccessor::Method& method : accessor.GetMethods()) {
- dex_code_bytes_ += method.GetInstructions().InsnsSizeInBytes();
+ CodeItemDataAccessor code_item(dex_file, method.GetCodeItem());
+ const uint32_t first_arg_reg =
+ ((method.GetAccessFlags() & kAccStatic) == 0)
+ ? code_item.RegistersSize() - code_item.InsSize()
+ : static_cast<uint32_t>(-1);
+
+ dex_code_bytes_ += code_item.InsnsSizeInBytes();
unique_code_items.insert(method.GetCodeItemOffset());
- for (const DexInstructionPcPair& inst : method.GetInstructions()) {
+ for (const DexInstructionPcPair& inst : code_item) {
switch (inst->Opcode()) {
case Instruction::CONST_STRING: {
const dex::StringIndex string_index(inst->VRegB_21c());
@@ -300,6 +399,48 @@ void CountDexIndices::ProcessDexFile(const DexFile& dex_file) {
++num_string_ids_from_code_;
break;
}
+ case Instruction::IGET:
+ case Instruction::IGET_WIDE:
+ case Instruction::IGET_OBJECT:
+ case Instruction::IGET_BOOLEAN:
+ case Instruction::IGET_BYTE:
+ case Instruction::IGET_CHAR:
+ case Instruction::IGET_SHORT: {
+ ProcessInstanceField(
+ inst.Inst(), first_arg_reg, instance_field_index_map_, &iget_stats_);
+ break;
+ }
+ case Instruction::IPUT:
+ case Instruction::IPUT_WIDE:
+ case Instruction::IPUT_OBJECT:
+ case Instruction::IPUT_BOOLEAN:
+ case Instruction::IPUT_BYTE:
+ case Instruction::IPUT_CHAR:
+ case Instruction::IPUT_SHORT: {
+ ProcessInstanceField(
+ inst.Inst(), first_arg_reg, instance_field_index_map_, &iput_stats_);
+ break;
+ }
+ case Instruction::SGET:
+ case Instruction::SGET_WIDE:
+ case Instruction::SGET_OBJECT:
+ case Instruction::SGET_BOOLEAN:
+ case Instruction::SGET_BYTE:
+ case Instruction::SGET_CHAR:
+ case Instruction::SGET_SHORT: {
+ ProcessStaticField(inst.Inst(), static_field_index_map_, &sget_stats_);
+ break;
+ }
+ case Instruction::SPUT:
+ case Instruction::SPUT_WIDE:
+ case Instruction::SPUT_OBJECT:
+ case Instruction::SPUT_BOOLEAN:
+ case Instruction::SPUT_BYTE:
+ case Instruction::SPUT_CHAR:
+ case Instruction::SPUT_SHORT: {
+ ProcessStaticField(inst.Inst(), static_field_index_map_, &sput_stats_);
+ break;
+ }
case Instruction::CONST_STRING_JUMBO: {
const dex::StringIndex string_index(inst->VRegB_31c());
unique_string_ids.insert(string_index.index_);
@@ -310,6 +451,7 @@ void CountDexIndices::ProcessDexFile(const DexFile& dex_file) {
case Instruction::INVOKE_VIRTUAL:
case Instruction::INVOKE_VIRTUAL_RANGE: {
uint32_t method_idx = DexMethodIndex(inst.Inst());
+ ++types_accessed[dex_file.GetMethodId(method_idx).class_idx_.index_];
if (dex_file.GetMethodId(method_idx).class_idx_ == accessor.GetClassIdx()) {
++same_class_virtual_;
}
@@ -320,6 +462,7 @@ void CountDexIndices::ProcessDexFile(const DexFile& dex_file) {
case Instruction::INVOKE_DIRECT:
case Instruction::INVOKE_DIRECT_RANGE: {
uint32_t method_idx = DexMethodIndex(inst.Inst());
+ ++types_accessed[dex_file.GetMethodId(method_idx).class_idx_.index_];
if (dex_file.GetMethodId(method_idx).class_idx_ == accessor.GetClassIdx()) {
++same_class_direct_;
}
@@ -330,6 +473,7 @@ void CountDexIndices::ProcessDexFile(const DexFile& dex_file) {
case Instruction::INVOKE_STATIC:
case Instruction::INVOKE_STATIC_RANGE: {
uint32_t method_idx = DexMethodIndex(inst.Inst());
+ ++types_accessed[dex_file.GetMethodId(method_idx).class_idx_.index_];
if (dex_file.GetMethodId(method_idx).class_idx_ == accessor.GetClassIdx()) {
++same_class_static_;
}
@@ -340,6 +484,7 @@ void CountDexIndices::ProcessDexFile(const DexFile& dex_file) {
case Instruction::INVOKE_INTERFACE:
case Instruction::INVOKE_INTERFACE_RANGE: {
uint32_t method_idx = DexMethodIndex(inst.Inst());
+ ++types_accessed[dex_file.GetMethodId(method_idx).class_idx_.index_];
if (dex_file.GetMethodId(method_idx).class_idx_ == accessor.GetClassIdx()) {
++same_class_interface_;
}
@@ -350,6 +495,7 @@ void CountDexIndices::ProcessDexFile(const DexFile& dex_file) {
case Instruction::INVOKE_SUPER:
case Instruction::INVOKE_SUPER_RANGE: {
uint32_t method_idx = DexMethodIndex(inst.Inst());
+ ++types_accessed[dex_file.GetMethodId(method_idx).class_idx_.index_];
if (dex_file.GetMethodId(method_idx).class_idx_ == accessor.GetClassIdx()) {
++same_class_super_;
}
@@ -357,18 +503,100 @@ void CountDexIndices::ProcessDexFile(const DexFile& dex_file) {
unique_method_ids.insert(method_idx);
break;
}
+ case Instruction::NEW_ARRAY: {
+ ++types_accessed[inst->VRegC_22c()];
+ break;
+ }
+ case Instruction::FILLED_NEW_ARRAY: {
+ ++types_accessed[inst->VRegB_35c()];
+ break;
+ }
+ case Instruction::FILLED_NEW_ARRAY_RANGE: {
+ ++types_accessed[inst->VRegB_3rc()];
+ break;
+ }
+ case Instruction::CONST_CLASS:
+ case Instruction::CHECK_CAST:
+ case Instruction::NEW_INSTANCE: {
+ ++types_accessed[inst->VRegB_21c()];
+ break;
+ }
+ case Instruction::INSTANCE_OF: {
+ ++types_accessed[inst->VRegB_21c()];
+ break;
+ }
default:
break;
}
}
}
- total_unique_method_idx_ += unique_method_ids.size();
+ // Count uses of top 16n.
+ std::vector<size_t> uses;
+ for (auto&& p : types_accessed) {
+ uses.push_back(p.second);
+ }
+ std::sort(uses.rbegin(), uses.rend());
+ for (size_t i = 0; i < uses.size(); ++i) {
+ if (i < 16) {
+ uses_top_types_ += uses[i];
+ }
+ uses_all_types_ += uses[i];
+ }
+ total_unique_types_ += types_accessed.size();
+ total_unique_method_ids_ += unique_method_ids.size();
total_unique_string_ids_ += unique_string_ids.size();
}
total_unique_code_items_ += unique_code_items.size();
}
void CountDexIndices::Dump(std::ostream& os, uint64_t total_size) const {
+ auto DumpFieldIndexes = [&](const FieldAccessStats& stats) {
+ const uint64_t fields_idx_total = std::accumulate(
+ stats.field_index_,
+ stats.field_index_ + FieldAccessStats::kMaxFieldIndex,
+ stats.field_index_other_ + stats.field_index_other_class_);
+ for (size_t i = 0; i < FieldAccessStats::kMaxFieldIndex; ++i) {
+ os << " field_idx=" << i << ": " << Percent(stats.field_index_[i], fields_idx_total) << "\n";
+ }
+ os << " field_idx=other: " << Percent(stats.field_index_other_, fields_idx_total) << "\n";
+ os << " field_idx=other_class: " << Percent(stats.field_index_other_class_, fields_idx_total)
+ << "\n";
+ };
+ auto DumpInstanceFieldStats = [&](const char* tag, const InstanceFieldAccessStats& stats) {
+ const uint64_t fields_total = std::accumulate(stats.inout_, stats.inout_ + 16u, 0u);
+ os << tag << "\n";
+ for (size_t i = 0; i < 16; ++i) {
+ os << " receiver_reg=" << i << ": " << Percent(stats.receiver_[i], fields_total) << "\n";
+ }
+ DCHECK(tag[1] == 'G' || tag[1] == 'P');
+ const char* inout_tag = (tag[1] == 'G') ? "output_reg" : "input_reg";
+ for (size_t i = 0; i < 16; ++i) {
+ os << " " << inout_tag << "=" << i << ": " << Percent(stats.inout_[i], fields_total) << "\n";
+ }
+ DumpFieldIndexes(stats);
+ os << " short_bytecode: " << Percent(stats.short_bytecode_, fields_total) << "\n";
+ os << " short_bytecode_savings=" << Percent(stats.short_bytecode_ * 2, total_size) << "\n";
+ };
+ DumpInstanceFieldStats("IGET", iget_stats_);
+ DumpInstanceFieldStats("IPUT", iput_stats_);
+
+ auto DumpStaticFieldStats = [&](const char* tag, const StaticFieldAccessStats& stats) {
+ const uint64_t fields_total =
+ std::accumulate(stats.inout_, stats.inout_ + 16u, stats.inout_other_);
+ os << tag << "\n";
+ DCHECK(tag[1] == 'G' || tag[1] == 'P');
+ const char* inout_tag = (tag[1] == 'G') ? "output_reg" : "input_reg";
+ for (size_t i = 0; i < 16; ++i) {
+ os << " " << inout_tag << "=" << i << ": " << Percent(stats.inout_[i], fields_total) << "\n";
+ }
+ os << " " << inout_tag << "=other: " << Percent(stats.inout_other_, fields_total) << "\n";
+ DumpFieldIndexes(stats);
+ os << " short_bytecode: " << Percent(stats.short_bytecode_, fields_total) << "\n";
+ os << " short_bytecode_savings=" << Percent(stats.short_bytecode_ * 2, total_size) << "\n";
+ };
+ DumpStaticFieldStats("SGET", sget_stats_);
+ DumpStaticFieldStats("SPUT", sput_stats_);
+
os << "Num string ids: " << num_string_ids_ << "\n";
os << "Num method ids: " << num_method_ids_ << "\n";
os << "Num field ids: " << num_field_ids_ << "\n";
@@ -380,8 +608,18 @@ void CountDexIndices::Dump(std::ostream& os, uint64_t total_size) const {
os << "Interface same class: " << PercentDivide(same_class_interface_, total_interface_) << "\n";
os << "Super same class: " << PercentDivide(same_class_super_, total_super_) << "\n";
os << "Num strings accessed from code: " << num_string_ids_from_code_ << "\n";
- os << "Unique(per class) method ids accessed from code: " << total_unique_method_idx_ << "\n";
- os << "Unique(per class) string ids accessed from code: " << total_unique_string_ids_ << "\n";
+ os << "Avg unique methods accessed per class: "
+ << static_cast<double>(total_unique_method_ids_) / static_cast<double>(num_class_defs_) << "\n";
+ os << "Avg unique strings accessed per class: "
+ << static_cast<double>(total_unique_string_ids_) / static_cast<double>(num_class_defs_) << "\n";
+ os << "Avg unique types accessed per class " <<
+ static_cast<double>(total_unique_types_) / static_cast<double>(num_class_defs_) << "\n";
+ os << "Total unique methods accessed per class: "
+ << Percent(total_unique_method_ids_, total_size) << "\n";
+ os << "Total unique strings accessed per class: "
+ << Percent(total_unique_string_ids_, total_size) << "\n";
+ os << "Total unique types accessed per class: "
+ << Percent(total_unique_types_, total_size) << "\n";
const size_t same_class_total =
same_class_direct_ +
same_class_virtual_ +
@@ -394,8 +632,15 @@ void CountDexIndices::Dump(std::ostream& os, uint64_t total_size) const {
total_static_ +
total_interface_ +
total_super_;
+ os << "Unique method names: " << Percent(total_unique_method_names_, num_field_ids_) << "\n";
+ os << "Unique field names: " << Percent(total_unique_field_names_, num_method_ids_) << "\n";
+ os << "Unique type names: " << Percent(total_unique_type_names_, num_type_ids_) << "\n";
+ os << "Unique method/field names: "
+ << Percent(total_unique_mf_names_, num_field_ids_ + num_method_ids_) << "\n";
os << "Same class invokes: " << PercentDivide(same_class_total, other_class_total) << "\n";
os << "Invokes from code: " << (same_class_total + other_class_total) << "\n";
+ os << "Type uses on top types: " << PercentDivide(uses_top_types_, uses_all_types_) << "\n";
+ os << "Type uses 1b savings: " << PercentDivide(uses_top_types_, total_size) << "\n";
os << "Total Dex code bytes: " << Percent(dex_code_bytes_, total_size) << "\n";
os << "Total unique code items: " << total_unique_code_items_ << "\n";
os << "Total Dex size: " << total_size << "\n";
@@ -420,7 +665,7 @@ void CodeMetrics::ProcessDexFile(const DexFile& dex_file) {
}
case Instruction::MOVE_RESULT:
case Instruction::MOVE_RESULT_OBJECT: {
- if (space_for_out_arg) {
+ if (space_for_out_arg && inst->VRegA_11x() < 16) {
move_result_savings_ += inst->SizeInCodeUnits() * 2;
}
break;
@@ -441,8 +686,9 @@ void CodeMetrics::Dump(std::ostream& os, uint64_t total_size) const {
}
os << "Move result savings: " << Percent(move_result_savings_, total_size) << "\n";
os << "One byte invoke savings: " << Percent(total, total_size) << "\n";
- const uint64_t low_arg_total = std::accumulate(arg_counts_, arg_counts_ + 3, 0u);
+ const uint64_t low_arg_total = std::accumulate(arg_counts_, arg_counts_ + 2, 0u);
os << "Low arg savings: " << Percent(low_arg_total * 2, total_size) << "\n";
}
+} // namespace dexanalyze
} // namespace art
diff --git a/tools/dexanalyze/dexanalyze_experiments.h b/tools/dexanalyze/dexanalyze_experiments.h
index 2be53d6216..468b74bc00 100644
--- a/tools/dexanalyze/dexanalyze_experiments.h
+++ b/tools/dexanalyze/dexanalyze_experiments.h
@@ -17,15 +17,31 @@
#ifndef ART_TOOLS_DEXANALYZE_DEXANALYZE_EXPERIMENTS_H_
#define ART_TOOLS_DEXANALYZE_DEXANALYZE_EXPERIMENTS_H_
+#include <cstdint>
#include <iosfwd>
#include <memory>
#include <set>
#include <vector>
+#include "base/macros.h"
+#include "dex/dex_instruction.h"
+
namespace art {
class DexFile;
+namespace dexanalyze {
+
+bool IsRange(Instruction::Code code);
+
+uint16_t NumberOfArgs(const Instruction& inst);
+
+uint16_t DexMethodIndex(const Instruction& inst);
+
+std::string PercentDivide(uint64_t value, uint64_t max);
+
+size_t PrefixLen(const std::string& a, const std::string& b);
+
std::string Percent(uint64_t value, uint64_t max);
// An experiment a stateful visitor that runs on dex files. Results are cumulative.
@@ -35,13 +51,15 @@ class Experiment {
virtual void ProcessDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files);
virtual void ProcessDexFile(const DexFile&) {}
virtual void Dump(std::ostream& os, uint64_t total_size) const = 0;
+
+ bool dump_ = false;
};
// Analyze string data and strings accessed from code.
class AnalyzeStrings : public Experiment {
public:
- void ProcessDexFile(const DexFile& dex_file);
- void Dump(std::ostream& os, uint64_t total_size) const;
+ void ProcessDexFile(const DexFile& dex_file) OVERRIDE;
+ void Dump(std::ostream& os, uint64_t total_size) const OVERRIDE;
private:
int64_t wide_string_bytes_ = 0u;
@@ -57,8 +75,8 @@ class AnalyzeStrings : public Experiment {
// Analyze debug info sizes.
class AnalyzeDebugInfo : public Experiment {
public:
- void ProcessDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files);
- void Dump(std::ostream& os, uint64_t total_size) const;
+ void ProcessDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files) OVERRIDE;
+ void Dump(std::ostream& os, uint64_t total_size) const OVERRIDE;
private:
int64_t total_bytes_ = 0u;
@@ -83,17 +101,48 @@ class AnalyzeDebugInfo : public Experiment {
// Count numbers of dex indices.
class CountDexIndices : public Experiment {
public:
- void ProcessDexFile(const DexFile& dex_file);
+ void ProcessDexFile(const DexFile& dex_file) OVERRIDE;
+ void ProcessDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files) OVERRIDE;
void Dump(std::ostream& os, uint64_t total_size) const;
private:
// Total string ids loaded from dex code.
size_t num_string_ids_from_code_ = 0;
- size_t total_unique_method_idx_ = 0;
+ size_t total_unique_method_ids_ = 0;
size_t total_unique_string_ids_ = 0;
uint64_t total_unique_code_items_ = 0u;
+ struct FieldAccessStats {
+ static constexpr size_t kMaxFieldIndex = 32;
+ uint64_t field_index_[kMaxFieldIndex] = {};
+ uint64_t field_index_other_ = 0u;
+ uint64_t field_index_other_class_ = 0u; // Includes superclass fields referenced with
+ // type index pointing to this class.
+
+ static constexpr size_t kShortBytecodeFieldIndexOutCutOff = 16u;
+ static constexpr size_t kShortBytecodeInOutCutOff = 16u;
+ uint64_t short_bytecode_ = 0u;
+
+ uint64_t inout_[16] = {}; // Input for IPUT/SPUT, output for IGET/SGET.
+ };
+ struct InstanceFieldAccessStats : FieldAccessStats {
+ uint64_t receiver_[16] = {};
+ };
+ struct StaticFieldAccessStats : FieldAccessStats {
+ uint64_t inout_other_ = 0u; // Input for SPUT, output for SGET.
+ };
+ InstanceFieldAccessStats iget_stats_;
+ InstanceFieldAccessStats iput_stats_;
+ StaticFieldAccessStats sget_stats_;
+ StaticFieldAccessStats sput_stats_;
+
+ // Unique names.
+ uint64_t total_unique_method_names_ = 0u;
+ uint64_t total_unique_field_names_ = 0u;
+ uint64_t total_unique_type_names_ = 0u;
+ uint64_t total_unique_mf_names_ = 0u;
+
// Other dex ids.
size_t dex_code_bytes_ = 0;
size_t num_string_ids_ = 0;
@@ -113,14 +162,19 @@ class CountDexIndices : public Experiment {
size_t total_interface_ = 0;
size_t same_class_super_ = 0;
size_t total_super_ = 0;
+
+ // Type usage.
+ uint64_t uses_top_types_ = 0u;
+ uint64_t uses_all_types_ = 0u;
+ uint64_t total_unique_types_ = 0u;
};
// Measure various code metrics including args per invoke-virtual, fill/spill move patterns.
class CodeMetrics : public Experiment {
public:
- void ProcessDexFile(const DexFile& dex_file);
+ void ProcessDexFile(const DexFile& dex_file) OVERRIDE;
- void Dump(std::ostream& os, uint64_t total_size) const;
+ void Dump(std::ostream& os, uint64_t total_size) const OVERRIDE;
private:
static constexpr size_t kMaxArgCount = 6;
@@ -128,6 +182,7 @@ class CodeMetrics : public Experiment {
uint64_t move_result_savings_ = 0u;
};
+} // namespace dexanalyze
} // namespace art
#endif // ART_TOOLS_DEXANALYZE_DEXANALYZE_EXPERIMENTS_H_
diff --git a/tools/hiddenapi/hiddenapi.cc b/tools/hiddenapi/hiddenapi.cc
index 97e7f4cf3c..c252a9b4ae 100644
--- a/tools/hiddenapi/hiddenapi.cc
+++ b/tools/hiddenapi/hiddenapi.cc
@@ -16,7 +16,8 @@
#include <fstream>
#include <iostream>
-#include <unordered_set>
+#include <map>
+#include <set>
#include "android-base/stringprintf.h"
#include "android-base/strings.h"
@@ -61,37 +62,82 @@ NO_RETURN static void Usage(const char* fmt, ...) {
va_end(ap);
UsageError("Command: %s", CommandLine().c_str());
- UsageError("Usage: hiddenapi [options]...");
+ UsageError("Usage: hiddenapi [command_name] [options]...");
UsageError("");
- UsageError(" --dex=<filename>: specify dex file whose members' access flags are to be set.");
- UsageError(" At least one --dex parameter must be specified.");
+ UsageError(" Command \"encode\": encode API list membership in boot dex files");
+ UsageError(" --dex=<filename>: dex file which belongs to boot class path,");
+ UsageError(" the file will be overwritten");
UsageError("");
- UsageError(" --light-greylist=<filename>:");
- UsageError(" --dark-greylist=<filename>:");
- UsageError(" --blacklist=<filename>: text files with signatures of methods/fields to be marked");
- UsageError(" greylisted/blacklisted respectively. At least one list must be provided.");
+ UsageError(" --light-greylist=<filename>:");
+ UsageError(" --dark-greylist=<filename>:");
+ UsageError(" --blacklist=<filename>:");
+ UsageError(" text files with signatures of methods/fields to be annotated");
UsageError("");
- UsageError(" --print-hidden-api: dump a list of marked methods/fields to the standard output.");
- UsageError(" There is no indication which API category they belong to.");
+ UsageError(" Command \"list\": dump lists of public and private API");
+ UsageError(" --boot-dex=<filename>: dex file which belongs to boot class path");
+ UsageError(" --stub-dex=<filename>: dex/apk file which belongs to SDK API stubs");
+ UsageError("");
+ UsageError(" --out-public=<filename>: output file for a list of all public APIs");
+ UsageError(" --out-private=<filename>: output file for a list of all private APIs");
UsageError("");
exit(EXIT_FAILURE);
}
+template<typename E>
+static bool Contains(const std::vector<E>& vec, const E& elem) {
+ return std::find(vec.begin(), vec.end(), elem) != vec.end();
+}
+
class DexClass {
public:
DexClass(const DexFile& dex_file, uint32_t idx)
: dex_file_(dex_file), class_def_(dex_file.GetClassDef(idx)) {}
const DexFile& GetDexFile() const { return dex_file_; }
+ const uint8_t* GetData() const { return dex_file_.GetClassData(class_def_); }
const dex::TypeIndex GetClassIndex() const { return class_def_.class_idx_; }
+ const dex::TypeIndex GetSuperclassIndex() const { return class_def_.superclass_idx_; }
- const uint8_t* GetData() const { return dex_file_.GetClassData(class_def_); }
+ bool HasSuperclass() const { return dex_file_.IsTypeIndexValid(GetSuperclassIndex()); }
- const char* GetDescriptor() const { return dex_file_.GetClassDescriptor(class_def_); }
+ std::string GetDescriptor() const { return dex_file_.GetClassDescriptor(class_def_); }
+
+ std::string GetSuperclassDescriptor() const {
+ if (HasSuperclass()) {
+ return dex_file_.StringByTypeIdx(GetSuperclassIndex());
+ } else {
+ return "";
+ }
+ }
+
+ std::set<std::string> GetInterfaceDescriptors() const {
+ std::set<std::string> list;
+ const DexFile::TypeList* ifaces = dex_file_.GetInterfacesList(class_def_);
+ for (uint32_t i = 0; ifaces != nullptr && i < ifaces->Size(); ++i) {
+ list.insert(dex_file_.StringByTypeIdx(ifaces->GetTypeItem(i).type_idx_));
+ }
+ return list;
+ }
+
+ inline bool IsVisible() const { return HasAccessFlags(kAccPublic); }
+
+ inline bool Equals(const DexClass& other) const {
+ bool equals = GetDescriptor() == other.GetDescriptor();
+ if (equals) {
+ // TODO(dbrazdil): Check that methods/fields match as well once b/111116543 is fixed.
+ CHECK_EQ(GetAccessFlags(), other.GetAccessFlags());
+ CHECK_EQ(GetSuperclassDescriptor(), other.GetSuperclassDescriptor());
+ CHECK(GetInterfaceDescriptors() == other.GetInterfaceDescriptors());
+ }
+ return equals;
+ }
private:
+ uint32_t GetAccessFlags() const { return class_def_.access_flags_; }
+ bool HasAccessFlags(uint32_t mask) const { return (GetAccessFlags() & mask) == mask; }
+
const DexFile& dex_file_;
const DexFile::ClassDef& class_def_;
};
@@ -100,10 +146,12 @@ class DexMember {
public:
DexMember(const DexClass& klass, const ClassDataItemIterator& it)
: klass_(klass), it_(it) {
- DCHECK_EQ(it_.IsAtMethod() ? GetMethodId().class_idx_ : GetFieldId().class_idx_,
+ DCHECK_EQ(IsMethod() ? GetMethodId().class_idx_ : GetFieldId().class_idx_,
klass_.GetClassIndex());
}
+ inline const DexClass& GetDeclaringClass() const { return klass_; }
+
// Sets hidden bits in access flags and writes them back into the DEX in memory.
// Note that this will not update the cached data of ClassDataItemIterator
// until it iterates over this item again and therefore will fail a CHECK if
@@ -117,7 +165,7 @@ class DexMember {
// `ptr` initially points to the next ClassData item. We iterate backwards
// until we hit the terminating byte of the previous Leb128 value.
const uint8_t* ptr = it_.DataPointer();
- if (it_.IsAtMethod()) {
+ if (IsMethod()) {
ptr = ReverseSearchUnsignedLeb128(ptr);
DCHECK_EQ(DecodeUnsignedLeb128WithoutMovingCursor(ptr), it_.GetMethodCodeItemOffset());
}
@@ -128,35 +176,57 @@ class DexMember {
UpdateUnsignedLeb128(const_cast<uint8_t*>(ptr), new_flags);
}
- // Returns true if this member's API entry is in `list`.
- bool IsOnApiList(const std::unordered_set<std::string>& list) const {
- return list.find(GetApiEntry()) != list.end();
+ inline bool IsMethod() const { return it_.IsAtMethod(); }
+ inline bool IsVirtualMethod() const { return it_.IsAtVirtualMethod(); }
+
+ // Returns true if the member is public/protected and is in a public class.
+ inline bool IsVisible() const {
+ return GetDeclaringClass().IsVisible() &&
+ (HasAccessFlags(kAccPublic) || HasAccessFlags(kAccProtected));
}
// Constructs a string with a unique signature of this class member.
std::string GetApiEntry() const {
std::stringstream ss;
- ss << klass_.GetDescriptor() << "->";
- if (it_.IsAtMethod()) {
- const DexFile::MethodId& mid = GetMethodId();
- ss << klass_.GetDexFile().GetMethodName(mid)
- << klass_.GetDexFile().GetMethodSignature(mid).ToString();
- } else {
- const DexFile::FieldId& fid = GetFieldId();
- ss << klass_.GetDexFile().GetFieldName(fid) << ":"
- << klass_.GetDexFile().GetFieldTypeDescriptor(fid);
- }
+ ss << klass_.GetDescriptor() << "->" << GetName() << (IsMethod() ? "" : ":") << GetSignature();
return ss.str();
}
+ inline bool operator==(const DexMember& other) {
+ // These need to match if they should resolve to one another.
+ bool equals = IsMethod() == other.IsMethod() &&
+ GetName() == other.GetName() &&
+ GetSignature() == other.GetSignature();
+
+ // Sanity checks if they do match.
+ if (equals) {
+ CHECK_EQ(IsVirtualMethod(), other.IsVirtualMethod());
+ }
+
+ return equals;
+ }
+
private:
+ inline uint32_t GetAccessFlags() const { return it_.GetMemberAccessFlags(); }
+ inline uint32_t HasAccessFlags(uint32_t mask) const { return (GetAccessFlags() & mask) == mask; }
+
+ inline std::string GetName() const {
+ return IsMethod() ? klass_.GetDexFile().GetMethodName(GetMethodId())
+ : klass_.GetDexFile().GetFieldName(GetFieldId());
+ }
+
+ inline std::string GetSignature() const {
+ return IsMethod() ? klass_.GetDexFile().GetMethodSignature(GetMethodId()).ToString()
+ : klass_.GetDexFile().GetFieldTypeDescriptor(GetFieldId());
+ }
+
inline const DexFile::MethodId& GetMethodId() const {
- DCHECK(it_.IsAtMethod());
+ DCHECK(IsMethod());
return klass_.GetDexFile().GetMethodId(it_.GetMemberIndex());
}
inline const DexFile::FieldId& GetFieldId() const {
- DCHECK(!it_.IsAtMethod());
+ DCHECK(!IsMethod());
return klass_.GetDexFile().GetFieldId(it_.GetMemberIndex());
}
@@ -164,215 +234,474 @@ class DexMember {
const ClassDataItemIterator& it_;
};
-class HiddenApi FINAL {
+class ClassPath FINAL {
public:
- HiddenApi() : print_hidden_api_(false) {}
-
- void ParseArgs(int argc, char** argv) {
- original_argc = argc;
- original_argv = argv;
+ ClassPath(const std::vector<std::string>& dex_paths, bool open_writable) {
+ OpenDexFiles(dex_paths, open_writable);
+ }
- android::base::InitLogging(argv);
+ template<typename Fn>
+ void ForEachDexClass(Fn fn) {
+ for (auto& dex_file : dex_files_) {
+ for (uint32_t class_idx = 0; class_idx < dex_file->NumClassDefs(); ++class_idx) {
+ DexClass klass(*dex_file, class_idx);
+ fn(klass);
+ }
+ }
+ }
- // Skip over the command name.
- argv++;
- argc--;
+ template<typename Fn>
+ void ForEachDexMember(Fn fn) {
+ ForEachDexClass([&fn](DexClass& klass) {
+ const uint8_t* klass_data = klass.GetData();
+ if (klass_data != nullptr) {
+ for (ClassDataItemIterator it(klass.GetDexFile(), klass_data); it.HasNext(); it.Next()) {
+ DexMember member(klass, it);
+ fn(member);
+ }
+ }
+ });
+ }
- if (argc == 0) {
- Usage("No arguments specified");
+ void UpdateDexChecksums() {
+ for (auto& dex_file : dex_files_) {
+ // Obtain a writeable pointer to the dex header.
+ DexFile::Header* header = const_cast<DexFile::Header*>(&dex_file->GetHeader());
+ // Recalculate checksum and overwrite the value in the header.
+ header->checksum_ = dex_file->CalculateChecksum();
}
+ }
- for (int i = 0; i < argc; ++i) {
- const StringPiece option(argv[i]);
- const bool log_options = false;
- if (log_options) {
- LOG(INFO) << "hiddenapi: option[" << i << "]=" << argv[i];
+ private:
+ void OpenDexFiles(const std::vector<std::string>& dex_paths, bool open_writable) {
+ ArtDexFileLoader dex_loader;
+ std::string error_msg;
+
+ if (open_writable) {
+ for (const std::string& filename : dex_paths) {
+ File fd(filename.c_str(), O_RDWR, /* check_usage */ false);
+ CHECK_NE(fd.Fd(), -1) << "Unable to open file '" << filename << "': " << strerror(errno);
+
+ // Memory-map the dex file with MAP_SHARED flag so that changes in memory
+ // propagate to the underlying file. We run dex file verification as if
+ // the dex file was not in boot claass path to check basic assumptions,
+ // such as that at most one of public/private/protected flag is set.
+ // We do those checks here and skip them when loading the processed file
+ // into boot class path.
+ std::unique_ptr<const DexFile> dex_file(dex_loader.OpenDex(fd.Release(),
+ /* location */ filename,
+ /* verify */ true,
+ /* verify_checksum */ true,
+ /* mmap_shared */ true,
+ &error_msg));
+ CHECK(dex_file.get() != nullptr) << "Open failed for '" << filename << "' " << error_msg;
+ CHECK(dex_file->IsStandardDexFile()) << "Expected a standard dex file '" << filename << "'";
+ CHECK(dex_file->EnableWrite())
+ << "Failed to enable write permission for '" << filename << "'";
+ dex_files_.push_back(std::move(dex_file));
}
- if (option == "--print-hidden-api") {
- print_hidden_api_ = true;
- } else if (option.starts_with("--dex=")) {
- dex_paths_.push_back(option.substr(strlen("--dex=")).ToString());
- } else if (option.starts_with("--light-greylist=")) {
- light_greylist_path_ = option.substr(strlen("--light-greylist=")).ToString();
- } else if (option.starts_with("--dark-greylist=")) {
- dark_greylist_path_ = option.substr(strlen("--dark-greylist=")).ToString();
- } else if (option.starts_with("--blacklist=")) {
- blacklist_path_ = option.substr(strlen("--blacklist=")).ToString();
- } else {
- Usage("Unknown argument '%s'", option.data());
+ } else {
+ for (const std::string& filename : dex_paths) {
+ bool success = dex_loader.Open(filename.c_str(),
+ /* location */ filename,
+ /* verify */ true,
+ /* verify_checksum */ true,
+ &error_msg,
+ &dex_files_);
+ CHECK(success) << "Open failed for '" << filename << "' " << error_msg;
}
}
}
- bool ProcessDexFiles() {
- if (dex_paths_.empty()) {
- Usage("No DEX files specified");
- }
+ // Opened dex files. Note that these are opened as `const` but may be written into.
+ std::vector<std::unique_ptr<const DexFile>> dex_files_;
+};
- if (light_greylist_path_.empty() && dark_greylist_path_.empty() && blacklist_path_.empty()) {
- Usage("No API file specified");
- }
+class HierarchyClass FINAL {
+ public:
+ HierarchyClass() {}
- if (!light_greylist_path_.empty() && !OpenApiFile(light_greylist_path_, &light_greylist_)) {
- return false;
- }
+ void AddDexClass(const DexClass& klass) {
+ CHECK(dex_classes_.empty() || klass.Equals(dex_classes_.front()));
+ dex_classes_.push_back(klass);
+ }
- if (!dark_greylist_path_.empty() && !OpenApiFile(dark_greylist_path_, &dark_greylist_)) {
- return false;
- }
+ void AddExtends(HierarchyClass& parent) {
+ CHECK(!Contains(extends_, &parent));
+ CHECK(!Contains(parent.extended_by_, this));
+ extends_.push_back(&parent);
+ parent.extended_by_.push_back(this);
+ }
- if (!blacklist_path_.empty() && !OpenApiFile(blacklist_path_, &blacklist_)) {
- return false;
- }
+ const DexClass& GetOneDexClass() const {
+ CHECK(!dex_classes_.empty());
+ return dex_classes_.front();
+ }
- MemMap::Init();
- if (!OpenDexFiles()) {
- return false;
- }
+ // See comment on Hierarchy::ForEachResolvableMember.
+ template<typename Fn>
+ bool ForEachResolvableMember(const DexMember& other, Fn fn) {
+ return ForEachResolvableMember_Impl(other, fn) != ResolutionResult::kNotFound;
+ }
- DCHECK(!dex_files_.empty());
- for (auto& dex_file : dex_files_) {
- CategorizeAllClasses(*dex_file.get());
+ private:
+ // Result of resolution which takes into account whether the member was found
+ // for the first time or not. This is just a performance optimization to prevent
+ // re-visiting previously visited members.
+ // Note that order matters. When accumulating results, we always pick the maximum.
+ enum class ResolutionResult {
+ kNotFound,
+ kFoundOld,
+ kFoundNew,
+ };
+
+ inline ResolutionResult Accumulate(ResolutionResult a, ResolutionResult b) {
+ return static_cast<ResolutionResult>(
+ std::max(static_cast<unsigned>(a), static_cast<unsigned>(b)));
+ }
+
+ template<typename Fn>
+ ResolutionResult ForEachResolvableMember_Impl(const DexMember& other, Fn fn) {
+ // First try to find a member matching `other` in this class.
+ ResolutionResult foundInClass = ForEachMatchingMember(other, fn);
+
+ switch (foundInClass) {
+ case ResolutionResult::kFoundOld:
+ // A matching member was found and previously explored. All subclasses
+ // must have been explored too.
+ break;
+
+ case ResolutionResult::kFoundNew:
+ // A matching member was found and this was the first time it was visited.
+ // If it is a virtual method, visit all methods overriding/implementing it too.
+ if (other.IsVirtualMethod()) {
+ for (HierarchyClass* subclass : extended_by_) {
+ subclass->ForEachOverridingMember(other, fn);
+ }
+ }
+ break;
+
+ case ResolutionResult::kNotFound:
+ // A matching member was not found in this class. Explore the superclasses
+ // and implemented interfaces.
+ for (HierarchyClass* superclass : extends_) {
+ foundInClass = Accumulate(
+ foundInClass, superclass->ForEachResolvableMember_Impl(other, fn));
+ }
+ break;
}
- UpdateDexChecksums();
- return true;
+ return foundInClass;
}
- private:
- bool OpenApiFile(const std::string& path, std::unordered_set<std::string>* list) {
- DCHECK(list->empty());
- DCHECK(!path.empty());
-
- std::ifstream api_file(path, std::ifstream::in);
- if (api_file.fail()) {
- LOG(ERROR) << "Unable to open file '" << path << "' " << strerror(errno);
- return false;
+ template<typename Fn>
+ ResolutionResult ForEachMatchingMember(const DexMember& other, Fn fn) {
+ ResolutionResult found = ResolutionResult::kNotFound;
+ for (const DexClass& dex_class : dex_classes_) {
+ const uint8_t* data = dex_class.GetData();
+ if (data != nullptr) {
+ for (ClassDataItemIterator it(dex_class.GetDexFile(), data); it.HasNext(); it.Next()) {
+ DexMember member(dex_class, it);
+ if (member == other) {
+ found = Accumulate(found, fn(member) ? ResolutionResult::kFoundNew
+ : ResolutionResult::kFoundOld);
+ }
+ }
+ }
}
+ return found;
+ }
- for (std::string line; std::getline(api_file, line);) {
- list->insert(line);
+ template<typename Fn>
+ void ForEachOverridingMember(const DexMember& other, Fn fn) {
+ CHECK(other.IsVirtualMethod());
+ ResolutionResult found = ForEachMatchingMember(other, fn);
+ if (found == ResolutionResult::kFoundOld) {
+ // No need to explore further.
+ return;
+ } else {
+ for (HierarchyClass* subclass : extended_by_) {
+ subclass->ForEachOverridingMember(other, fn);
+ }
}
+ }
- api_file.close();
- return true;
+ // DexClass entries of this class found across all the provided dex files.
+ std::vector<DexClass> dex_classes_;
+
+ // Classes which this class inherits, or interfaces which it implements.
+ std::vector<HierarchyClass*> extends_;
+
+ // Classes which inherit from this class.
+ std::vector<HierarchyClass*> extended_by_;
+};
+
+class Hierarchy FINAL {
+ public:
+ explicit Hierarchy(ClassPath& class_path) : class_path_(class_path) {
+ BuildClassHierarchy();
}
- bool OpenDexFiles() {
- ArtDexFileLoader dex_loader;
- DCHECK(dex_files_.empty());
+ // Perform an operation for each member of the hierarchy which could potentially
+ // be the result of method/field resolution of `other`.
+ // The function `fn` should accept a DexMember reference and return true if
+ // the member was changed. This drives a performance optimization which only
+ // visits overriding members the first time the overridden member is visited.
+ // Returns true if at least one resolvable member was found.
+ template<typename Fn>
+ bool ForEachResolvableMember(const DexMember& other, Fn fn) {
+ HierarchyClass* klass = FindClass(other.GetDeclaringClass().GetDescriptor());
+ return (klass != nullptr) && klass->ForEachResolvableMember(other, fn);
+ }
- for (const std::string& filename : dex_paths_) {
- std::string error_msg;
+ private:
+ HierarchyClass* FindClass(const std::string& descriptor) {
+ auto it = classes_.find(descriptor);
+ if (it == classes_.end()) {
+ return nullptr;
+ } else {
+ return &it->second;
+ }
+ }
- File fd(filename.c_str(), O_RDWR, /* check_usage */ false);
- if (fd.Fd() == -1) {
- LOG(ERROR) << "Unable to open file '" << filename << "': " << strerror(errno);
- return false;
+ void BuildClassHierarchy() {
+ // Create one HierarchyClass entry in `classes_` per class descriptor
+ // and add all DexClass objects with the same descriptor to that entry.
+ class_path_.ForEachDexClass([this](DexClass& klass) {
+ classes_[klass.GetDescriptor()].AddDexClass(klass);
+ });
+
+ // Connect each HierarchyClass to its successors and predecessors.
+ for (auto& entry : classes_) {
+ HierarchyClass& klass = entry.second;
+ const DexClass& dex_klass = klass.GetOneDexClass();
+
+ if (!dex_klass.HasSuperclass()) {
+ CHECK(dex_klass.GetInterfaceDescriptors().empty())
+ << "java/lang/Object should not implement any interfaces";
+ continue;
}
- // Memory-map the dex file with MAP_SHARED flag so that changes in memory
- // propagate to the underlying file. We run dex file verification as if
- // the dex file was not in boot claass path to check basic assumptions,
- // such as that at most one of public/private/protected flag is set.
- // We do those checks here and skip them when loading the processed file
- // into boot class path.
- std::unique_ptr<const DexFile> dex_file(dex_loader.OpenDex(fd.Release(),
- /* location */ filename,
- /* verify */ true,
- /* verify_checksum */ true,
- /* mmap_shared */ true,
- &error_msg));
- if (dex_file.get() == nullptr) {
- LOG(ERROR) << "Open failed for '" << filename << "' " << error_msg;
- return false;
- }
+ HierarchyClass* superclass = FindClass(dex_klass.GetSuperclassDescriptor());
+ CHECK(superclass != nullptr);
+ klass.AddExtends(*superclass);
- if (!dex_file->IsStandardDexFile()) {
- LOG(ERROR) << "Expected a standard dex file '" << filename << "'";
- return false;
+ for (const std::string& iface_desc : dex_klass.GetInterfaceDescriptors()) {
+ HierarchyClass* iface = FindClass(iface_desc);
+ CHECK(iface != nullptr);
+ klass.AddExtends(*iface);
}
+ }
+ }
- // Change the protection of the memory mapping to read-write.
- if (!dex_file->EnableWrite()) {
- LOG(ERROR) << "Failed to enable write permission for '" << filename << "'";
- return false;
- }
+ ClassPath& class_path_;
+ std::map<std::string, HierarchyClass> classes_;
+};
- dex_files_.push_back(std::move(dex_file));
+class HiddenApi FINAL {
+ public:
+ HiddenApi() {}
+
+ void Run(int argc, char** argv) {
+ switch (ParseArgs(argc, argv)) {
+ case Command::kEncode:
+ EncodeAccessFlags();
+ break;
+ case Command::kList:
+ ListApi();
+ break;
}
- return true;
}
- void CategorizeAllClasses(const DexFile& dex_file) {
- for (uint32_t class_idx = 0; class_idx < dex_file.NumClassDefs(); ++class_idx) {
- DexClass klass(dex_file, class_idx);
- const uint8_t* klass_data = klass.GetData();
- if (klass_data == nullptr) {
- continue;
- }
+ private:
+ enum class Command {
+ kEncode,
+ kList,
+ };
- for (ClassDataItemIterator it(klass.GetDexFile(), klass_data); it.HasNext(); it.Next()) {
- DexMember member(klass, it);
-
- // Catagorize member and overwrite its access flags.
- // Note that if a member appears on multiple API lists, it will be categorized
- // as the strictest.
- bool is_hidden = true;
- if (member.IsOnApiList(blacklist_)) {
- member.SetHidden(HiddenApiAccessFlags::kBlacklist);
- } else if (member.IsOnApiList(dark_greylist_)) {
- member.SetHidden(HiddenApiAccessFlags::kDarkGreylist);
- } else if (member.IsOnApiList(light_greylist_)) {
- member.SetHidden(HiddenApiAccessFlags::kLightGreylist);
- } else {
- member.SetHidden(HiddenApiAccessFlags::kWhitelist);
- is_hidden = false;
- }
+ Command ParseArgs(int argc, char** argv) {
+ // Skip over the binary's path.
+ argv++;
+ argc--;
- if (print_hidden_api_ && is_hidden) {
- std::cout << member.GetApiEntry() << std::endl;
+ if (argc > 0) {
+ const StringPiece command(argv[0]);
+ if (command == "encode") {
+ for (int i = 1; i < argc; ++i) {
+ const StringPiece option(argv[i]);
+ if (option.starts_with("--dex=")) {
+ boot_dex_paths_.push_back(option.substr(strlen("--dex=")).ToString());
+ } else if (option.starts_with("--light-greylist=")) {
+ light_greylist_path_ = option.substr(strlen("--light-greylist=")).ToString();
+ } else if (option.starts_with("--dark-greylist=")) {
+ dark_greylist_path_ = option.substr(strlen("--dark-greylist=")).ToString();
+ } else if (option.starts_with("--blacklist=")) {
+ blacklist_path_ = option.substr(strlen("--blacklist=")).ToString();
+ } else {
+ Usage("Unknown argument '%s'", option.data());
+ }
+ }
+ return Command::kEncode;
+ } else if (command == "list") {
+ for (int i = 1; i < argc; ++i) {
+ const StringPiece option(argv[i]);
+ if (option.starts_with("--boot-dex=")) {
+ boot_dex_paths_.push_back(option.substr(strlen("--boot-dex=")).ToString());
+ } else if (option.starts_with("--stub-dex=")) {
+ stub_dex_paths_.push_back(option.substr(strlen("--stub-dex=")).ToString());
+ } else if (option.starts_with("--out-public=")) {
+ out_public_path_ = option.substr(strlen("--out-public=")).ToString();
+ } else if (option.starts_with("--out-private=")) {
+ out_private_path_ = option.substr(strlen("--out-private=")).ToString();
+ } else {
+ Usage("Unknown argument '%s'", option.data());
+ }
}
+ return Command::kList;
+ } else {
+ Usage("Unknown command '%s'", command.data());
}
+ } else {
+ Usage("No command specified");
}
}
- void UpdateDexChecksums() {
- for (auto& dex_file : dex_files_) {
- // Obtain a writeable pointer to the dex header.
- DexFile::Header* header = const_cast<DexFile::Header*>(&dex_file->GetHeader());
- // Recalculate checksum and overwrite the value in the header.
- header->checksum_ = dex_file->CalculateChecksum();
+ void EncodeAccessFlags() {
+ if (boot_dex_paths_.empty()) {
+ Usage("No boot DEX files specified");
+ }
+
+ // Load dex signatures.
+ std::map<std::string, HiddenApiAccessFlags::ApiList> api_list;
+ OpenApiFile(light_greylist_path_, api_list, HiddenApiAccessFlags::kLightGreylist);
+ OpenApiFile(dark_greylist_path_, api_list, HiddenApiAccessFlags::kDarkGreylist);
+ OpenApiFile(blacklist_path_, api_list, HiddenApiAccessFlags::kBlacklist);
+
+ // Open all dex files.
+ ClassPath boot_class_path(boot_dex_paths_, /* open_writable */ true);
+
+ // Set access flags of all members.
+ boot_class_path.ForEachDexMember([&api_list](DexMember& boot_member) {
+ auto it = api_list.find(boot_member.GetApiEntry());
+ if (it == api_list.end()) {
+ boot_member.SetHidden(HiddenApiAccessFlags::kWhitelist);
+ } else {
+ boot_member.SetHidden(it->second);
+ }
+ });
+
+ boot_class_path.UpdateDexChecksums();
+ }
+
+ void OpenApiFile(const std::string& path,
+ std::map<std::string, HiddenApiAccessFlags::ApiList>& api_list,
+ HiddenApiAccessFlags::ApiList membership) {
+ if (path.empty()) {
+ return;
}
+
+ std::ifstream api_file(path, std::ifstream::in);
+ CHECK(!api_file.fail()) << "Unable to open file '" << path << "' " << strerror(errno);
+
+ for (std::string line; std::getline(api_file, line);) {
+ CHECK(api_list.find(line) == api_list.end())
+ << "Duplicate entry: " << line << " (" << api_list[line] << " and " << membership << ")";
+ api_list.emplace(line, membership);
+ }
+ api_file.close();
}
- // Print signatures of APIs which have been grey-/blacklisted.
- bool print_hidden_api_;
+ void ListApi() {
+ if (boot_dex_paths_.empty()) {
+ Usage("No boot DEX files specified");
+ } else if (stub_dex_paths_.empty()) {
+ Usage("No stub DEX files specified");
+ } else if (out_public_path_.empty()) {
+ Usage("No public API output path specified");
+ } else if (out_private_path_.empty()) {
+ Usage("No private API output path specified");
+ }
+
+ // Complete list of boot class path members. The associated boolean states
+ // whether it is public (true) or private (false).
+ std::map<std::string, bool> boot_members;
+
+ // Deduplicate errors before printing them.
+ std::set<std::string> unresolved;
+
+ // Open all dex files.
+ ClassPath stub_class_path(stub_dex_paths_, /* open_writable */ false);
+ ClassPath boot_class_path(boot_dex_paths_, /* open_writable */ false);
+ Hierarchy boot_hierarchy(boot_class_path);
+
+ // Mark all boot dex members private.
+ boot_class_path.ForEachDexMember([&boot_members](DexMember& boot_member) {
+ boot_members[boot_member.GetApiEntry()] = false;
+ });
+
+ // Resolve each SDK dex member against the framework and mark it white.
+ stub_class_path.ForEachDexMember(
+ [&boot_hierarchy, &boot_members, &unresolved](DexMember& stub_member) {
+ if (!stub_member.IsVisible()) {
+ // Typically fake constructors and inner-class `this` fields.
+ return;
+ }
+ bool resolved = boot_hierarchy.ForEachResolvableMember(
+ stub_member,
+ [&boot_members](DexMember& boot_member) {
+ std::string entry = boot_member.GetApiEntry();
+ auto it = boot_members.find(entry);
+ CHECK(it != boot_members.end());
+ if (it->second) {
+ return false; // has been marked before
+ } else {
+ boot_members.insert(it, std::make_pair(entry, true));
+ return true; // marked for the first time
+ }
+ });
+ if (!resolved) {
+ unresolved.insert(stub_member.GetApiEntry());
+ }
+ });
+
+ // Print errors.
+ for (const std::string& str : unresolved) {
+ LOG(WARNING) << "unresolved: " << str;
+ }
+
+ // Write into public/private API files.
+ std::ofstream file_public(out_public_path_.c_str());
+ std::ofstream file_private(out_private_path_.c_str());
+ for (const std::pair<std::string, bool> entry : boot_members) {
+ if (entry.second) {
+ file_public << entry.first << std::endl;
+ } else {
+ file_private << entry.first << std::endl;
+ }
+ }
+ file_public.close();
+ file_private.close();
+ }
// Paths to DEX files which should be processed.
- std::vector<std::string> dex_paths_;
+ std::vector<std::string> boot_dex_paths_;
+ std::vector<std::string> stub_dex_paths_;
// Paths to text files which contain the lists of API members.
std::string light_greylist_path_;
std::string dark_greylist_path_;
std::string blacklist_path_;
- // Opened DEX files. Note that these are opened as `const` but eventually will be written into.
- std::vector<std::unique_ptr<const DexFile>> dex_files_;
-
- // Signatures of DEX members loaded from `light_greylist_path_`, `dark_greylist_path_`,
- // `blacklist_path_`.
- std::unordered_set<std::string> light_greylist_;
- std::unordered_set<std::string> dark_greylist_;
- std::unordered_set<std::string> blacklist_;
+ // Paths to text files to which we will output list of all API members.
+ std::string out_public_path_;
+ std::string out_private_path_;
};
} // namespace art
int main(int argc, char** argv) {
- art::HiddenApi hiddenapi;
-
- // Parse arguments. Argument mistakes will lead to exit(EXIT_FAILURE) in UsageError.
- hiddenapi.ParseArgs(argc, argv);
- return hiddenapi.ProcessDexFiles() ? EXIT_SUCCESS : EXIT_FAILURE;
+ android::base::InitLogging(argv);
+ art::MemMap::Init();
+ art::HiddenApi().Run(argc, argv);
+ return EXIT_SUCCESS;
}
diff --git a/tools/hiddenapi/hiddenapi_test.cc b/tools/hiddenapi/hiddenapi_test.cc
index ed880e0e92..aa87f21e7f 100644
--- a/tools/hiddenapi/hiddenapi_test.cc
+++ b/tools/hiddenapi/hiddenapi_test.cc
@@ -66,15 +66,18 @@ class HiddenApiTest : public CommonRuntimeTest {
std::vector<std::string> argv_str;
argv_str.push_back(GetHiddenApiCmd());
argv_str.insert(argv_str.end(), extra_args.begin(), extra_args.end());
+ argv_str.push_back("encode");
argv_str.push_back("--dex=" + out_dex->GetFilename());
argv_str.push_back("--light-greylist=" + light_greylist.GetFilename());
argv_str.push_back("--dark-greylist=" + dark_greylist.GetFilename());
argv_str.push_back("--blacklist=" + blacklist.GetFilename());
int return_code = ExecAndReturnCode(argv_str, &error);
- if (return_code != 0) {
- LOG(FATAL) << "HiddenApi binary exited with unexpected return code " << return_code;
+ if (return_code == 0) {
+ return OpenDex(*out_dex);
+ } else {
+ LOG(ERROR) << "HiddenApi binary exited with unexpected return code " << return_code;
+ return nullptr;
}
- return OpenDex(*out_dex);
}
std::unique_ptr<const DexFile> OpenDex(const ScratchFile& file) {
@@ -226,6 +229,7 @@ TEST_F(HiddenApiTest, InstanceFieldNoMatch) {
OpenStream(dark_greylist) << "LMain;->ifield:LBadType2;" << std::endl;
OpenStream(blacklist) << "LMain;->ifield:LBadType3;" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_NE(dex_file.get(), nullptr);
ASSERT_EQ(HiddenApiAccessFlags::kWhitelist, GetIFieldHiddenFlags(*dex_file));
}
@@ -235,6 +239,7 @@ TEST_F(HiddenApiTest, InstanceFieldLightGreylistMatch) {
OpenStream(dark_greylist) << "LMain;->ifield:LBadType2;" << std::endl;
OpenStream(blacklist) << "LMain;->ifield:LBadType3;" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_NE(dex_file.get(), nullptr);
ASSERT_EQ(HiddenApiAccessFlags::kLightGreylist, GetIFieldHiddenFlags(*dex_file));
}
@@ -244,6 +249,7 @@ TEST_F(HiddenApiTest, InstanceFieldDarkGreylistMatch) {
OpenStream(dark_greylist) << "LMain;->ifield:I" << std::endl;
OpenStream(blacklist) << "LMain;->ifield:LBadType3;" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_NE(dex_file.get(), nullptr);
ASSERT_EQ(HiddenApiAccessFlags::kDarkGreylist, GetIFieldHiddenFlags(*dex_file));
}
@@ -253,6 +259,7 @@ TEST_F(HiddenApiTest, InstanceFieldBlacklistMatch) {
OpenStream(dark_greylist) << "LMain;->ifield:LBadType2;" << std::endl;
OpenStream(blacklist) << "LMain;->ifield:I" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_NE(dex_file.get(), nullptr);
ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetIFieldHiddenFlags(*dex_file));
}
@@ -262,7 +269,7 @@ TEST_F(HiddenApiTest, InstanceFieldTwoListsMatch1) {
OpenStream(dark_greylist) << "LMain;->ifield:I" << std::endl;
OpenStream(blacklist) << "LMain;->ifield:I" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
- ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetIFieldHiddenFlags(*dex_file));
+ ASSERT_EQ(dex_file.get(), nullptr);
}
TEST_F(HiddenApiTest, InstanceFieldTwoListsMatch2) {
@@ -271,7 +278,7 @@ TEST_F(HiddenApiTest, InstanceFieldTwoListsMatch2) {
OpenStream(dark_greylist) << "LMain;->ifield:LBadType2;" << std::endl;
OpenStream(blacklist) << "LMain;->ifield:I" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
- ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetIFieldHiddenFlags(*dex_file));
+ ASSERT_EQ(dex_file.get(), nullptr);
}
TEST_F(HiddenApiTest, InstanceFieldTwoListsMatch3) {
@@ -280,7 +287,7 @@ TEST_F(HiddenApiTest, InstanceFieldTwoListsMatch3) {
OpenStream(dark_greylist) << "LMain;->ifield:I" << std::endl;
OpenStream(blacklist) << "LMain;->ifield:LBadType3;" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
- ASSERT_EQ(HiddenApiAccessFlags::kDarkGreylist, GetIFieldHiddenFlags(*dex_file));
+ ASSERT_EQ(dex_file.get(), nullptr);
}
TEST_F(HiddenApiTest, StaticFieldNoMatch) {
@@ -289,6 +296,7 @@ TEST_F(HiddenApiTest, StaticFieldNoMatch) {
OpenStream(dark_greylist) << "LMain;->sfield:LBadType2;" << std::endl;
OpenStream(blacklist) << "LMain;->sfield:LBadType3;" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_NE(dex_file.get(), nullptr);
ASSERT_EQ(HiddenApiAccessFlags::kWhitelist, GetSFieldHiddenFlags(*dex_file));
}
@@ -298,6 +306,7 @@ TEST_F(HiddenApiTest, StaticFieldLightGreylistMatch) {
OpenStream(dark_greylist) << "LMain;->sfield:LBadType2;" << std::endl;
OpenStream(blacklist) << "LMain;->sfield:LBadType3;" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_NE(dex_file.get(), nullptr);
ASSERT_EQ(HiddenApiAccessFlags::kLightGreylist, GetSFieldHiddenFlags(*dex_file));
}
@@ -307,6 +316,7 @@ TEST_F(HiddenApiTest, StaticFieldDarkGreylistMatch) {
OpenStream(dark_greylist) << "LMain;->sfield:Ljava/lang/Object;" << std::endl;
OpenStream(blacklist) << "LMain;->sfield:LBadType3;" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_NE(dex_file.get(), nullptr);
ASSERT_EQ(HiddenApiAccessFlags::kDarkGreylist, GetSFieldHiddenFlags(*dex_file));
}
@@ -316,6 +326,7 @@ TEST_F(HiddenApiTest, StaticFieldBlacklistMatch) {
OpenStream(dark_greylist) << "LMain;->sfield:LBadType2;" << std::endl;
OpenStream(blacklist) << "LMain;->sfield:Ljava/lang/Object;" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_NE(dex_file.get(), nullptr);
ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetSFieldHiddenFlags(*dex_file));
}
@@ -325,7 +336,7 @@ TEST_F(HiddenApiTest, StaticFieldTwoListsMatch1) {
OpenStream(dark_greylist) << "LMain;->sfield:Ljava/lang/Object;" << std::endl;
OpenStream(blacklist) << "LMain;->sfield:Ljava/lang/Object;" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
- ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetSFieldHiddenFlags(*dex_file));
+ ASSERT_EQ(dex_file.get(), nullptr);
}
TEST_F(HiddenApiTest, StaticFieldTwoListsMatch2) {
@@ -334,7 +345,7 @@ TEST_F(HiddenApiTest, StaticFieldTwoListsMatch2) {
OpenStream(dark_greylist) << "LMain;->sfield:LBadType2;" << std::endl;
OpenStream(blacklist) << "LMain;->sfield:Ljava/lang/Object;" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
- ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetSFieldHiddenFlags(*dex_file));
+ ASSERT_EQ(dex_file.get(), nullptr);
}
TEST_F(HiddenApiTest, StaticFieldTwoListsMatch3) {
@@ -343,7 +354,7 @@ TEST_F(HiddenApiTest, StaticFieldTwoListsMatch3) {
OpenStream(dark_greylist) << "LMain;->sfield:Ljava/lang/Object;" << std::endl;
OpenStream(blacklist) << "LMain;->sfield:LBadType3;" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
- ASSERT_EQ(HiddenApiAccessFlags::kDarkGreylist, GetSFieldHiddenFlags(*dex_file));
+ ASSERT_EQ(dex_file.get(), nullptr);
}
TEST_F(HiddenApiTest, InstanceMethodNoMatch) {
@@ -352,6 +363,7 @@ TEST_F(HiddenApiTest, InstanceMethodNoMatch) {
OpenStream(dark_greylist) << "LMain;->imethod(LBadType2;)V" << std::endl;
OpenStream(blacklist) << "LMain;->imethod(LBadType3;)V" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_NE(dex_file.get(), nullptr);
ASSERT_EQ(HiddenApiAccessFlags::kWhitelist, GetIMethodHiddenFlags(*dex_file));
}
@@ -361,6 +373,7 @@ TEST_F(HiddenApiTest, InstanceMethodLightGreylistMatch) {
OpenStream(dark_greylist) << "LMain;->imethod(LBadType2;)V" << std::endl;
OpenStream(blacklist) << "LMain;->imethod(LBadType3;)V" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_NE(dex_file.get(), nullptr);
ASSERT_EQ(HiddenApiAccessFlags::kLightGreylist, GetIMethodHiddenFlags(*dex_file));
}
@@ -370,6 +383,7 @@ TEST_F(HiddenApiTest, InstanceMethodDarkGreylistMatch) {
OpenStream(dark_greylist) << "LMain;->imethod(J)V" << std::endl;
OpenStream(blacklist) << "LMain;->imethod(LBadType3;)V" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_NE(dex_file.get(), nullptr);
ASSERT_EQ(HiddenApiAccessFlags::kDarkGreylist, GetIMethodHiddenFlags(*dex_file));
}
@@ -379,6 +393,7 @@ TEST_F(HiddenApiTest, InstanceMethodBlacklistMatch) {
OpenStream(dark_greylist) << "LMain;->imethod(LBadType2;)V" << std::endl;
OpenStream(blacklist) << "LMain;->imethod(J)V" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_NE(dex_file.get(), nullptr);
ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetIMethodHiddenFlags(*dex_file));
}
@@ -388,7 +403,7 @@ TEST_F(HiddenApiTest, InstanceMethodTwoListsMatch1) {
OpenStream(dark_greylist) << "LMain;->imethod(J)V" << std::endl;
OpenStream(blacklist) << "LMain;->imethod(J)V" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
- ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetIMethodHiddenFlags(*dex_file));
+ ASSERT_EQ(dex_file.get(), nullptr);
}
TEST_F(HiddenApiTest, InstanceMethodTwoListsMatch2) {
@@ -397,7 +412,7 @@ TEST_F(HiddenApiTest, InstanceMethodTwoListsMatch2) {
OpenStream(dark_greylist) << "LMain;->imethod(LBadType2;)V" << std::endl;
OpenStream(blacklist) << "LMain;->imethod(J)V" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
- ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetIMethodHiddenFlags(*dex_file));
+ ASSERT_EQ(dex_file.get(), nullptr);
}
TEST_F(HiddenApiTest, InstanceMethodTwoListsMatch3) {
@@ -406,7 +421,7 @@ TEST_F(HiddenApiTest, InstanceMethodTwoListsMatch3) {
OpenStream(dark_greylist) << "LMain;->imethod(J)V" << std::endl;
OpenStream(blacklist) << "LMain;->imethod(LBadType3;)V" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
- ASSERT_EQ(HiddenApiAccessFlags::kDarkGreylist, GetIMethodHiddenFlags(*dex_file));
+ ASSERT_EQ(dex_file.get(), nullptr);
}
TEST_F(HiddenApiTest, StaticMethodNoMatch) {
@@ -415,6 +430,7 @@ TEST_F(HiddenApiTest, StaticMethodNoMatch) {
OpenStream(dark_greylist) << "LMain;->smethod(LBadType2;)V" << std::endl;
OpenStream(blacklist) << "LMain;->smethod(LBadType3;)V" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_NE(dex_file.get(), nullptr);
ASSERT_EQ(HiddenApiAccessFlags::kWhitelist, GetSMethodHiddenFlags(*dex_file));
}
@@ -424,6 +440,7 @@ TEST_F(HiddenApiTest, StaticMethodLightGreylistMatch) {
OpenStream(dark_greylist) << "LMain;->smethod(LBadType2;)V" << std::endl;
OpenStream(blacklist) << "LMain;->smethod(LBadType3;)V" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_NE(dex_file.get(), nullptr);
ASSERT_EQ(HiddenApiAccessFlags::kLightGreylist, GetSMethodHiddenFlags(*dex_file));
}
@@ -433,6 +450,7 @@ TEST_F(HiddenApiTest, StaticMethodDarkGreylistMatch) {
OpenStream(dark_greylist) << "LMain;->smethod(Ljava/lang/Object;)V" << std::endl;
OpenStream(blacklist) << "LMain;->smethod(LBadType3;)V" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_NE(dex_file.get(), nullptr);
ASSERT_EQ(HiddenApiAccessFlags::kDarkGreylist, GetSMethodHiddenFlags(*dex_file));
}
@@ -442,6 +460,7 @@ TEST_F(HiddenApiTest, StaticMethodBlacklistMatch) {
OpenStream(dark_greylist) << "LMain;->smethod(LBadType2;)V" << std::endl;
OpenStream(blacklist) << "LMain;->smethod(Ljava/lang/Object;)V" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_NE(dex_file.get(), nullptr);
ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetSMethodHiddenFlags(*dex_file));
}
@@ -451,7 +470,7 @@ TEST_F(HiddenApiTest, StaticMethodTwoListsMatch1) {
OpenStream(dark_greylist) << "LMain;->smethod(Ljava/lang/Object;)V" << std::endl;
OpenStream(blacklist) << "LMain;->smethod(Ljava/lang/Object;)V" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
- ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetSMethodHiddenFlags(*dex_file));
+ ASSERT_EQ(dex_file.get(), nullptr);
}
TEST_F(HiddenApiTest, StaticMethodTwoListsMatch2) {
@@ -460,7 +479,7 @@ TEST_F(HiddenApiTest, StaticMethodTwoListsMatch2) {
OpenStream(dark_greylist) << "LMain;->smethod(LBadType2;)V" << std::endl;
OpenStream(blacklist) << "LMain;->smethod(Ljava/lang/Object;)V" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
- ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetSMethodHiddenFlags(*dex_file));
+ ASSERT_EQ(dex_file.get(), nullptr);
}
TEST_F(HiddenApiTest, StaticMethodTwoListsMatch3) {
@@ -469,7 +488,7 @@ TEST_F(HiddenApiTest, StaticMethodTwoListsMatch3) {
OpenStream(dark_greylist) << "LMain;->smethod(Ljava/lang/Object;)V" << std::endl;
OpenStream(blacklist) << "LMain;->smethod(LBadType3;)V" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
- ASSERT_EQ(HiddenApiAccessFlags::kDarkGreylist, GetSMethodHiddenFlags(*dex_file));
+ ASSERT_EQ(dex_file.get(), nullptr);
}
TEST_F(HiddenApiTest, InstanceNativeMethodNoMatch) {
@@ -478,6 +497,7 @@ TEST_F(HiddenApiTest, InstanceNativeMethodNoMatch) {
OpenStream(dark_greylist) << "LMain;->inmethod(LBadType2;)V" << std::endl;
OpenStream(blacklist) << "LMain;->inmethod(LBadType3;)V" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_NE(dex_file.get(), nullptr);
ASSERT_EQ(HiddenApiAccessFlags::kWhitelist, GetINMethodHiddenFlags(*dex_file));
}
@@ -487,6 +507,7 @@ TEST_F(HiddenApiTest, InstanceNativeMethodLightGreylistMatch) {
OpenStream(dark_greylist) << "LMain;->inmethod(LBadType2;)V" << std::endl;
OpenStream(blacklist) << "LMain;->inmethod(LBadType3;)V" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_NE(dex_file.get(), nullptr);
ASSERT_EQ(HiddenApiAccessFlags::kLightGreylist, GetINMethodHiddenFlags(*dex_file));
}
@@ -496,6 +517,7 @@ TEST_F(HiddenApiTest, InstanceNativeMethodDarkGreylistMatch) {
OpenStream(dark_greylist) << "LMain;->inmethod(C)V" << std::endl;
OpenStream(blacklist) << "LMain;->inmethod(LBadType3;)V" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_NE(dex_file.get(), nullptr);
ASSERT_EQ(HiddenApiAccessFlags::kDarkGreylist, GetINMethodHiddenFlags(*dex_file));
}
@@ -505,6 +527,7 @@ TEST_F(HiddenApiTest, InstanceNativeMethodBlacklistMatch) {
OpenStream(dark_greylist) << "LMain;->inmethod(LBadType2;)V" << std::endl;
OpenStream(blacklist) << "LMain;->inmethod(C)V" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_NE(dex_file.get(), nullptr);
ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetINMethodHiddenFlags(*dex_file));
}
@@ -514,7 +537,7 @@ TEST_F(HiddenApiTest, InstanceNativeMethodTwoListsMatch1) {
OpenStream(dark_greylist) << "LMain;->inmethod(C)V" << std::endl;
OpenStream(blacklist) << "LMain;->inmethod(C)V" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
- ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetINMethodHiddenFlags(*dex_file));
+ ASSERT_EQ(dex_file.get(), nullptr);
}
TEST_F(HiddenApiTest, InstanceNativeMethodTwoListsMatch2) {
@@ -523,7 +546,7 @@ TEST_F(HiddenApiTest, InstanceNativeMethodTwoListsMatch2) {
OpenStream(dark_greylist) << "LMain;->inmethod(LBadType2;)V" << std::endl;
OpenStream(blacklist) << "LMain;->inmethod(C)V" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
- ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetINMethodHiddenFlags(*dex_file));
+ ASSERT_EQ(dex_file.get(), nullptr);
}
TEST_F(HiddenApiTest, InstanceNativeMethodTwoListsMatch3) {
@@ -532,7 +555,7 @@ TEST_F(HiddenApiTest, InstanceNativeMethodTwoListsMatch3) {
OpenStream(dark_greylist) << "LMain;->inmethod(C)V" << std::endl;
OpenStream(blacklist) << "LMain;->inmethod(LBadType3;)V" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
- ASSERT_EQ(HiddenApiAccessFlags::kDarkGreylist, GetINMethodHiddenFlags(*dex_file));
+ ASSERT_EQ(dex_file.get(), nullptr);
}
TEST_F(HiddenApiTest, StaticNativeMethodNoMatch) {
@@ -541,6 +564,7 @@ TEST_F(HiddenApiTest, StaticNativeMethodNoMatch) {
OpenStream(dark_greylist) << "LMain;->snmethod(LBadType2;)V" << std::endl;
OpenStream(blacklist) << "LMain;->snmethod(LBadType3;)V" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_NE(dex_file.get(), nullptr);
ASSERT_EQ(HiddenApiAccessFlags::kWhitelist, GetSNMethodHiddenFlags(*dex_file));
}
@@ -550,6 +574,7 @@ TEST_F(HiddenApiTest, StaticNativeMethodLightGreylistMatch) {
OpenStream(dark_greylist) << "LMain;->snmethod(LBadType2;)V" << std::endl;
OpenStream(blacklist) << "LMain;->snmethod(LBadType3;)V" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_NE(dex_file.get(), nullptr);
ASSERT_EQ(HiddenApiAccessFlags::kLightGreylist, GetSNMethodHiddenFlags(*dex_file));
}
@@ -559,6 +584,7 @@ TEST_F(HiddenApiTest, StaticNativeMethodDarkGreylistMatch) {
OpenStream(dark_greylist) << "LMain;->snmethod(Ljava/lang/Integer;)V" << std::endl;
OpenStream(blacklist) << "LMain;->snmethod(LBadType3;)V" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_NE(dex_file.get(), nullptr);
ASSERT_EQ(HiddenApiAccessFlags::kDarkGreylist, GetSNMethodHiddenFlags(*dex_file));
}
@@ -568,6 +594,7 @@ TEST_F(HiddenApiTest, StaticNativeMethodBlacklistMatch) {
OpenStream(dark_greylist) << "LMain;->snmethod(LBadType2;)V" << std::endl;
OpenStream(blacklist) << "LMain;->snmethod(Ljava/lang/Integer;)V" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_NE(dex_file.get(), nullptr);
ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetSNMethodHiddenFlags(*dex_file));
}
@@ -577,7 +604,7 @@ TEST_F(HiddenApiTest, StaticNativeMethodTwoListsMatch1) {
OpenStream(dark_greylist) << "LMain;->snmethod(Ljava/lang/Integer;)V" << std::endl;
OpenStream(blacklist) << "LMain;->snmethod(Ljava/lang/Integer;)V" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
- ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetSNMethodHiddenFlags(*dex_file));
+ ASSERT_EQ(dex_file.get(), nullptr);
}
TEST_F(HiddenApiTest, StaticNativeMethodTwoListsMatch2) {
@@ -586,7 +613,7 @@ TEST_F(HiddenApiTest, StaticNativeMethodTwoListsMatch2) {
OpenStream(dark_greylist) << "LMain;->snmethod(LBadType2;)V" << std::endl;
OpenStream(blacklist) << "LMain;->snmethod(Ljava/lang/Integer;)V" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
- ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetSNMethodHiddenFlags(*dex_file));
+ ASSERT_EQ(dex_file.get(), nullptr);
}
TEST_F(HiddenApiTest, StaticNativeMethodTwoListsMatch3) {
@@ -595,7 +622,7 @@ TEST_F(HiddenApiTest, StaticNativeMethodTwoListsMatch3) {
OpenStream(dark_greylist) << "LMain;->snmethod(Ljava/lang/Integer;)V" << std::endl;
OpenStream(blacklist) << "LMain;->snmethod(LBadType3;)V" << std::endl;
auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
- ASSERT_EQ(HiddenApiAccessFlags::kDarkGreylist, GetSNMethodHiddenFlags(*dex_file));
+ ASSERT_EQ(dex_file.get(), nullptr);
}
} // namespace art
diff --git a/tools/run-jdwp-tests.sh b/tools/run-jdwp-tests.sh
index 0796432f68..eb33da2267 100755
--- a/tools/run-jdwp-tests.sh
+++ b/tools/run-jdwp-tests.sh
@@ -251,6 +251,9 @@ else
vm_args="${vm_args} --vm-arg -Djpda.settings.debuggeeAgentName=${with_jdwp_path}"
fi
vm_args="$vm_args --vm-arg -Xcompiler-option --vm-arg --debuggable"
+ # we don't want to be trying to connect to adbconnection which might not have
+ # been built.
+ vm_args="${vm_args} --vm-arg -XjdwpProvider:none"
# Make sure the debuggee doesn't clean up what the debugger has generated.
art_debugee="$art_debugee --no-clean"
fi
@@ -327,6 +330,10 @@ if [[ $mode != "ri" ]]; then
if [[ "x$with_jdwp_path" == "x" ]]; then
# Need to enable the internal jdwp implementation.
art_debugee="${art_debugee} -XjdwpProvider:internal"
+ else
+ # need to disable the jdwpProvider since we give the agent explicitly on the
+ # cmdline.
+ art_debugee="${art_debugee} -XjdwpProvider:none"
fi
else
toolchain_args="--toolchain javac --language CUR"
diff --git a/tools/teardown-buildbot-device.sh b/tools/teardown-buildbot-device.sh
index bf14ca4f9f..be68b9f490 100755
--- a/tools/teardown-buildbot-device.sh
+++ b/tools/teardown-buildbot-device.sh
@@ -25,62 +25,118 @@ adb root
adb wait-for-device
if [[ -n "$ART_TEST_CHROOT" ]]; then
-
- # remove_filesystem_from_chroot DIR-IN-CHROOT FSTYPE REMOVE-DIR-IN-CHROOT
- # -----------------------------------------------------------------------
- # Unmount filesystem with type FSTYPE mounted in directory DIR-IN-CHROOT
- # under the chroot directory.
- # Remove DIR-IN-CHROOT under the chroot if REMOVE-DIR-IN-CHROOT is
- # true.
- remove_filesystem_from_chroot() {
- local dir_in_chroot=$1
- local fstype=$2
- local remove_dir=$3
- local dir="$ART_TEST_CHROOT/$dir_in_chroot"
- adb shell test -d "$dir" \
- && adb shell mount | grep -q "^$fstype on $dir type $fstype " \
- && if adb shell umount "$dir"; then
- $remove_dir && adb shell rmdir "$dir"
- else
- adb shell lsof "$dir"
- fi
- }
-
- # Tear down the chroot dir.
- echo -e "${green}Tear down the chroot dir in $ART_TEST_CHROOT${nc}"
-
# Check that ART_TEST_CHROOT is correctly defined.
[[ "x$ART_TEST_CHROOT" = x/* ]] || { echo "$ART_TEST_CHROOT is not an absolute path"; exit 1; }
- # Remove /dev from chroot.
- remove_filesystem_from_chroot dev tmpfs true
-
- # Remove /sys/kernel/debug from chroot.
- # The /sys/kernel/debug directory under the chroot dir cannot be
- # deleted, as it is part of the host device's /sys filesystem.
- remove_filesystem_from_chroot sys/kernel/debug debugfs false
- # Remove /sys from chroot.
- remove_filesystem_from_chroot sys sysfs true
-
- # Remove /proc from chroot.
- remove_filesystem_from_chroot proc proc true
-
- # Remove /etc from chroot.
- adb shell rm -f "$ART_TEST_CHROOT/etc"
- adb shell rm -rf "$ART_TEST_CHROOT/system/etc"
-
- # Remove directories used for ART testing in chroot.
- adb shell rm -rf "$ART_TEST_CHROOT/data/local/tmp"
- adb shell rm -rf "$ART_TEST_CHROOT/data/dalvik-cache"
- adb shell rm -rf "$ART_TEST_CHROOT/tmp"
-
- # Remove property_contexts file(s) from chroot.
- property_context_files="/property_contexts \
- /system/etc/selinux/plat_property_contexts \
- /vendor/etc/selinux/nonplat_property_context \
- /plat_property_contexts \
- /nonplat_property_contexts"
- for f in $property_context_files; do
- adb shell rm -f "$ART_TEST_CHROOT$f"
- done
+ if adb shell test -d "$ART_TEST_CHROOT"; then
+ # Display users of the chroot dir.
+
+ echo -e "${green}List open files under chroot dir $ART_TEST_CHROOT${nc}"
+ adb shell lsof | grep "$ART_TEST_CHROOT"
+
+ # for_all_chroot_process ACTION
+ # -----------------------------
+ # Execute ACTION on all processes running from binaries located
+ # under the chroot directory. ACTION is passed two arguments: the
+ # PID of the process, and a string containing the command line
+ # that started this process.
+ for_all_chroot_process() {
+ local action=$1
+ for link in $(adb shell ls -d "/proc/*/root"); do
+ local root=$(adb shell readlink "$link")
+ if [[ "x$root" = "x$ART_TEST_CHROOT" ]]; then
+ local dir=$(dirname "$link")
+ local pid=$(basename "$dir")
+ local cmdline=$(adb shell cat "$dir"/cmdline | tr '\000' ' ')
+ $action "$pid" "$cmdline"
+ fi
+ done
+ }
+
+ # display_process PID CMDLINE
+ # ---------------------------
+ # Display information about process with given PID, that was started with CMDLINE.
+ display_process() {
+ local pid=$1
+ local cmdline=$2
+ echo "$cmdline (PID: $pid)"
+ }
+
+ echo -e "${green}List processes running from binaries under chroot dir $ART_TEST_CHROOT${nc}"
+ for_all_chroot_process display_process
+
+ # Tear down the chroot dir.
+
+ echo -e "${green}Tear down the chroot set up in $ART_TEST_CHROOT${nc}"
+
+ # remove_filesystem_from_chroot DIR-IN-CHROOT FSTYPE REMOVE-DIR-IN-CHROOT
+ # -----------------------------------------------------------------------
+ # Unmount filesystem with type FSTYPE mounted in directory DIR-IN-CHROOT
+ # under the chroot directory.
+ # Remove DIR-IN-CHROOT under the chroot if REMOVE-DIR-IN-CHROOT is
+ # true.
+ remove_filesystem_from_chroot() {
+ local dir_in_chroot=$1
+ local fstype=$2
+ local remove_dir=$3
+ local dir="$ART_TEST_CHROOT/$dir_in_chroot"
+ adb shell test -d "$dir" \
+ && adb shell mount | grep -q "^$fstype on $dir type $fstype " \
+ && if adb shell umount "$dir"; then
+ $remove_dir && adb shell rmdir "$dir"
+ else
+ echo "Files still open in $dir:"
+ adb shell lsof | grep "$dir"
+ fi
+ }
+
+ # Remove /dev from chroot.
+ remove_filesystem_from_chroot dev tmpfs true
+
+ # Remove /sys/kernel/debug from chroot.
+ # The /sys/kernel/debug directory under the chroot dir cannot be
+ # deleted, as it is part of the host device's /sys filesystem.
+ remove_filesystem_from_chroot sys/kernel/debug debugfs false
+ # Remove /sys from chroot.
+ remove_filesystem_from_chroot sys sysfs true
+
+ # Remove /proc from chroot.
+ remove_filesystem_from_chroot proc proc true
+
+ # Remove /etc from chroot.
+ adb shell rm -f "$ART_TEST_CHROOT/etc"
+ adb shell rm -rf "$ART_TEST_CHROOT/system/etc"
+
+ # Remove directories used for ART testing in chroot.
+ adb shell rm -rf "$ART_TEST_CHROOT/data/local/tmp"
+ adb shell rm -rf "$ART_TEST_CHROOT/data/dalvik-cache"
+ adb shell rm -rf "$ART_TEST_CHROOT/tmp"
+
+ # Remove property_contexts file(s) from chroot.
+ property_context_files="/property_contexts \
+ /system/etc/selinux/plat_property_contexts \
+ /vendor/etc/selinux/nonplat_property_context \
+ /plat_property_contexts \
+ /nonplat_property_contexts"
+ for f in $property_context_files; do
+ adb shell rm -f "$ART_TEST_CHROOT$f"
+ done
+
+
+ # Kill processes still running in the chroot.
+
+ # kill_process PID CMDLINE
+ # ------------------------
+ # Kill process with given PID, that was started with CMDLINE.
+ kill_process() {
+ local pid=$1
+ local cmdline=$2
+ echo "Killing $cmdline (PID: $pid)"
+ adb shell kill -9 "$pid"
+ }
+
+ echo -e "${green}Kill processes still running from binaries under" \
+ "chroot dir $ART_TEST_CHROOT (if any)${nc} "
+ for_all_chroot_process kill_process
+ fi
fi
diff --git a/tools/ti-fast/tifast.cc b/tools/ti-fast/tifast.cc
index 428304e517..b147addfd5 100644
--- a/tools/ti-fast/tifast.cc
+++ b/tools/ti-fast/tifast.cc
@@ -32,6 +32,10 @@ namespace tifast {
namespace {
+// Special art ti-version number. We will use this as a fallback if we cannot get a regular JVMTI
+// env.
+static constexpr jint kArtTiVersion = JVMTI_VERSION_1_2 | 0x40000000;
+
static void AddCapsForEvent(jvmtiEvent event, jvmtiCapabilities* caps) {
switch (event) {
#define DO_CASE(name, cap_name) \
@@ -135,6 +139,17 @@ static std::vector<jvmtiEvent> GetRequestedEventList(const std::string& args) {
return res;
}
+static jint SetupJvmtiEnv(JavaVM* vm, jvmtiEnv** jvmti) {
+ jint res = 0;
+ res = vm->GetEnv(reinterpret_cast<void**>(jvmti), JVMTI_VERSION_1_1);
+
+ if (res != JNI_OK || *jvmti == nullptr) {
+ LOG(ERROR) << "Unable to access JVMTI, error code " << res;
+ return vm->GetEnv(reinterpret_cast<void**>(jvmti), kArtTiVersion);
+ }
+ return res;
+}
+
} // namespace
static jint AgentStart(JavaVM* vm,
@@ -142,14 +157,9 @@ static jint AgentStart(JavaVM* vm,
void* reserved ATTRIBUTE_UNUSED) {
jvmtiEnv* jvmti = nullptr;
jvmtiError error = JVMTI_ERROR_NONE;
- {
- jint res = 0;
- res = vm->GetEnv(reinterpret_cast<void**>(&jvmti), JVMTI_VERSION_1_1);
-
- if (res != JNI_OK || jvmti == nullptr) {
- LOG(ERROR) << "Unable to access JVMTI, error code " << res;
- return JNI_ERR;
- }
+ if (SetupJvmtiEnv(vm, &jvmti) != JNI_OK) {
+ LOG(ERROR) << "Could not get JVMTI env or ArtTiEnv!";
+ return JNI_ERR;
}
std::string args(options);
bool is_log = false;
diff --git a/tools/veridex/Android.mk b/tools/veridex/Android.mk
index f8463c1c33..2faa577262 100644
--- a/tools/veridex/Android.mk
+++ b/tools/veridex/Android.mk
@@ -22,13 +22,13 @@ LOCAL_PATH := $(call my-dir)
system_stub_dex := $(TARGET_OUT_COMMON_INTERMEDIATES)/PACKAGING/core_dex_intermediates/classes.dex
$(system_stub_dex): PRIVATE_MIN_SDK_VERSION := 1000
$(system_stub_dex): $(call resolve-prebuilt-sdk-jar-path,system_current) | $(ZIP2ZIP) $(DX)
- $(transform-classes-d8.jar-to-dex)
+ $(transform-classes.jar-to-dex)
oahl_stub_dex := $(TARGET_OUT_COMMON_INTERMEDIATES)/PACKAGING/oahl_dex_intermediates/classes.dex
$(oahl_stub_dex): PRIVATE_MIN_SDK_VERSION := 1000
$(oahl_stub_dex): $(call get-prebuilt-sdk-dir,current)/org.apache.http.legacy.jar | $(ZIP2ZIP) $(DX)
- $(transform-classes-d8.jar-to-dex)
+ $(transform-classes.jar-to-dex)
app_compat_lists := \
$(INTERNAL_PLATFORM_HIDDENAPI_LIGHT_GREYLIST) \
diff --git a/tools/veridex/flow_analysis.cc b/tools/veridex/flow_analysis.cc
index 154c60f6ac..d4f7e5f91d 100644
--- a/tools/veridex/flow_analysis.cc
+++ b/tools/veridex/flow_analysis.cc
@@ -739,14 +739,15 @@ RegisterValue FlowAnalysisSubstitutor::AnalyzeInvoke(const Instruction& instruct
MethodReference method(&resolver_->GetDexFile(), id);
// TODO: doesn't work for multidex
// TODO: doesn't work for overriding (but maybe should be done at a higher level);
- if (accesses_.find(method) == accesses_.end()) {
+ auto method_accesses_it = accesses_.find(method);
+ if (method_accesses_it == accesses_.end()) {
return GetReturnType(id);
}
uint32_t args[5];
if (!is_range) {
instruction.GetVarArgs(args);
}
- for (const ReflectAccessInfo& info : accesses_.at(method)) {
+ for (const ReflectAccessInfo& info : method_accesses_it->second) {
if (info.cls.IsParameter() || info.name.IsParameter()) {
RegisterValue cls = info.cls.IsParameter()
? GetRegister(GetParameterAt(instruction, is_range, args, info.cls.GetParameterIndex()))
diff --git a/tools/wrapagentproperties/wrapagentproperties.cc b/tools/wrapagentproperties/wrapagentproperties.cc
index 8b4b062cf5..39cb20acf2 100644
--- a/tools/wrapagentproperties/wrapagentproperties.cc
+++ b/tools/wrapagentproperties/wrapagentproperties.cc
@@ -139,9 +139,10 @@ struct ExtraJvmtiInterface : public jvmtiInterface_1_ {
static jvmtiError WrapGetSystemProperty(jvmtiEnv* env, const char* prop, char** out) {
ExtraJvmtiInterface* funcs = reinterpret_cast<ExtraJvmtiInterface*>(
const_cast<jvmtiInterface_1_*>(env->functions));
- if (funcs->proxy_vm->map->find(prop) != funcs->proxy_vm->map->end()) {
+ auto it = funcs->proxy_vm->map->find(prop);
+ if (it != funcs->proxy_vm->map->end()) {
+ const std::string& val = it->second;
std::string str_prop(prop);
- const std::string& val = funcs->proxy_vm->map->at(str_prop);
jvmtiError res = env->Allocate(val.size() + 1, reinterpret_cast<unsigned char**>(out));
if (res != JVMTI_ERROR_NONE) {
return res;
@@ -198,8 +199,9 @@ struct ExtraJvmtiInterface : public jvmtiInterface_1_ {
if (res != JVMTI_ERROR_NONE) {
return res;
}
- if (funcs->proxy_vm->map->find(prop) != funcs->proxy_vm->map->end()) {
- funcs->proxy_vm->map->at(prop) = val;
+ auto it = funcs->proxy_vm->map->find(prop);
+ if (it != funcs->proxy_vm->map->end()) {
+ it->second = val;
}
return JVMTI_ERROR_NONE;
}
@@ -245,7 +247,7 @@ enum class StartType {
static jint CallNextAgent(StartType start,
ProxyJavaVM* vm,
- std::string options,
+ const std::string& options,
void* reserved) {
// TODO It might be good to set it up so that the library is unloaded even if no jvmtiEnv's are
// created but this isn't expected to be common so we will just not bother.