-rw-r--r--  .gitignore  1
-rw-r--r--  Android.mk  1
-rw-r--r--  build/Android.gtest.mk  2
-rw-r--r--  build/Android.oat.mk  2
-rw-r--r--  cmdline/cmdline_parser_test.cc  31
-rw-r--r--  cmdline/cmdline_types.h  18
-rw-r--r--  compiler/Android.mk  4
-rw-r--r--  compiler/compiler.h  5
-rw-r--r--  compiler/dex/mir_graph.cc  15
-rw-r--r--  compiler/dex/pass.h  13
-rw-r--r--  compiler/dex/pass_driver.h  3
-rw-r--r--  compiler/dex/quick/arm/int_arm.cc  33
-rw-r--r--  compiler/dex/quick/arm/target_arm.cc  3
-rw-r--r--  compiler/dex/quick/arm/utility_arm.cc  29
-rw-r--r--  compiler/dex/quick/arm64/int_arm64.cc  51
-rw-r--r--  compiler/dex/quick/arm64/target_arm64.cc  3
-rw-r--r--  compiler/dex/quick/arm64/utility_arm64.cc  29
-rw-r--r--  compiler/dex/quick/codegen_util.cc  14
-rw-r--r--  compiler/dex/quick/dex_file_method_inliner.cc  9
-rw-r--r--  compiler/dex/quick/dex_file_method_inliner.h  1
-rw-r--r--  compiler/dex/quick/gen_common.cc  8
-rwxr-xr-x  compiler/dex/quick/gen_invoke.cc  36
-rw-r--r--  compiler/dex/quick/lazy_debug_frame_opcode_writer.h  4
-rw-r--r--  compiler/dex/quick/mips/call_mips.cc  5
-rw-r--r--  compiler/dex/quick/mips/fp_mips.cc  23
-rw-r--r--  compiler/dex/quick/mips/int_mips.cc  94
-rw-r--r--  compiler/dex/quick/mips/utility_mips.cc  36
-rw-r--r--  compiler/dex/quick/mir_to_lir.cc  3
-rw-r--r--  compiler/dex/quick/mir_to_lir.h  3
-rw-r--r--  compiler/dex/quick/quick_compiler.cc  8
-rw-r--r--  compiler/dex/quick/ralloc_util.cc  9
-rw-r--r--  compiler/dex/quick/x86/assemble_x86.cc  4
-rwxr-xr-x  compiler/dex/quick/x86/fp_x86.cc  12
-rwxr-xr-x  compiler/dex/quick/x86/int_x86.cc  72
-rwxr-xr-x  compiler/dex/quick/x86/target_x86.cc  12
-rw-r--r--  compiler/dex/quick/x86/utility_x86.cc  24
-rw-r--r--  compiler/dex/verification_results.cc  1
-rw-r--r--  compiler/driver/compiler_driver-inl.h  4
-rw-r--r--  compiler/driver/compiler_driver.cc  16
-rw-r--r--  compiler/driver/compiler_driver.h  4
-rw-r--r--  compiler/dwarf/debug_frame_opcode_writer.h  15
-rw-r--r--  compiler/dwarf/debug_info_entry_writer.h  33
-rw-r--r--  compiler/dwarf/debug_line_opcode_writer.h  15
-rw-r--r--  compiler/dwarf/headers.h  40
-rw-r--r--  compiler/dwarf/writer.h  15
-rw-r--r--  compiler/gc_map_builder.h  6
-rw-r--r--  compiler/image_writer.cc  6
-rw-r--r--  compiler/jit/jit_compiler.cc  33
-rw-r--r--  compiler/jni/jni_cfi_test_expected.inc  214
-rw-r--r--  compiler/jni/quick/jni_compiler.cc  3
-rw-r--r--  compiler/jni/quick/mips/calling_convention_mips.cc  21
-rw-r--r--  compiler/jni/quick/mips64/calling_convention_mips64.cc  5
-rw-r--r--  compiler/jni/quick/x86_64/calling_convention_x86_64.cc  3
-rw-r--r--  compiler/linker/arm/relative_patcher_thumb2.cc  5
-rw-r--r--  compiler/linker/arm/relative_patcher_thumb2.h  4
-rw-r--r--  compiler/oat_writer.cc  59
-rw-r--r--  compiler/oat_writer.h  13
-rw-r--r--  compiler/optimizing/boolean_simplifier.cc  37
-rw-r--r--  compiler/optimizing/bounds_check_elimination.cc  18
-rw-r--r--  compiler/optimizing/bounds_check_elimination_test.cc  56
-rw-r--r--  compiler/optimizing/builder.cc  70
-rw-r--r--  compiler/optimizing/builder.h  3
-rw-r--r--  compiler/optimizing/code_generator.cc  80
-rw-r--r--  compiler/optimizing/code_generator.h  9
-rw-r--r--  compiler/optimizing/code_generator_arm.cc  359
-rw-r--r--  compiler/optimizing/code_generator_arm.h  9
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc  88
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc  127
-rw-r--r--  compiler/optimizing/code_generator_mips64.h  2
-rw-r--r--  compiler/optimizing/code_generator_x86.cc  398
-rw-r--r--  compiler/optimizing/code_generator_x86.h  7
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc  277
-rw-r--r--  compiler/optimizing/code_generator_x86_64.h  10
-rw-r--r--  compiler/optimizing/codegen_test.cc  132
-rw-r--r--  compiler/optimizing/common_arm64.h  4
-rw-r--r--  compiler/optimizing/constant_area_fixups_x86.cc  132
-rw-r--r--  compiler/optimizing/constant_folding_test.cc  22
-rw-r--r--  compiler/optimizing/dead_code_elimination.cc  4
-rw-r--r--  compiler/optimizing/dead_code_elimination_test.cc  2
-rw-r--r--  compiler/optimizing/dominator_test.cc  10
-rw-r--r--  compiler/optimizing/find_loops_test.cc  22
-rw-r--r--  compiler/optimizing/graph_checker.cc  50
-rw-r--r--  compiler/optimizing/graph_checker.h  17
-rw-r--r--  compiler/optimizing/graph_checker_test.cc  8
-rw-r--r--  compiler/optimizing/graph_test.cc  20
-rw-r--r--  compiler/optimizing/graph_visualizer.cc  11
-rw-r--r--  compiler/optimizing/gvn.cc  2
-rw-r--r--  compiler/optimizing/gvn_test.cc  51
-rw-r--r--  compiler/optimizing/induction_var_analysis.cc  12
-rw-r--r--  compiler/optimizing/inliner.cc  5
-rw-r--r--  compiler/optimizing/instruction_simplifier.cc  128
-rw-r--r--  compiler/optimizing/intrinsics.cc  3
-rw-r--r--  compiler/optimizing/intrinsics.h  74
-rw-r--r--  compiler/optimizing/intrinsics_arm.cc  302
-rw-r--r--  compiler/optimizing/intrinsics_arm.h  7
-rw-r--r--  compiler/optimizing/intrinsics_arm64.cc  1
-rw-r--r--  compiler/optimizing/intrinsics_list.h  1
-rw-r--r--  compiler/optimizing/intrinsics_mips64.cc  359
-rw-r--r--  compiler/optimizing/intrinsics_x86.cc  18
-rw-r--r--  compiler/optimizing/intrinsics_x86_64.cc  298
-rw-r--r--  compiler/optimizing/licm_test.cc  12
-rw-r--r--  compiler/optimizing/live_ranges_test.cc  4
-rw-r--r--  compiler/optimizing/locations.h  4
-rw-r--r--  compiler/optimizing/nodes.cc  95
-rw-r--r--  compiler/optimizing/nodes.h  373
-rw-r--r--  compiler/optimizing/nodes_x86.h  44
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc  43
-rw-r--r--  compiler/optimizing/optimizing_compiler_stats.h  5
-rw-r--r--  compiler/optimizing/parallel_move_resolver.cc  2
-rw-r--r--  compiler/optimizing/parallel_move_test.cc  3
-rw-r--r--  compiler/optimizing/pretty_printer.h  2
-rw-r--r--  compiler/optimizing/reference_type_propagation.cc  40
-rw-r--r--  compiler/optimizing/register_allocator.cc  62
-rw-r--r--  compiler/optimizing/register_allocator_test.cc  20
-rw-r--r--  compiler/optimizing/side_effects_analysis.cc  3
-rw-r--r--  compiler/optimizing/ssa_builder.cc  8
-rw-r--r--  compiler/optimizing/ssa_liveness_analysis.cc  6
-rw-r--r--  compiler/optimizing/ssa_liveness_analysis.h  5
-rw-r--r--  compiler/optimizing/stack_map_stream.cc  2
-rw-r--r--  compiler/optimizing/stack_map_stream.h  9
-rw-r--r--  compiler/optimizing/suspend_check_test.cc  2
-rw-r--r--  compiler/utils/arm/assembler_arm.h  9
-rw-r--r--  compiler/utils/arm/assembler_arm32.cc  14
-rw-r--r--  compiler/utils/arm/assembler_arm32.h  4
-rw-r--r--  compiler/utils/arm/assembler_thumb2.cc  13
-rw-r--r--  compiler/utils/arm/assembler_thumb2.h  5
-rw-r--r--  compiler/utils/arm/constants_arm.h  3
-rw-r--r--  compiler/utils/array_ref.h  16
-rw-r--r--  compiler/utils/assembler.cc  7
-rw-r--r--  compiler/utils/assembler.h  6
-rw-r--r--  compiler/utils/assembler_test.h  97
-rw-r--r--  compiler/utils/assembler_test_base.h  68
-rw-r--r--  compiler/utils/assembler_thumb_test.cc  3
-rw-r--r--  compiler/utils/assembler_thumb_test_expected.cc.inc  241
-rw-r--r--  compiler/utils/label.h  7
-rw-r--r--  compiler/utils/mips/assembler_mips.cc  1874
-rw-r--r--  compiler/utils/mips/assembler_mips.h  512
-rw-r--r--  compiler/utils/mips/assembler_mips_test.cc  1324
-rw-r--r--  compiler/utils/x86/assembler_x86.cc  24
-rw-r--r--  compiler/utils/x86/assembler_x86.h  103
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64.cc  21
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64.h  69
-rw-r--r--  dex2oat/dex2oat.cc  2
-rw-r--r--  disassembler/disassembler_mips.cc  18
-rw-r--r--  oatdump/oatdump.cc  48
-rw-r--r--  runtime/Android.mk  3
-rw-r--r--  runtime/arch/arch_test.cc  2
-rw-r--r--  runtime/arch/arm/context_arm.cc  4
-rw-r--r--  runtime/arch/arm/entrypoints_init_arm.cc  8
-rw-r--r--  runtime/arch/arm/quick_entrypoints_arm.S  105
-rw-r--r--  runtime/arch/arm64/context_arm64.cc  4
-rw-r--r--  runtime/arch/arm64/entrypoints_init_arm64.cc  8
-rw-r--r--  runtime/arch/arm64/instruction_set_features_arm64.cc  2
-rw-r--r--  runtime/arch/instruction_set.h  16
-rw-r--r--  runtime/arch/mips/context_mips.cc  4
-rw-r--r--  runtime/arch/mips/entrypoints_init_mips.cc  8
-rw-r--r--  runtime/arch/mips64/context_mips64.cc  4
-rw-r--r--  runtime/arch/mips64/entrypoints_init_mips64.cc  8
-rw-r--r--  runtime/arch/mips64/quick_entrypoints_mips64.S  69
-rw-r--r--  runtime/arch/quick_alloc_entrypoints.S  188
-rw-r--r--  runtime/arch/stub_test.cc  481
-rw-r--r--  runtime/arch/x86/context_x86.cc  7
-rw-r--r--  runtime/arch/x86/entrypoints_init_x86.cc  8
-rw-r--r--  runtime/arch/x86/quick_entrypoints_x86.S  197
-rw-r--r--  runtime/arch/x86_64/context_x86_64.cc  7
-rw-r--r--  runtime/arch/x86_64/entrypoints_init_x86_64.cc  10
-rw-r--r--  runtime/arch/x86_64/quick_entrypoints_x86_64.S  207
-rw-r--r--  runtime/art_code.cc  333
-rw-r--r--  runtime/art_code.h  93
-rw-r--r--  runtime/art_method-inl.h  108
-rw-r--r--  runtime/art_method.cc  255
-rw-r--r--  runtime/art_method.h  148
-rw-r--r--  runtime/asm_support.h  65
-rw-r--r--  runtime/base/allocator.cc  6
-rw-r--r--  runtime/base/allocator.h  7
-rw-r--r--  runtime/base/arena_allocator.cc  25
-rw-r--r--  runtime/base/arena_allocator.h  79
-rw-r--r--  runtime/base/arena_containers.h  41
-rw-r--r--  runtime/base/bit_vector.cc  4
-rw-r--r--  runtime/base/dchecked_vector.h  228
-rw-r--r--  runtime/base/debug_stack.h  4
-rw-r--r--  runtime/base/hash_map.h  18
-rw-r--r--  runtime/base/hash_set.h  201
-rw-r--r--  runtime/base/hash_set_test.cc  18
-rw-r--r--  runtime/base/memory_tool.h  3
-rw-r--r--  runtime/base/mutex.cc  12
-rw-r--r--  runtime/base/mutex.h  10
-rw-r--r--  runtime/base/scoped_arena_allocator.cc  5
-rw-r--r--  runtime/base/scoped_arena_allocator.h  17
-rw-r--r--  runtime/base/scoped_arena_containers.h  22
-rw-r--r--  runtime/check_reference_map_visitor.h  7
-rw-r--r--  runtime/class_linker.cc  1170
-rw-r--r--  runtime/class_linker.h  125
-rw-r--r--  runtime/class_table.cc  4
-rw-r--r--  runtime/common_runtime_test.cc  3
-rw-r--r--  runtime/debugger.cc  17
-rw-r--r--  runtime/debugger.h  1
-rw-r--r--  runtime/dex_file_verifier.cc  11
-rw-r--r--  runtime/entrypoints/entrypoint_utils-inl.h  1
-rw-r--r--  runtime/entrypoints/entrypoint_utils.cc  8
-rw-r--r--  runtime/entrypoints/entrypoint_utils.h  5
-rw-r--r--  runtime/entrypoints/interpreter/interpreter_entrypoints.cc  56
-rw-r--r--  runtime/entrypoints/interpreter/interpreter_entrypoints.h  43
-rw-r--r--  runtime/entrypoints/jni/jni_entrypoints.h  2
-rw-r--r--  runtime/entrypoints/quick/quick_alloc_entrypoints.cc  6
-rw-r--r--  runtime/entrypoints/quick/quick_lock_entrypoints.cc  10
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints.cc  24
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc  6
-rw-r--r--  runtime/entrypoints_order_test.cc  15
-rw-r--r--  runtime/exception_test.cc  6
-rw-r--r--  runtime/experimental_flags.h  88
-rw-r--r--  runtime/fault_handler.cc  13
-rw-r--r--  runtime/gc/accounting/remembered_set.cc  3
-rw-r--r--  runtime/gc/allocator/dlmalloc.cc  9
-rw-r--r--  runtime/gc/allocator/rosalloc.h  31
-rw-r--r--  runtime/gc/collector/concurrent_copying.cc  7
-rw-r--r--  runtime/gc/collector/sticky_mark_sweep.cc  3
-rw-r--r--  runtime/gc/space/image_space.cc  15
-rw-r--r--  runtime/gc/space/image_space.h  5
-rw-r--r--  runtime/gc/space/memory_tool_malloc_space.h  4
-rw-r--r--  runtime/gc/space/zygote_space.cc  3
-rw-r--r--  runtime/gc/task_processor_test.cc  3
-rw-r--r--  runtime/image.cc  2
-rw-r--r--  runtime/indirect_reference_table.cc  1
-rw-r--r--  runtime/instrumentation.cc  12
-rw-r--r--  runtime/instrumentation.h  5
-rw-r--r--  runtime/intern_table.cc  8
-rw-r--r--  runtime/intern_table.h  1
-rw-r--r--  runtime/interpreter/interpreter.cc  14
-rw-r--r--  runtime/interpreter/interpreter.h  11
-rw-r--r--  runtime/interpreter/interpreter_common.cc  55
-rw-r--r--  runtime/interpreter/interpreter_common.h  25
-rw-r--r--  runtime/interpreter/interpreter_goto_table_impl.cc  19
-rw-r--r--  runtime/interpreter/interpreter_switch_impl.cc  21
-rw-r--r--  runtime/interpreter/unstarted_runtime.cc  2
-rw-r--r--  runtime/jdwp/jdwp_handler.cc  3
-rw-r--r--  runtime/jni_env_ext.cc  3
-rw-r--r--  runtime/jni_internal.cc  5
-rw-r--r--  runtime/jvalue.h  8
-rw-r--r--  runtime/leb128.h  36
-rw-r--r--  runtime/mem_map.cc  20
-rw-r--r--  runtime/mem_map.h  2
-rw-r--r--  runtime/mirror/array-inl.h  3
-rw-r--r--  runtime/mirror/class-inl.h  3
-rw-r--r--  runtime/mirror/class.h  21
-rw-r--r--  runtime/mirror/dex_cache_test.cc  25
-rw-r--r--  runtime/mirror/object.h  10
-rw-r--r--  runtime/modifiers.h  5
-rw-r--r--  runtime/monitor.cc  6
-rw-r--r--  runtime/monitor.h  12
-rw-r--r--  runtime/monitor_android.cc  4
-rw-r--r--  runtime/monitor_linux.cc  2
-rw-r--r--  runtime/native/dalvik_system_DexFile.cc  166
-rw-r--r--  runtime/native/dalvik_system_DexFile.h  4
-rw-r--r--  runtime/native/dalvik_system_VMRuntime.cc  3
-rw-r--r--  runtime/native/java_lang_Class.cc  4
-rw-r--r--  runtime/native/java_lang_reflect_Constructor.cc  15
-rw-r--r--  runtime/oat.cc  6
-rw-r--r--  runtime/oat.h  2
-rw-r--r--  runtime/oat_file-inl.h  16
-rw-r--r--  runtime/oat_file.cc  87
-rw-r--r--  runtime/oat_file.h  9
-rw-r--r--  runtime/oat_file_assistant_test.cc  27
-rw-r--r--  runtime/oat_file_manager.cc  394
-rw-r--r--  runtime/oat_file_manager.h  125
-rw-r--r--  runtime/parsed_options.cc  21
-rw-r--r--  runtime/quick/inline_method_analyser.h  1
-rw-r--r--  runtime/quick_exception_handler.cc  10
-rw-r--r--  runtime/runtime.cc  37
-rw-r--r--  runtime/runtime.h  22
-rw-r--r--  runtime/runtime_linux.cc  2
-rw-r--r--  runtime/runtime_options.def  2
-rw-r--r--  runtime/stack.cc  188
-rw-r--r--  runtime/stack.h  80
-rw-r--r--  runtime/thread.cc  64
-rw-r--r--  runtime/thread.h  26
-rw-r--r--  runtime/trace.cc  29
-rw-r--r--  runtime/utils.cc  9
-rw-r--r--  runtime/utils.h  25
-rw-r--r--  runtime/utils/dex_cache_arrays_layout-inl.h  15
-rw-r--r--  runtime/utils/dex_cache_arrays_layout.h  5
-rw-r--r--  runtime/verifier/method_verifier.cc  35
-rw-r--r--  runtime/verifier/register_line.cc  126
-rw-r--r--  test/004-JniTest/src/Main.java  2
-rw-r--r--  test/004-ReferenceMap/stack_walk_refmap_jni.cc  10
-rw-r--r--  test/079-phantom/src/Bitmap.java  5
-rw-r--r--  test/088-monitor-verification/smali/NullLocks.smali  28
-rw-r--r--  test/088-monitor-verification/src/Main.java  45
-rw-r--r--  test/088-monitor-verification/src/TwoPath.java  53
-rw-r--r--  test/131-structural-change/expected.txt  1
-rw-r--r--  test/131-structural-change/src/Main.java  6
-rw-r--r--  test/141-class-unload/expected.txt  1
-rw-r--r--  test/141-class-unload/src/Main.java  24
-rw-r--r--  test/454-get-vreg/get_vreg_jni.cc  17
-rw-r--r--  test/457-regs/regs_jni.cc  5
-rw-r--r--  test/466-get-live-vreg/get_live_vreg_jni.cc  3
-rw-r--r--  test/529-checker-unresolved/expected.txt  2
-rw-r--r--  test/529-checker-unresolved/src/Main.java  16
-rw-r--r--  test/529-checker-unresolved/src/Unresolved.java  10
-rw-r--r--  test/536-checker-intrinsic-optimization/expected.txt  0
-rw-r--r--  test/536-checker-intrinsic-optimization/info.txt  0
-rw-r--r--  test/536-checker-intrinsic-optimization/src/Main.java  71
-rw-r--r--  test/536-checker-needs-access-check/expected.txt  4
-rw-r--r--  test/536-checker-needs-access-check/info.txt  1
-rw-r--r--  test/536-checker-needs-access-check/src/Main.java  82
-rw-r--r--  test/536-checker-needs-access-check/src/other/InaccessibleClass.java  20
-rw-r--r--  test/536-checker-needs-access-check/src/other/InaccessibleClassProxy.java  23
-rw-r--r--  test/536-checker-needs-access-check/src2/other/InaccessibleClass.java  20
-rw-r--r--  test/536-checker-needs-access-check/src2/other/InaccessibleClassProxy.java  23
-rw-r--r--  test/537-checker-arraycopy/expected.txt  0
-rw-r--r--  test/537-checker-arraycopy/info.txt  1
-rw-r--r--  test/537-checker-arraycopy/src/Main.java  71
-rw-r--r--  test/537-checker-debuggable/expected.txt  0
-rw-r--r--  test/537-checker-debuggable/info.txt  1
-rw-r--r--  test/537-checker-debuggable/smali/TestCase.smali  42
-rw-r--r--  test/537-checker-debuggable/src/Main.java  23
-rw-r--r--  test/537-checker-inline-and-unverified/expected.txt  0
-rw-r--r--  test/537-checker-inline-and-unverified/info.txt  1
-rw-r--r--  test/537-checker-inline-and-unverified/src/Main.java  59
-rw-r--r--  test/537-checker-inline-and-unverified/src/other/InaccessibleClass.java  20
-rw-r--r--  test/537-checker-inline-and-unverified/src2/other/InaccessibleClass.java  20
-rw-r--r--  test/538-checker-embed-constants/expected.txt  0
-rw-r--r--  test/538-checker-embed-constants/info.txt  1
-rw-r--r--  test/538-checker-embed-constants/src/Main.java  290
-rw-r--r--  test/539-checker-arm64-encodable-immediates/expected.txt  0
-rw-r--r--  test/539-checker-arm64-encodable-immediates/info.txt  2
-rw-r--r--  test/539-checker-arm64-encodable-immediates/src/Main.java  52
-rw-r--r--  test/540-checker-rtp-bug/expected.txt  1
-rw-r--r--  test/540-checker-rtp-bug/info.txt  1
-rw-r--r--  test/540-checker-rtp-bug/src/Main.java  102
-rwxr-xr-x  test/955-lambda-smali/run  2
-rwxr-xr-x  test/960-default-smali/build  33
-rw-r--r--  test/960-default-smali/expected.txt  84
-rw-r--r--  test/960-default-smali/info.txt  19
-rwxr-xr-x  test/960-default-smali/run  21
-rw-r--r--  test/960-default-smali/smali/A.smali  38
-rw-r--r--  test/960-default-smali/smali/Attendant.smali  53
-rw-r--r--  test/960-default-smali/smali/B.smali  38
-rw-r--r--  test/960-default-smali/smali/C.smali  37
-rw-r--r--  test/960-default-smali/smali/D.smali  38
-rw-r--r--  test/960-default-smali/smali/E.smali  38
-rw-r--r--  test/960-default-smali/smali/Extension.smali  30
-rw-r--r--  test/960-default-smali/smali/F.smali  47
-rw-r--r--  test/960-default-smali/smali/G.smali  37
-rw-r--r--  test/960-default-smali/smali/Greeter.smali  40
-rw-r--r--  test/960-default-smali/smali/Greeter2.smali  39
-rw-r--r--  test/960-default-smali/smali/Greeter3.smali  40
-rw-r--r--  test/960-default-smali/smali/H.smali  28
-rw-r--r--  test/960-default-smali/smali/I.smali  28
-rw-r--r--  test/960-default-smali/smali/J.smali  29
-rw-r--r--  test/960-default-smali/smali/classes.xml  127
-rwxr-xr-x  test/960-default-smali/util-src/generate_smali.py  376
-rwxr-xr-x  test/961-default-iface-resolution-generated/build  47
-rw-r--r--  test/961-default-iface-resolution-generated/expected.txt  1
-rw-r--r--  test/961-default-iface-resolution-generated/info.txt  17
-rwxr-xr-x  test/961-default-iface-resolution-generated/run  21
-rwxr-xr-x  test/961-default-iface-resolution-generated/util-src/generate_smali.py  466
-rwxr-xr-x  test/962-iface-static/build  30
-rw-r--r--  test/962-iface-static/expected.txt  3
-rw-r--r--  test/962-iface-static/info.txt  4
-rwxr-xr-x  test/962-iface-static/run  21
-rw-r--r--  test/962-iface-static/smali/Displayer.smali  45
-rw-r--r--  test/962-iface-static/smali/Main.smali  40
-rw-r--r--  test/962-iface-static/smali/iface.smali  43
-rwxr-xr-x  test/963-default-range-smali/build  30
-rw-r--r--  test/963-default-range-smali/expected.txt  2
-rw-r--r--  test/963-default-range-smali/info.txt  4
-rwxr-xr-x  test/963-default-range-smali/run  21
-rw-r--r--  test/963-default-range-smali/smali/A.smali  29
-rw-r--r--  test/963-default-range-smali/smali/Main.smali  77
-rw-r--r--  test/963-default-range-smali/smali/iface.smali  40
-rwxr-xr-x  test/964-default-iface-init-generated/build  45
-rw-r--r--  test/964-default-iface-init-generated/expected.txt  1
-rw-r--r--  test/964-default-iface-init-generated/info.txt  17
-rwxr-xr-x  test/964-default-iface-init-generated/run  21
-rw-r--r--  test/964-default-iface-init-generated/smali/Displayer.smali  45
-rwxr-xr-x  test/964-default-iface-init-generated/util-src/generate_smali.py  531
-rw-r--r--  test/Android.run-test.mk  34
-rwxr-xr-x  test/etc/default-build  2
-rwxr-xr-x  test/run-all-tests  5
-rwxr-xr-x  test/run-test  32
-rw-r--r--  test/utils/python/testgen/mixins.py  135
-rw-r--r--  test/utils/python/testgen/utils.py  80
-rw-r--r--  tools/ahat/README.txt  5
-rw-r--r--  tools/ahat/src/InstanceUtils.java  61
-rw-r--r--  tools/art  3
-rwxr-xr-x  tools/buildbot-build.sh  8
-rwxr-xr-x  tools/checker/checker.py  10
-rw-r--r--  tools/checker/file_format/checker/parser.py  20
-rw-r--r--  tools/checker/file_format/checker/struct.py  3
-rw-r--r--  tools/checker/file_format/checker/test.py  48
-rw-r--r--  tools/checker/match/file.py  5
-rwxr-xr-x  tools/checker/run_unit_tests.py  2
-rw-r--r--  tools/dmtracedump/Android.mk  32
-rw-r--r--  tools/dmtracedump/createtesttrace.cc  449
-rwxr-xr-x  tools/dmtracedump/dmtracedump.pl  18
-rw-r--r--  tools/dmtracedump/dumpdir.sh  11
-rw-r--r--  tools/dmtracedump/profile.h  43
-rw-r--r--  tools/dmtracedump/tracedump.cc  2616
-rwxr-xr-x  tools/extract-embedded-java  35
400 files changed, 19178 insertions, 4728 deletions
diff --git a/.gitignore b/.gitignore
index c4cf98b37c..4e806c6514 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,2 @@
JIT_ART
+**/__pycache__/**
diff --git a/Android.mk b/Android.mk
index b8ba9f26b2..fcf70ff2eb 100644
--- a/Android.mk
+++ b/Android.mk
@@ -89,6 +89,7 @@ include $(art_path)/dalvikvm/Android.mk
include $(art_path)/tools/Android.mk
include $(art_path)/tools/ahat/Android.mk
include $(art_path)/tools/dexfuzz/Android.mk
+include $(art_path)/tools/dmtracedump/Android.mk
include $(art_path)/sigchainlib/Android.mk
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 9775f6a5d7..1b54a510fd 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -65,6 +65,7 @@ $(ART_TEST_TARGET_GTEST_MainStripped_DEX): $(ART_TEST_TARGET_GTEST_Main_DEX)
# Dex file dependencies for each gtest.
ART_GTEST_class_linker_test_DEX_DEPS := Interfaces MultiDex MyClass Nested Statics StaticsFromCode
ART_GTEST_compiler_driver_test_DEX_DEPS := AbstractMethod StaticLeafMethods
+ART_GTEST_dex_cache_test_DEX_DEPS := Main
ART_GTEST_dex_file_test_DEX_DEPS := GetMethodSignature Main Nested
ART_GTEST_exception_test_DEX_DEPS := ExceptionHandle
ART_GTEST_instrumentation_test_DEX_DEPS := Instrumentation
@@ -348,6 +349,7 @@ COMPILER_GTEST_HOST_SRC_FILES_arm64 := \
COMPILER_GTEST_HOST_SRC_FILES_mips := \
$(COMPILER_GTEST_COMMON_SRC_FILES_mips) \
+ compiler/utils/mips/assembler_mips_test.cc \
COMPILER_GTEST_HOST_SRC_FILES_mips64 := \
$(COMPILER_GTEST_COMMON_SRC_FILES_mips64) \
diff --git a/build/Android.oat.mk b/build/Android.oat.mk
index 3272c27f2b..592843e0bd 100644
--- a/build/Android.oat.mk
+++ b/build/Android.oat.mk
@@ -57,7 +57,7 @@ define create-core-oat-host-rules
endif
ifeq ($(1),optimizing)
core_compile_options += --compiler-backend=Optimizing
- core_dex2oat_dependency += $(DEX2OAT)
+ core_dex2oat_dependency := $(DEX2OAT)
core_infix := -optimizing
endif
ifeq ($(1),interpreter)
diff --git a/cmdline/cmdline_parser_test.cc b/cmdline/cmdline_parser_test.cc
index 52df7deb25..f34b5edcc4 100644
--- a/cmdline/cmdline_parser_test.cc
+++ b/cmdline/cmdline_parser_test.cc
@@ -21,6 +21,7 @@
#include "utils.h"
#include <numeric>
#include "gtest/gtest.h"
+#include "runtime/experimental_flags.h"
#define EXPECT_NULL(expected) EXPECT_EQ(reinterpret_cast<const void*>(expected), \
reinterpret_cast<void*>(nullptr));
@@ -529,22 +530,32 @@ TEST_F(CmdlineParserTest, TestProfilerOptions) {
}
} // TEST_F
-/* -X[no]experimental-lambdas */
-TEST_F(CmdlineParserTest, TestExperimentalLambdas) {
+/* -Xexperimental:_ */
+TEST_F(CmdlineParserTest, TestExperimentalFlags) {
// Off by default
- EXPECT_SINGLE_PARSE_DEFAULT_VALUE(false,
+ EXPECT_SINGLE_PARSE_DEFAULT_VALUE(ExperimentalFlags::kNone,
"",
- M::ExperimentalLambdas);
+ M::Experimental);
// Disabled explicitly
- EXPECT_SINGLE_PARSE_VALUE(false,
- "-Xnoexperimental-lambdas",
- M::ExperimentalLambdas);
+ EXPECT_SINGLE_PARSE_VALUE(ExperimentalFlags::kNone,
+ "-Xexperimental:none",
+ M::Experimental);
// Enabled explicitly
- EXPECT_SINGLE_PARSE_VALUE(true,
- "-Xexperimental-lambdas",
- M::ExperimentalLambdas);
+ EXPECT_SINGLE_PARSE_VALUE(ExperimentalFlags::kLambdas,
+ "-Xexperimental:lambdas",
+ M::Experimental);
+ // Enabled explicitly
+ EXPECT_SINGLE_PARSE_VALUE(ExperimentalFlags::kDefaultMethods,
+ "-Xexperimental:default-methods",
+ M::Experimental);
+
+ // Enabled both
+ EXPECT_SINGLE_PARSE_VALUE(ExperimentalFlags::kDefaultMethods | ExperimentalFlags::kLambdas,
+ "-Xexperimental:default-methods "
+ "-Xexperimental:lambdas",
+ M::Experimental);
}
// -Xverify:_
diff --git a/cmdline/cmdline_types.h b/cmdline/cmdline_types.h
index a57b6196de..c594adbc94 100644
--- a/cmdline/cmdline_types.h
+++ b/cmdline/cmdline_types.h
@@ -28,6 +28,7 @@
#include "jdwp/jdwp.h"
#include "runtime/base/logging.h"
#include "runtime/base/time_utils.h"
+#include "runtime/experimental_flags.h"
#include "gc/collector_type.h"
#include "gc/space/large_object_space.h"
#include "profiler_options.h"
@@ -838,6 +839,23 @@ struct CmdlineType<TestProfilerOptions> : CmdlineTypeParser<TestProfilerOptions>
static constexpr bool kCanParseBlankless = true;
};
+template<>
+struct CmdlineType<ExperimentalFlags> : CmdlineTypeParser<ExperimentalFlags> {
+ Result ParseAndAppend(const std::string& option, ExperimentalFlags& existing) {
+ if (option == "none") {
+ existing = existing | ExperimentalFlags::kNone;
+ } else if (option == "lambdas") {
+ existing = existing | ExperimentalFlags::kLambdas;
+ } else if (option == "default-methods") {
+ existing = existing | ExperimentalFlags::kDefaultMethods;
+ } else {
+ return Result::Failure(std::string("Unknown option '") + option + "'");
+ }
+ return Result::SuccessNoValue();
+ }
+
+ static const char* Name() { return "ExperimentalFlags"; }
+};
} // namespace art
#endif // ART_CMDLINE_CMDLINE_TYPES_H_
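
For context, the new -Xexperimental:_ option accumulates values into a single flag set, so passing both -Xexperimental:default-methods and -Xexperimental:lambdas enables both, as the parser test above checks. The following is a minimal, hypothetical sketch of how such a composable bit-flag type can work; the real type lives in runtime/experimental_flags.h (added by this change) and its exact API is not reproduced here.

    #include <cstdint>
    #include <iostream>

    // Hypothetical stand-in for a bit-flag type in the spirit of ExperimentalFlags.
    struct Flags {
      enum : uint32_t { kNone = 0x0, kLambdas = 0x1, kDefaultMethods = 0x2 };
      uint32_t value;

      explicit Flags(uint32_t v = kNone) : value(v) {}
      bool Contains(uint32_t flag) const { return (value & flag) != 0; }
    };

    // Each "-Xexperimental:<name>" occurrence ORs one more bit into the accumulated set,
    // mirroring the `existing = existing | ...` pattern in ParseAndAppend above.
    inline Flags operator|(Flags a, Flags b) { return Flags(a.value | b.value); }

    int main() {
      // Mirrors the parser test: two options given on the command line compose.
      Flags flags = Flags(Flags::kDefaultMethods) | Flags(Flags::kLambdas);
      std::cout << flags.Contains(Flags::kLambdas) << "\n";  // prints 1
      return 0;
    }
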
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 96e13ac9a3..20c80235ba 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -66,6 +66,7 @@ LIBART_COMPILER_SRC_FILES := \
optimizing/builder.cc \
optimizing/code_generator.cc \
optimizing/code_generator_utils.cc \
+ optimizing/constant_area_fixups_x86.cc \
optimizing/constant_folding.cc \
optimizing/dead_code_elimination.cc \
optimizing/graph_checker.cc \
@@ -210,7 +211,8 @@ LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_arm64 := \
dex/quick/arm64/arm64_lir.h
LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_mips := \
- dex/quick/mips/mips_lir.h
+ dex/quick/mips/mips_lir.h \
+ utils/mips/assembler_mips.h
LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_mips64 := \
$(LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES_mips)
diff --git a/compiler/compiler.h b/compiler/compiler.h
index 9b4dbe02e2..8788dc1950 100644
--- a/compiler/compiler.h
+++ b/compiler/compiler.h
@@ -77,9 +77,8 @@ class Compiler {
* information.
* @note This is used for backtrace information in generated code.
*/
- virtual std::vector<uint8_t>* GetCallFrameInformationInitialization(const CompilerDriver& driver)
- const {
- UNUSED(driver);
+ virtual std::vector<uint8_t>* GetCallFrameInformationInitialization(
+ const CompilerDriver& driver ATTRIBUTE_UNUSED) const {
return nullptr;
}
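
For reference, this is the recurring cleanup applied throughout the compiler changes below: the body-level UNUSED(x) statement is dropped in favour of annotating the parameter itself with ATTRIBUTE_UNUSED. A minimal stand-alone sketch of the two idioms follows; the macro definitions here are illustrative stand-ins, not the exact ones from ART's base/macros.h.

    // Illustrative stand-ins for the ART macros.
    #define UNUSED(x) ((void)(x))                      // old style: silence the warning in the body
    #define ATTRIBUTE_UNUSED __attribute__((unused))   // new style: annotate the parameter itself

    // Old style: the parameter is named, then explicitly voided in the body.
    int OldStyle(int used, int unused_param) {
      UNUSED(unused_param);
      return used + 1;
    }

    // New style: the intent is visible in the signature and no body statement is needed.
    int NewStyle(int used, int unused_param ATTRIBUTE_UNUSED) {
      return used + 1;
    }

    int main() { return OldStyle(1, 2) + NewStyle(3, 4); }

The attribute form keeps the intent visible in the signature, which is why the patch can delete the UNUSED(...) statements wholesale.
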
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 4efe4af896..b0972d98d4 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -511,9 +511,8 @@ BasicBlock* MIRGraph::ProcessCanBranch(BasicBlock* cur_block, MIR* insn, DexOffs
/* Process instructions with the kSwitch flag */
BasicBlock* MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset,
- int width, int flags,
+ int width, int flags ATTRIBUTE_UNUSED,
ScopedArenaVector<uint16_t>* dex_pc_to_block_map) {
- UNUSED(flags);
const uint16_t* switch_data =
reinterpret_cast<const uint16_t*>(GetCurrentInsns() + cur_offset +
static_cast<int32_t>(insn->dalvikInsn.vB));
@@ -592,11 +591,15 @@ BasicBlock* MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffs
}
/* Process instructions with the kThrow flag */
-BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset,
- int width, int flags, ArenaBitVector* try_block_addr,
- const uint16_t* code_ptr, const uint16_t* code_end,
+BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block,
+ MIR* insn,
+ DexOffset cur_offset,
+ int width,
+ int flags ATTRIBUTE_UNUSED,
+ ArenaBitVector* try_block_addr,
+ const uint16_t* code_ptr,
+ const uint16_t* code_end,
ScopedArenaVector<uint16_t>* dex_pc_to_block_map) {
- UNUSED(flags);
bool in_try_block = try_block_addr->IsBitSet(cur_offset);
bool is_throw = (insn->dalvikInsn.opcode == Instruction::THROW);
diff --git a/compiler/dex/pass.h b/compiler/dex/pass.h
index 0def056f4f..16414efada 100644
--- a/compiler/dex/pass.h
+++ b/compiler/dex/pass.h
@@ -53,10 +53,7 @@ class Pass {
* @param data the PassDataHolder.
* @return whether or not to execute the pass.
*/
- virtual bool Gate(const PassDataHolder* data) const {
- // Unused parameter.
- UNUSED(data);
-
+ virtual bool Gate(const PassDataHolder* data ATTRIBUTE_UNUSED) const {
// Base class says yes.
return true;
}
@@ -64,17 +61,13 @@ class Pass {
/**
* @brief Start of the pass: called before the Worker function.
*/
- virtual void Start(PassDataHolder* data) const {
- // Unused parameter.
- UNUSED(data);
+ virtual void Start(PassDataHolder* data ATTRIBUTE_UNUSED) const {
}
/**
* @brief End of the pass: called after the WalkBasicBlocks function.
*/
- virtual void End(PassDataHolder* data) const {
- // Unused parameter.
- UNUSED(data);
+ virtual void End(PassDataHolder* data ATTRIBUTE_UNUSED) const {
}
/**
diff --git a/compiler/dex/pass_driver.h b/compiler/dex/pass_driver.h
index 8762b53af4..34a6f630f1 100644
--- a/compiler/dex/pass_driver.h
+++ b/compiler/dex/pass_driver.h
@@ -125,8 +125,7 @@ class PassDriver {
* @brief Dispatch a patch.
* Gives the ability to add logic when running the patch.
*/
- virtual void DispatchPass(const Pass* pass) {
- UNUSED(pass);
+ virtual void DispatchPass(const Pass* pass ATTRIBUTE_UNUSED) {
}
/** @brief List of passes: provides the order to execute the passes.
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index db76cc6f53..b2bd6faca2 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -216,8 +216,7 @@ void ArmMir2Lir::GenFusedLongCmpImmBranch(BasicBlock* bb, RegLocation rl_src1,
void ArmMir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
int32_t true_val, int32_t false_val, RegStorage rs_dest,
- RegisterClass dest_reg_class) {
- UNUSED(dest_reg_class);
+ RegisterClass dest_reg_class ATTRIBUTE_UNUSED) {
// TODO: Generalize the IT below to accept more than one-instruction loads.
DCHECK(InexpensiveConstantInt(true_val));
DCHECK(InexpensiveConstantInt(false_val));
@@ -239,8 +238,7 @@ void ArmMir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, Condi
OpEndIT(it);
}
-void ArmMir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
- UNUSED(bb);
+void ArmMir2Lir::GenSelect(BasicBlock* bb ATTRIBUTE_UNUSED, MIR* mir) {
RegLocation rl_result;
RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
RegLocation rl_dest = mir_graph_->GetDest(mir);
@@ -516,9 +514,8 @@ static const MagicTable magic_table[] = {
};
// Integer division by constant via reciprocal multiply (Hacker's Delight, 10-4)
-bool ArmMir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
+bool ArmMir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode ATTRIBUTE_UNUSED, bool is_div,
RegLocation rl_src, RegLocation rl_dest, int lit) {
- UNUSED(dalvik_opcode);
if ((lit < 0) || (lit >= static_cast<int>(sizeof(magic_table)/sizeof(magic_table[0])))) {
return false;
}
@@ -728,16 +725,19 @@ bool ArmMir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit)
return true;
}
-RegLocation ArmMir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2, bool is_div, int flags) {
- UNUSED(rl_dest, rl_src1, rl_src2, is_div, flags);
+RegLocation ArmMir2Lir::GenDivRem(RegLocation rl_dest ATTRIBUTE_UNUSED,
+ RegLocation rl_src1 ATTRIBUTE_UNUSED,
+ RegLocation rl_src2 ATTRIBUTE_UNUSED,
+ bool is_div ATTRIBUTE_UNUSED,
+ int flags ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of GenDivRem for Arm";
UNREACHABLE();
}
-RegLocation ArmMir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit,
- bool is_div) {
- UNUSED(rl_dest, rl_src1, lit, is_div);
+RegLocation ArmMir2Lir::GenDivRemLit(RegLocation rl_dest ATTRIBUTE_UNUSED,
+ RegLocation rl_src1 ATTRIBUTE_UNUSED,
+ int lit ATTRIBUTE_UNUSED,
+ bool is_div ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of GenDivRemLit for Arm";
UNREACHABLE();
}
@@ -1160,9 +1160,8 @@ void ArmMir2Lir::GenMaddMsubInt(RegLocation rl_dest, RegLocation rl_src1, RegLoc
}
void ArmMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
- RegLocation rl_result, int lit,
+ RegLocation rl_result, int lit ATTRIBUTE_UNUSED,
int first_bit, int second_bit) {
- UNUSED(lit);
OpRegRegRegShift(kOpAdd, rl_result.reg, rl_src.reg, rl_src.reg,
EncodeShift(kArmLsl, second_bit - first_bit));
if (first_bit != 0) {
@@ -1257,9 +1256,8 @@ void ArmMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
StoreValueWide(rl_dest, rl_result);
}
-void ArmMir2Lir::GenMulLong(Instruction::Code opcode, RegLocation rl_dest,
+void ArmMir2Lir::GenMulLong(Instruction::Code opcode ATTRIBUTE_UNUSED, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_src2) {
- UNUSED(opcode);
/*
* tmp1 = src1.hi * src2.lo; // src1.hi is no longer needed
* dest = src1.lo * src2.lo;
@@ -1564,8 +1562,7 @@ void ArmMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
void ArmMir2Lir::GenShiftImmOpLong(Instruction::Code opcode,
RegLocation rl_dest, RegLocation rl_src, RegLocation rl_shift,
- int flags) {
- UNUSED(flags);
+ int flags ATTRIBUTE_UNUSED) {
rl_src = LoadValueWide(rl_src, kCoreReg);
// Per spec, we only care about low 6 bits of shift amount.
int shift_amount = mir_graph_->ConstantValue(rl_shift) & 0x3f;
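
The "reciprocal multiply" mentioned in the magic_table comment above replaces a division by a compile-time constant with a multiplication by a precomputed magic number followed by shifts and a sign correction. The snippet below is a stand-alone illustration for a divisor of 5 using 64-bit arithmetic; it does not reproduce the magic_table entries or the exact ARM instruction sequence the backend emits.

    #include <cassert>
    #include <cstdint>

    // Signed 32-bit division by the constant 5 without a divide instruction.
    // 0x66666667 is ceil(2^33 / 5); take the 64-bit product, arithmetic-shift by 33,
    // then add 1 when n is negative to get truncation toward zero.
    int32_t DivideBy5(int32_t n) {
      int64_t prod = static_cast<int64_t>(n) * INT64_C(0x66666667);
      int32_t q = static_cast<int32_t>(prod >> 33);
      int32_t sign = (n < 0) ? -1 : 0;
      return q - sign;
    }

    int main() {
      for (int n : {7, -7, 10, -13, 2147483647, -2147483647 - 1}) {
        assert(DivideBy5(n) == n / 5);
      }
      return 0;
    }
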
diff --git a/compiler/dex/quick/arm/target_arm.cc b/compiler/dex/quick/arm/target_arm.cc
index 5f27338e6b..355485e03b 100644
--- a/compiler/dex/quick/arm/target_arm.cc
+++ b/compiler/dex/quick/arm/target_arm.cc
@@ -987,8 +987,7 @@ int ArmMir2Lir::GenDalvikArgsBulkCopy(CallInfo* info, int first, int count) {
return count;
}
-void ArmMir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) {
- UNUSED(bb);
+void ArmMir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb ATTRIBUTE_UNUSED, MIR* mir) {
DCHECK(MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode));
RegLocation rl_src[3];
RegLocation rl_dest = mir_graph_->GetBadLoc();
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index 062f7aff66..c31f46b8fe 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -419,20 +419,26 @@ LIR* ArmMir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2)
return OpRegRegShift(op, r_dest_src1, r_src2, 0);
}
-LIR* ArmMir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type) {
- UNUSED(r_dest, r_base, offset, move_type);
+LIR* ArmMir2Lir::OpMovRegMem(RegStorage r_dest ATTRIBUTE_UNUSED,
+ RegStorage r_base ATTRIBUTE_UNUSED,
+ int offset ATTRIBUTE_UNUSED,
+ MoveType move_type ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
UNREACHABLE();
}
-LIR* ArmMir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) {
- UNUSED(r_base, offset, r_src, move_type);
+LIR* ArmMir2Lir::OpMovMemReg(RegStorage r_base ATTRIBUTE_UNUSED,
+ int offset ATTRIBUTE_UNUSED,
+ RegStorage r_src ATTRIBUTE_UNUSED,
+ MoveType move_type ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
UNREACHABLE();
}
-LIR* ArmMir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) {
- UNUSED(op, cc, r_dest, r_src);
+LIR* ArmMir2Lir::OpCondRegReg(OpKind op ATTRIBUTE_UNUSED,
+ ConditionCode cc ATTRIBUTE_UNUSED,
+ RegStorage r_dest ATTRIBUTE_UNUSED,
+ RegStorage r_src ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpCondRegReg for Arm";
UNREACHABLE();
}
@@ -1243,14 +1249,17 @@ LIR* ArmMir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
return res;
}
-LIR* ArmMir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
- UNUSED(op, r_base, disp);
+LIR* ArmMir2Lir::OpMem(OpKind op ATTRIBUTE_UNUSED,
+ RegStorage r_base ATTRIBUTE_UNUSED,
+ int disp ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpMem for Arm";
UNREACHABLE();
}
-LIR* ArmMir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
- UNUSED(trampoline); // The address of the trampoline is already loaded into r_tgt.
+LIR* ArmMir2Lir::InvokeTrampoline(OpKind op,
+ RegStorage r_tgt,
+ // The address of the trampoline is already loaded into r_tgt.
+ QuickEntrypointEnum trampoline ATTRIBUTE_UNUSED) {
return OpReg(op, r_tgt);
}
diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc
index 31cf6675af..d92dea21c2 100644
--- a/compiler/dex/quick/arm64/int_arm64.cc
+++ b/compiler/dex/quick/arm64/int_arm64.cc
@@ -37,14 +37,12 @@ LIR* Arm64Mir2Lir::OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage s
return OpCondBranch(cond, target);
}
-LIR* Arm64Mir2Lir::OpIT(ConditionCode ccode, const char* guide) {
- UNUSED(ccode, guide);
+LIR* Arm64Mir2Lir::OpIT(ConditionCode ccode ATTRIBUTE_UNUSED, const char* guide ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpIT for Arm64";
UNREACHABLE();
}
-void Arm64Mir2Lir::OpEndIT(LIR* it) {
- UNUSED(it);
+void Arm64Mir2Lir::OpEndIT(LIR* it ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpEndIT for Arm64";
}
@@ -188,8 +186,7 @@ void Arm64Mir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, Con
GenSelect(true_val, false_val, code, rs_dest, dest_reg_class);
}
-void Arm64Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
- UNUSED(bb);
+void Arm64Mir2Lir::GenSelect(BasicBlock* bb ATTRIBUTE_UNUSED, MIR* mir) {
RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
rl_src = LoadValue(rl_src, rl_src.ref ? kRefReg : kCoreReg);
// rl_src may be aliased with rl_result/rl_dest, so do compare early.
@@ -413,9 +410,11 @@ static const MagicTable magic_table[] = {
};
// Integer division by constant via reciprocal multiply (Hacker's Delight, 10-4)
-bool Arm64Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
- RegLocation rl_src, RegLocation rl_dest, int lit) {
- UNUSED(dalvik_opcode);
+bool Arm64Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode ATTRIBUTE_UNUSED,
+ bool is_div,
+ RegLocation rl_src,
+ RegLocation rl_dest,
+ int lit) {
if ((lit < 0) || (lit >= static_cast<int>(arraysize(magic_table)))) {
return false;
}
@@ -457,9 +456,11 @@ bool Arm64Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_d
return true;
}
-bool Arm64Mir2Lir::SmallLiteralDivRem64(Instruction::Code dalvik_opcode, bool is_div,
- RegLocation rl_src, RegLocation rl_dest, int64_t lit) {
- UNUSED(dalvik_opcode);
+bool Arm64Mir2Lir::SmallLiteralDivRem64(Instruction::Code dalvik_opcode ATTRIBUTE_UNUSED,
+ bool is_div,
+ RegLocation rl_src,
+ RegLocation rl_dest,
+ int64_t lit) {
if ((lit < 0) || (lit >= static_cast<int>(arraysize(magic_table)))) {
return false;
}
@@ -599,15 +600,17 @@ bool Arm64Mir2Lir::HandleEasyDivRem64(Instruction::Code dalvik_opcode, bool is_d
return true;
}
-bool Arm64Mir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
- UNUSED(rl_src, rl_dest, lit);
+bool Arm64Mir2Lir::EasyMultiply(RegLocation rl_src ATTRIBUTE_UNUSED,
+ RegLocation rl_dest ATTRIBUTE_UNUSED,
+ int lit ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of EasyMultiply for Arm64";
UNREACHABLE();
}
-RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit,
- bool is_div) {
- UNUSED(rl_dest, rl_src1, lit, is_div);
+RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest ATTRIBUTE_UNUSED,
+ RegLocation rl_src1 ATTRIBUTE_UNUSED,
+ int lit ATTRIBUTE_UNUSED,
+ bool is_div ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of GenDivRemLit for Arm64";
UNREACHABLE();
}
@@ -626,9 +629,11 @@ RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg1, int
return rl_result;
}
-RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2, bool is_div, int flags) {
- UNUSED(rl_dest, rl_src1, rl_src2, is_div, flags);
+RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest ATTRIBUTE_UNUSED,
+ RegLocation rl_src1 ATTRIBUTE_UNUSED,
+ RegLocation rl_src2 ATTRIBUTE_UNUSED,
+ bool is_div ATTRIBUTE_UNUSED,
+ int flags ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of GenDivRem for Arm64";
UNREACHABLE();
}
@@ -963,14 +968,12 @@ void Arm64Mir2Lir::OpPcRelDexCacheArrayLoad(const DexFile* dex_file, int offset,
dex_cache_access_insns_.push_back(ldr);
}
-LIR* Arm64Mir2Lir::OpVldm(RegStorage r_base, int count) {
- UNUSED(r_base, count);
+LIR* Arm64Mir2Lir::OpVldm(RegStorage r_base ATTRIBUTE_UNUSED, int count ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpVldm for Arm64";
UNREACHABLE();
}
-LIR* Arm64Mir2Lir::OpVstm(RegStorage r_base, int count) {
- UNUSED(r_base, count);
+LIR* Arm64Mir2Lir::OpVstm(RegStorage r_base ATTRIBUTE_UNUSED, int count ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpVstm for Arm64";
UNREACHABLE();
}
diff --git a/compiler/dex/quick/arm64/target_arm64.cc b/compiler/dex/quick/arm64/target_arm64.cc
index 6efa11e1fd..691bfd9edd 100644
--- a/compiler/dex/quick/arm64/target_arm64.cc
+++ b/compiler/dex/quick/arm64/target_arm64.cc
@@ -881,8 +881,7 @@ int Arm64Mir2Lir::GenDalvikArgsBulkCopy(CallInfo* /*info*/, int /*first*/, int c
return count;
}
-void Arm64Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) {
- UNUSED(bb);
+void Arm64Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb ATTRIBUTE_UNUSED, MIR* mir) {
DCHECK(MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode));
RegLocation rl_src[3];
RegLocation rl_dest = mir_graph_->GetBadLoc();
diff --git a/compiler/dex/quick/arm64/utility_arm64.cc b/compiler/dex/quick/arm64/utility_arm64.cc
index 483231f931..58769ea9cc 100644
--- a/compiler/dex/quick/arm64/utility_arm64.cc
+++ b/compiler/dex/quick/arm64/utility_arm64.cc
@@ -672,22 +672,26 @@ LIR* Arm64Mir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2
}
}
-LIR* Arm64Mir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset,
- MoveType move_type) {
- UNUSED(r_dest, r_base, offset, move_type);
+LIR* Arm64Mir2Lir::OpMovRegMem(RegStorage r_dest ATTRIBUTE_UNUSED,
+ RegStorage r_base ATTRIBUTE_UNUSED,
+ int offset ATTRIBUTE_UNUSED,
+ MoveType move_type ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
UNREACHABLE();
}
-LIR* Arm64Mir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src,
- MoveType move_type) {
- UNUSED(r_base, offset, r_src, move_type);
+LIR* Arm64Mir2Lir::OpMovMemReg(RegStorage r_base ATTRIBUTE_UNUSED,
+ int offset ATTRIBUTE_UNUSED,
+ RegStorage r_src ATTRIBUTE_UNUSED,
+ MoveType move_type ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
return nullptr;
}
-LIR* Arm64Mir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) {
- UNUSED(op, cc, r_dest, r_src);
+LIR* Arm64Mir2Lir::OpCondRegReg(OpKind op ATTRIBUTE_UNUSED,
+ ConditionCode cc ATTRIBUTE_UNUSED,
+ RegStorage r_dest ATTRIBUTE_UNUSED,
+ RegStorage r_src ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpCondRegReg for Arm64";
UNREACHABLE();
}
@@ -1381,14 +1385,15 @@ LIR* Arm64Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage
return store;
}
-LIR* Arm64Mir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
- UNUSED(r_dest, r_src);
+LIR* Arm64Mir2Lir::OpFpRegCopy(RegStorage r_dest ATTRIBUTE_UNUSED,
+ RegStorage r_src ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpFpRegCopy for Arm64";
UNREACHABLE();
}
-LIR* Arm64Mir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
- UNUSED(op, r_base, disp);
+LIR* Arm64Mir2Lir::OpMem(OpKind op ATTRIBUTE_UNUSED,
+ RegStorage r_base ATTRIBUTE_UNUSED,
+ int disp ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpMem for Arm64";
UNREACHABLE();
}
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index d5ac34186b..cde99b3fae 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -16,6 +16,11 @@
#include "mir_to_lir-inl.h"
+// Mac does not provide endian.h, so we'll use byte order agnostic code.
+#ifndef __APPLE__
+#include <endian.h>
+#endif
+
#include "base/bit_vector-inl.h"
#include "dex/mir_graph.h"
#include "driver/compiler_driver.h"
@@ -841,7 +846,7 @@ void Mir2Lir::CreateNativeGcMap() {
references_buffer[i] = static_cast<uint8_t>(
raw_storage[i / sizeof(raw_storage[0])] >> (8u * (i % sizeof(raw_storage[0]))));
}
- native_gc_map_builder.AddEntry(native_offset, &references_buffer[0]);
+ native_gc_map_builder.AddEntry(native_offset, references_buffer.data());
prev_mir = mir;
}
}
@@ -987,8 +992,7 @@ void Mir2Lir::DumpPackedSwitchTable(const uint16_t* table) {
}
/* Set up special LIR to mark a Dalvik byte-code instruction start for pretty printing */
-void Mir2Lir::MarkBoundary(DexOffset offset, const char* inst_str) {
- UNUSED(offset);
+void Mir2Lir::MarkBoundary(DexOffset offset ATTRIBUTE_UNUSED, const char* inst_str) {
// NOTE: only used for debug listings.
NewLIR1(kPseudoDalvikByteCodeBoundary, WrapPointer(ArenaStrdup(inst_str)));
}
@@ -1353,8 +1357,8 @@ RegLocation Mir2Lir::NarrowRegLoc(RegLocation loc) {
return loc;
}
-void Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) {
- UNUSED(bb, mir);
+void Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb ATTRIBUTE_UNUSED,
+ MIR* mir ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unknown MIR opcode not supported on this architecture";
UNREACHABLE();
}
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index af93aabc91..e1a2838f3e 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -73,6 +73,7 @@ static constexpr bool kIntrinsicIsStatic[] = {
false, // kIntrinsicUnsafeGet
false, // kIntrinsicUnsafePut
true, // kIntrinsicSystemArrayCopyCharArray
+ true, // kIntrinsicSystemArrayCopy
};
static_assert(arraysize(kIntrinsicIsStatic) == kInlineOpNop,
"arraysize of kIntrinsicIsStatic unexpected");
@@ -121,6 +122,8 @@ static_assert(!kIntrinsicIsStatic[kIntrinsicUnsafeGet], "UnsafeGet_must_not_be_s
static_assert(!kIntrinsicIsStatic[kIntrinsicUnsafePut], "UnsafePut must not be static");
static_assert(kIntrinsicIsStatic[kIntrinsicSystemArrayCopyCharArray],
"SystemArrayCopyCharArray must be static");
+static_assert(kIntrinsicIsStatic[kIntrinsicSystemArrayCopy],
+ "SystemArrayCopy must be static");
MIR* AllocReplacementMIR(MIRGraph* mir_graph, MIR* invoke) {
MIR* insn = mir_graph->NewMIR();
@@ -326,6 +329,9 @@ const DexFileMethodInliner::ProtoDef DexFileMethodInliner::kProtoCacheDefs[] = {
// kProtoCacheCharArrayICharArrayII_V
{ kClassCacheVoid, 5, {kClassCacheJavaLangCharArray, kClassCacheInt,
kClassCacheJavaLangCharArray, kClassCacheInt, kClassCacheInt} },
+ // kProtoCacheObjectIObjectII_V
+ { kClassCacheVoid, 5, {kClassCacheJavaLangObject, kClassCacheInt,
+ kClassCacheJavaLangObject, kClassCacheInt, kClassCacheInt} },
// kProtoCacheIICharArrayI_V
{ kClassCacheVoid, 4, { kClassCacheInt, kClassCacheInt, kClassCacheJavaLangCharArray,
kClassCacheInt } },
@@ -481,6 +487,8 @@ const DexFileMethodInliner::IntrinsicDef DexFileMethodInliner::kIntrinsicMethods
INTRINSIC(JavaLangSystem, ArrayCopy, CharArrayICharArrayII_V , kIntrinsicSystemArrayCopyCharArray,
0),
+ INTRINSIC(JavaLangSystem, ArrayCopy, ObjectIObjectII_V , kIntrinsicSystemArrayCopy,
+ 0),
INTRINSIC(JavaLangInteger, RotateRight, II_I, kIntrinsicRotateRight, k32),
INTRINSIC(JavaLangLong, RotateRight, JI_J, kIntrinsicRotateRight, k64),
@@ -653,6 +661,7 @@ bool DexFileMethodInliner::GenIntrinsic(Mir2Lir* backend, CallInfo* info) {
case kIntrinsicNumberOfTrailingZeros:
case kIntrinsicRotateRight:
case kIntrinsicRotateLeft:
+ case kIntrinsicSystemArrayCopy:
return false; // not implemented in quick.
default:
LOG(FATAL) << "Unexpected intrinsic opcode: " << intrinsic.opcode;
diff --git a/compiler/dex/quick/dex_file_method_inliner.h b/compiler/dex/quick/dex_file_method_inliner.h
index 8458806e5e..5ce110c120 100644
--- a/compiler/dex/quick/dex_file_method_inliner.h
+++ b/compiler/dex/quick/dex_file_method_inliner.h
@@ -259,6 +259,7 @@ class DexFileMethodInliner {
kProtoCacheObjectJ_Object,
kProtoCacheObjectJObject_V,
kProtoCacheCharArrayICharArrayII_V,
+ kProtoCacheObjectIObjectII_V,
kProtoCacheIICharArrayI_V,
kProtoCacheByteArrayIII_String,
kProtoCacheIICharArray_String,
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 2a1d64425b..2b60a51e22 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -2102,15 +2102,15 @@ void Mir2Lir::GenSuspendTestAndBranch(int opt_flags, LIR* target) {
}
/* Call out to helper assembly routine that will null check obj and then lock it. */
-void Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
- UNUSED(opt_flags); // TODO: avoid null check with specialized non-null helper.
+void Mir2Lir::GenMonitorEnter(int opt_flags ATTRIBUTE_UNUSED, RegLocation rl_src) {
+ // TODO: avoid null check with specialized non-null helper.
FlushAllRegs();
CallRuntimeHelperRegLocation(kQuickLockObject, rl_src, true);
}
/* Call out to helper assembly routine that will null check obj and then unlock it. */
-void Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
- UNUSED(opt_flags); // TODO: avoid null check with specialized non-null helper.
+void Mir2Lir::GenMonitorExit(int opt_flags ATTRIBUTE_UNUSED, RegLocation rl_src) {
+ // TODO: avoid null check with specialized non-null helper.
FlushAllRegs();
CallRuntimeHelperRegLocation(kQuickUnlockObject, rl_src, true);
}
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 3c5c2fe010..422d82ffa2 100755
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -521,10 +521,9 @@ static bool CommonCallCodeLoadCodePointerIntoInvokeTgt(const RegStorage* alt_fro
* kArg1 here rather than the standard GenDalvikArgs.
*/
static int NextVCallInsn(CompilationUnit* cu, CallInfo* info,
- int state, const MethodReference& target_method,
+ int state, const MethodReference& target_method ATTRIBUTE_UNUSED,
uint32_t method_idx, uintptr_t, uintptr_t,
InvokeType) {
- UNUSED(target_method);
Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
/*
* This is the fast path in which the target virtual method is
@@ -607,10 +606,12 @@ static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state,
return state + 1;
}
-static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info,
- QuickEntrypointEnum trampoline, int state,
- const MethodReference& target_method, uint32_t method_idx) {
- UNUSED(info, method_idx);
+static int NextInvokeInsnSP(CompilationUnit* cu,
+ CallInfo* info ATTRIBUTE_UNUSED,
+ QuickEntrypointEnum trampoline,
+ int state,
+ const MethodReference& target_method,
+ uint32_t method_idx ATTRIBUTE_UNUSED) {
Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
/*
@@ -1266,35 +1267,31 @@ bool Mir2Lir::GenInlinedAbsLong(CallInfo* info) {
return true;
}
-bool Mir2Lir::GenInlinedReverseBits(CallInfo* info, OpSize size) {
+bool Mir2Lir::GenInlinedReverseBits(CallInfo* info ATTRIBUTE_UNUSED, OpSize size ATTRIBUTE_UNUSED) {
// Currently implemented only for ARM64.
- UNUSED(info, size);
return false;
}
-bool Mir2Lir::GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double) {
+bool Mir2Lir::GenInlinedMinMaxFP(CallInfo* info ATTRIBUTE_UNUSED,
+ bool is_min ATTRIBUTE_UNUSED,
+ bool is_double ATTRIBUTE_UNUSED) {
// Currently implemented only for ARM64.
- UNUSED(info, is_min, is_double);
return false;
}
-bool Mir2Lir::GenInlinedCeil(CallInfo* info) {
- UNUSED(info);
+bool Mir2Lir::GenInlinedCeil(CallInfo* info ATTRIBUTE_UNUSED) {
return false;
}
-bool Mir2Lir::GenInlinedFloor(CallInfo* info) {
- UNUSED(info);
+bool Mir2Lir::GenInlinedFloor(CallInfo* info ATTRIBUTE_UNUSED) {
return false;
}
-bool Mir2Lir::GenInlinedRint(CallInfo* info) {
- UNUSED(info);
+bool Mir2Lir::GenInlinedRint(CallInfo* info ATTRIBUTE_UNUSED) {
return false;
}
-bool Mir2Lir::GenInlinedRound(CallInfo* info, bool is_double) {
- UNUSED(info, is_double);
+bool Mir2Lir::GenInlinedRound(CallInfo* info ATTRIBUTE_UNUSED, bool is_double ATTRIBUTE_UNUSED) {
return false;
}
@@ -1328,8 +1325,7 @@ bool Mir2Lir::GenInlinedDoubleCvt(CallInfo* info) {
return true;
}
-bool Mir2Lir::GenInlinedArrayCopyCharArray(CallInfo* info) {
- UNUSED(info);
+bool Mir2Lir::GenInlinedArrayCopyCharArray(CallInfo* info ATTRIBUTE_UNUSED) {
return false;
}
diff --git a/compiler/dex/quick/lazy_debug_frame_opcode_writer.h b/compiler/dex/quick/lazy_debug_frame_opcode_writer.h
index 3e9fb96bfa..c425fc852d 100644
--- a/compiler/dex/quick/lazy_debug_frame_opcode_writer.h
+++ b/compiler/dex/quick/lazy_debug_frame_opcode_writer.h
@@ -28,8 +28,8 @@ namespace dwarf {
// When we are generating the CFI code, we do not know the instuction offsets,
// this class stores the LIR references and patches the instruction stream later.
class LazyDebugFrameOpCodeWriter FINAL
- : public DebugFrameOpCodeWriter<ArenaAllocatorAdapter<uint8_t>> {
- typedef DebugFrameOpCodeWriter<ArenaAllocatorAdapter<uint8_t>> Base;
+ : public DebugFrameOpCodeWriter<ArenaVector<uint8_t>> {
+ typedef DebugFrameOpCodeWriter<ArenaVector<uint8_t>> Base;
public:
// This method is implicitely called the by opcode writers.
virtual void ImplicitlyAdvancePC() OVERRIDE {
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index 8863c058a1..4a736f3d93 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -32,9 +32,10 @@
namespace art {
-bool MipsMir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special) {
+bool MipsMir2Lir::GenSpecialCase(BasicBlock* bb ATTRIBUTE_UNUSED,
+ MIR* mir ATTRIBUTE_UNUSED,
+ const InlineMethod& special ATTRIBUTE_UNUSED) {
// TODO
- UNUSED(bb, mir, special);
return false;
}
diff --git a/compiler/dex/quick/mips/fp_mips.cc b/compiler/dex/quick/mips/fp_mips.cc
index 45fd1a9433..52706df7a5 100644
--- a/compiler/dex/quick/mips/fp_mips.cc
+++ b/compiler/dex/quick/mips/fp_mips.cc
@@ -115,17 +115,17 @@ void MipsMir2Lir::GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest
StoreValueWide(rl_dest, rl_result);
}
-void MipsMir2Lir::GenMultiplyByConstantFloat(RegLocation rl_dest, RegLocation rl_src1,
- int32_t constant) {
+void MipsMir2Lir::GenMultiplyByConstantFloat(RegLocation rl_dest ATTRIBUTE_UNUSED,
+ RegLocation rl_src1 ATTRIBUTE_UNUSED,
+ int32_t constant ATTRIBUTE_UNUSED) {
// TODO: need mips implementation.
- UNUSED(rl_dest, rl_src1, constant);
LOG(FATAL) << "Unimplemented GenMultiplyByConstantFloat in mips";
}
-void MipsMir2Lir::GenMultiplyByConstantDouble(RegLocation rl_dest, RegLocation rl_src1,
- int64_t constant) {
+void MipsMir2Lir::GenMultiplyByConstantDouble(RegLocation rl_dest ATTRIBUTE_UNUSED,
+ RegLocation rl_src1 ATTRIBUTE_UNUSED,
+ int64_t constant ATTRIBUTE_UNUSED) {
// TODO: need mips implementation.
- UNUSED(rl_dest, rl_src1, constant);
LOG(FATAL) << "Unimplemented GenMultiplyByConstantDouble in mips";
}
@@ -254,8 +254,10 @@ void MipsMir2Lir::GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLoc
StoreValue(rl_dest, rl_result);
}
-void MipsMir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double) {
- UNUSED(bb, mir, gt_bias, is_double);
+void MipsMir2Lir::GenFusedFPCmpBranch(BasicBlock* bb ATTRIBUTE_UNUSED,
+ MIR* mir ATTRIBUTE_UNUSED,
+ bool gt_bias ATTRIBUTE_UNUSED,
+ bool is_double ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL) << "Need codegen for fused fp cmp branch";
}
@@ -288,9 +290,10 @@ void MipsMir2Lir::GenNegDouble(RegLocation rl_dest, RegLocation rl_src) {
StoreValueWide(rl_dest, rl_result);
}
-bool MipsMir2Lir::GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long) {
+bool MipsMir2Lir::GenInlinedMinMax(CallInfo* info ATTRIBUTE_UNUSED,
+ bool is_min ATTRIBUTE_UNUSED,
+ bool is_long ATTRIBUTE_UNUSED) {
// TODO: need Mips implementation.
- UNUSED(info, is_min, is_long);
return false;
}
diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc
index 1099303f7d..8ca53ea228 100644
--- a/compiler/dex/quick/mips/int_mips.cc
+++ b/compiler/dex/quick/mips/int_mips.cc
@@ -279,8 +279,7 @@ void MipsMir2Lir::OpRegCopyWide(RegStorage r_dest, RegStorage r_src) {
void MipsMir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
int32_t true_val, int32_t false_val, RegStorage rs_dest,
- RegisterClass dest_reg_class) {
- UNUSED(dest_reg_class);
+ RegisterClass dest_reg_class ATTRIBUTE_UNUSED) {
// Implement as a branch-over.
// TODO: Conditional move?
LoadConstant(rs_dest, true_val);
@@ -290,13 +289,12 @@ void MipsMir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, Cond
ne_branchover->target = target_label;
}
-void MipsMir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
- UNUSED(bb, mir);
+void MipsMir2Lir::GenSelect(BasicBlock* bb ATTRIBUTE_UNUSED, MIR* mir ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL) << "Need codegen for select";
}
-void MipsMir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
- UNUSED(bb, mir);
+void MipsMir2Lir::GenFusedLongCmpBranch(BasicBlock* bb ATTRIBUTE_UNUSED,
+ MIR* mir ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL) << "Need codegen for fused long cmp branch";
}
@@ -327,39 +325,40 @@ RegLocation MipsMir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg1, int
return rl_result;
}
-RegLocation MipsMir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
- bool is_div, int flags) {
- UNUSED(rl_dest, rl_src1, rl_src2, is_div, flags);
+RegLocation MipsMir2Lir::GenDivRem(RegLocation rl_dest ATTRIBUTE_UNUSED,
+ RegLocation rl_src1 ATTRIBUTE_UNUSED,
+ RegLocation rl_src2 ATTRIBUTE_UNUSED,
+ bool is_div ATTRIBUTE_UNUSED,
+ int flags ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of GenDivRem for Mips";
UNREACHABLE();
}
-RegLocation MipsMir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit,
- bool is_div) {
- UNUSED(rl_dest, rl_src1, lit, is_div);
+RegLocation MipsMir2Lir::GenDivRemLit(RegLocation rl_dest ATTRIBUTE_UNUSED,
+ RegLocation rl_src1 ATTRIBUTE_UNUSED,
+ int lit ATTRIBUTE_UNUSED,
+ bool is_div ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of GenDivRemLit for Mips";
UNREACHABLE();
}
-bool MipsMir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
- UNUSED(info, is_long, is_object);
+bool MipsMir2Lir::GenInlinedCas(CallInfo* info ATTRIBUTE_UNUSED,
+ bool is_long ATTRIBUTE_UNUSED,
+ bool is_object ATTRIBUTE_UNUSED) {
return false;
}
-bool MipsMir2Lir::GenInlinedAbsFloat(CallInfo* info) {
- UNUSED(info);
+bool MipsMir2Lir::GenInlinedAbsFloat(CallInfo* info ATTRIBUTE_UNUSED) {
// TODO: add Mips implementation.
return false;
}
-bool MipsMir2Lir::GenInlinedAbsDouble(CallInfo* info) {
- UNUSED(info);
+bool MipsMir2Lir::GenInlinedAbsDouble(CallInfo* info ATTRIBUTE_UNUSED) {
// TODO: add Mips implementation.
return false;
}
-bool MipsMir2Lir::GenInlinedSqrt(CallInfo* info) {
- UNUSED(info);
+bool MipsMir2Lir::GenInlinedSqrt(CallInfo* info ATTRIBUTE_UNUSED) {
return false;
}
@@ -408,27 +407,26 @@ bool MipsMir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
return true;
}
-void MipsMir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
- UNUSED(reg, target);
+void MipsMir2Lir::OpPcRelLoad(RegStorage reg ATTRIBUTE_UNUSED, LIR* target ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpPcRelLoad for Mips";
UNREACHABLE();
}
-LIR* MipsMir2Lir::OpVldm(RegStorage r_base, int count) {
- UNUSED(r_base, count);
+LIR* MipsMir2Lir::OpVldm(RegStorage r_base ATTRIBUTE_UNUSED, int count ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpVldm for Mips";
UNREACHABLE();
}
-LIR* MipsMir2Lir::OpVstm(RegStorage r_base, int count) {
- UNUSED(r_base, count);
+LIR* MipsMir2Lir::OpVstm(RegStorage r_base ATTRIBUTE_UNUSED, int count ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpVstm for Mips";
UNREACHABLE();
}
-void MipsMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
- int first_bit, int second_bit) {
- UNUSED(lit);
+void MipsMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
+ RegLocation rl_result,
+ int lit ATTRIBUTE_UNUSED,
+ int first_bit,
+ int second_bit) {
RegStorage t_reg = AllocTemp();
OpRegRegImm(kOpLsl, t_reg, rl_src.reg, second_bit - first_bit);
OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, t_reg);
@@ -462,27 +460,28 @@ LIR* MipsMir2Lir::OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* targ
return OpCmpImmBranch(c_code, reg, 0, target);
}
-bool MipsMir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
- RegLocation rl_src, RegLocation rl_dest, int lit) {
- UNUSED(dalvik_opcode, is_div, rl_src, rl_dest, lit);
- LOG(FATAL) << "Unexpected use of smallLiteralDive in Mips";
+bool MipsMir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode ATTRIBUTE_UNUSED,
+ bool is_div ATTRIBUTE_UNUSED,
+ RegLocation rl_src ATTRIBUTE_UNUSED,
+ RegLocation rl_dest ATTRIBUTE_UNUSED,
+ int lit ATTRIBUTE_UNUSED) {
+ LOG(FATAL) << "Unexpected use of smallLiteralDivRem in Mips";
UNREACHABLE();
}
-bool MipsMir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
- UNUSED(rl_src, rl_dest, lit);
+bool MipsMir2Lir::EasyMultiply(RegLocation rl_src ATTRIBUTE_UNUSED,
+ RegLocation rl_dest ATTRIBUTE_UNUSED,
+ int lit ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of easyMultiply in Mips";
UNREACHABLE();
}
-LIR* MipsMir2Lir::OpIT(ConditionCode cond, const char* guide) {
- UNUSED(cond, guide);
+LIR* MipsMir2Lir::OpIT(ConditionCode cond ATTRIBUTE_UNUSED, const char* guide ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpIT in Mips";
UNREACHABLE();
}
-void MipsMir2Lir::OpEndIT(LIR* it) {
- UNUSED(it);
+void MipsMir2Lir::OpEndIT(LIR* it ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpEndIT in Mips";
}
@@ -621,9 +620,12 @@ void MipsMir2Lir::GenMulLong(RegLocation rl_dest, RegLocation rl_src1, RegLocati
StoreValueWide(rl_dest, rl_result);
}
-void MipsMir2Lir::GenDivRemLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2, bool is_div, int flags) {
- UNUSED(opcode);
+void MipsMir2Lir::GenDivRemLong(Instruction::Code opcode ATTRIBUTE_UNUSED,
+ RegLocation rl_dest,
+ RegLocation rl_src1,
+ RegLocation rl_src2,
+ bool is_div,
+ int flags) {
// TODO: Implement easy div/rem?
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
rl_src2 = LoadValueWide(rl_src2, kCoreReg);
@@ -855,9 +857,11 @@ void MipsMir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
StoreValueWide(rl_dest, rl_result);
}
-void MipsMir2Lir::GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_shift, int flags) {
- UNUSED(flags);
+void MipsMir2Lir::GenShiftImmOpLong(Instruction::Code opcode,
+ RegLocation rl_dest,
+ RegLocation rl_src1,
+ RegLocation rl_shift,
+ int flags ATTRIBUTE_UNUSED) {
if (!cu_->target64) {
// Default implementation is just to ignore the constant case.
GenShiftOpLong(opcode, rl_dest, rl_src1, rl_shift);
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index ec2475a7f7..372fe2b599 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -103,18 +103,15 @@ bool MipsMir2Lir::InexpensiveConstantInt(int32_t value) {
return ((value == 0) || IsUint<16>(value) || IsInt<16>(value));
}
-bool MipsMir2Lir::InexpensiveConstantFloat(int32_t value) {
- UNUSED(value);
+bool MipsMir2Lir::InexpensiveConstantFloat(int32_t value ATTRIBUTE_UNUSED) {
return false; // TUNING
}
-bool MipsMir2Lir::InexpensiveConstantLong(int64_t value) {
- UNUSED(value);
+bool MipsMir2Lir::InexpensiveConstantLong(int64_t value ATTRIBUTE_UNUSED) {
return false; // TUNING
}
-bool MipsMir2Lir::InexpensiveConstantDouble(int64_t value) {
- UNUSED(value);
+bool MipsMir2Lir::InexpensiveConstantDouble(int64_t value ATTRIBUTE_UNUSED) {
return false; // TUNING
}
@@ -520,21 +517,26 @@ LIR* MipsMir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2)
return NewLIR2(opcode, r_dest_src1.GetReg(), r_src2.GetReg());
}
-LIR* MipsMir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset,
- MoveType move_type) {
- UNUSED(r_dest, r_base, offset, move_type);
+LIR* MipsMir2Lir::OpMovRegMem(RegStorage r_dest ATTRIBUTE_UNUSED,
+ RegStorage r_base ATTRIBUTE_UNUSED,
+ int offset ATTRIBUTE_UNUSED,
+ MoveType move_type ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
UNREACHABLE();
}
-LIR* MipsMir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) {
- UNUSED(r_base, offset, r_src, move_type);
+LIR* MipsMir2Lir::OpMovMemReg(RegStorage r_base ATTRIBUTE_UNUSED,
+ int offset ATTRIBUTE_UNUSED,
+ RegStorage r_src ATTRIBUTE_UNUSED,
+ MoveType move_type ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
UNREACHABLE();
}
-LIR* MipsMir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) {
- UNUSED(op, cc, r_dest, r_src);
+LIR* MipsMir2Lir::OpCondRegReg(OpKind op ATTRIBUTE_UNUSED,
+ ConditionCode cc ATTRIBUTE_UNUSED,
+ RegStorage r_dest ATTRIBUTE_UNUSED,
+ RegStorage r_src ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpCondRegReg for MIPS";
UNREACHABLE();
}
@@ -1031,14 +1033,14 @@ LIR* MipsMir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage
return store;
}
-LIR* MipsMir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
- UNUSED(op, r_base, disp);
+LIR* MipsMir2Lir::OpMem(OpKind op ATTRIBUTE_UNUSED,
+ RegStorage r_base ATTRIBUTE_UNUSED,
+ int disp ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpMem for MIPS";
UNREACHABLE();
}
-LIR* MipsMir2Lir::OpCondBranch(ConditionCode cc, LIR* target) {
- UNUSED(cc, target);
+LIR* MipsMir2Lir::OpCondBranch(ConditionCode cc ATTRIBUTE_UNUSED, LIR* target ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpCondBranch for MIPS";
UNREACHABLE();
}
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index c50246d182..8da386368b 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -1411,8 +1411,7 @@ void Mir2Lir::CheckRegLocationImpl(RegLocation rl, bool fail, bool report) const
rl.ref ? RefCheck::kCheckRef : RefCheck::kCheckNotRef, FPCheck::kIgnoreFP, fail, report);
}
-size_t Mir2Lir::GetInstructionOffset(LIR* lir) {
- UNUSED(lir);
+size_t Mir2Lir::GetInstructionOffset(LIR* lir ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL) << "Unsupported GetInstructionOffset()";
UNREACHABLE();
}
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 4e3aab2f0b..a0db1e87ba 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -1463,8 +1463,7 @@ class Mir2Lir {
virtual bool InexpensiveConstantFloat(int32_t value) = 0;
virtual bool InexpensiveConstantLong(int64_t value) = 0;
virtual bool InexpensiveConstantDouble(int64_t value) = 0;
- virtual bool InexpensiveConstantInt(int32_t value, Instruction::Code opcode) {
- UNUSED(opcode);
+ virtual bool InexpensiveConstantInt(int32_t value, Instruction::Code opcode ATTRIBUTE_UNUSED) {
return InexpensiveConstantInt(value);
}
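
The recurring change across these quick-backend files is mechanical: instead of silencing unused-parameter warnings with a UNUSED(...) statement in the body, the parameter itself is annotated with ATTRIBUTE_UNUSED. A minimal standalone sketch of the idiom, assuming ATTRIBUTE_UNUSED expands to the GCC/Clang unused attribute as ART's base/macros.h does:

    // Local definition so the snippet stands alone; assumed to match base/macros.h.
    #define ATTRIBUTE_UNUSED __attribute__((__unused__))

    // Before: void Foo(int bar) { UNUSED(bar); ... }
    // After: the signature itself documents the unused parameter.
    void Foo(int bar ATTRIBUTE_UNUSED) {
      // Body never reads 'bar'; the attribute keeps -Wunused-parameter quiet.
    }
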
diff --git a/compiler/dex/quick/quick_compiler.cc b/compiler/dex/quick/quick_compiler.cc
index 1cd742abac..6673ea8ac5 100644
--- a/compiler/dex/quick/quick_compiler.cc
+++ b/compiler/dex/quick/quick_compiler.cc
@@ -37,6 +37,7 @@
#include "driver/compiler_driver.h"
#include "driver/compiler_options.h"
#include "elf_writer_quick.h"
+#include "experimental_flags.h"
#include "jni/quick/jni_compiler.h"
#include "mir_to_lir.h"
#include "mirror/object.h"
@@ -523,7 +524,8 @@ static bool SkipScanningUnsupportedOpcodes(InstructionSet instruction_set) {
// All opcodes are supported no matter what. Usually not the case
// since experimental opcodes are not implemented in the quick compiler.
return true;
- } else if (LIKELY(!Runtime::Current()->AreExperimentalLambdasEnabled())) {
+ } else if (LIKELY(!Runtime::Current()->
+ AreExperimentalFlagsEnabled(ExperimentalFlags::kLambdas))) {
// Experimental opcodes are disabled.
//
// If all unsupported opcodes are experimental we don't need to do scanning.
@@ -849,8 +851,8 @@ uintptr_t QuickCompiler::GetEntryPointOf(ArtMethod* method) const {
InstructionSetPointerSize(GetCompilerDriver()->GetInstructionSet())));
}
-Mir2Lir* QuickCompiler::GetCodeGenerator(CompilationUnit* cu, void* compilation_unit) {
- UNUSED(compilation_unit);
+Mir2Lir* QuickCompiler::GetCodeGenerator(CompilationUnit* cu,
+ void* compilation_unit ATTRIBUTE_UNUSED) {
Mir2Lir* mir_to_lir = nullptr;
switch (cu->instruction_set) {
#ifdef ART_ENABLE_CODEGEN_arm
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index 8ec86fa56c..d9d0434e8a 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -320,15 +320,13 @@ RegStorage Mir2Lir::AllocPreservedFpReg(int s_reg) {
}
// TODO: this is Thumb2 only. Remove when DoPromotion refactored.
-RegStorage Mir2Lir::AllocPreservedDouble(int s_reg) {
- UNUSED(s_reg);
+RegStorage Mir2Lir::AllocPreservedDouble(int s_reg ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL) << "Unexpected use of AllocPreservedDouble";
UNREACHABLE();
}
// TODO: this is Thumb2 only. Remove when DoPromotion refactored.
-RegStorage Mir2Lir::AllocPreservedSingle(int s_reg) {
- UNUSED(s_reg);
+RegStorage Mir2Lir::AllocPreservedSingle(int s_reg ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL) << "Unexpected use of AllocPreservedSingle";
UNREACHABLE();
}
@@ -1553,8 +1551,7 @@ int Mir2Lir::GetSRegHi(int lowSreg) {
return (lowSreg == INVALID_SREG) ? INVALID_SREG : lowSreg + 1;
}
-bool Mir2Lir::LiveOut(int s_reg) {
- UNUSED(s_reg);
+bool Mir2Lir::LiveOut(int s_reg ATTRIBUTE_UNUSED) {
// For now.
return true;
}
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
index 12523aca76..64becb9b85 100644
--- a/compiler/dex/quick/x86/assemble_x86.cc
+++ b/compiler/dex/quick/x86/assemble_x86.cc
@@ -1629,8 +1629,8 @@ void X86Mir2Lir::EmitUnimplemented(const X86EncodingMap* entry, LIR* lir) {
* instruction. In those cases we will try to substitute a new code
* sequence or request that the trace be shortened and retried.
*/
-AssemblerStatus X86Mir2Lir::AssembleInstructions(LIR* first_lir_insn, CodeOffset start_addr) {
- UNUSED(start_addr);
+AssemblerStatus X86Mir2Lir::AssembleInstructions(LIR* first_lir_insn,
+ CodeOffset start_addr ATTRIBUTE_UNUSED) {
LIR *lir;
AssemblerStatus res = kSuccess; // Assume success
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index 8e81746db5..b11d41caf0 100755
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -124,17 +124,17 @@ void X86Mir2Lir::GenArithOpDouble(Instruction::Code opcode,
StoreValueWide(rl_dest, rl_result);
}
-void X86Mir2Lir::GenMultiplyByConstantFloat(RegLocation rl_dest, RegLocation rl_src1,
- int32_t constant) {
+void X86Mir2Lir::GenMultiplyByConstantFloat(RegLocation rl_dest ATTRIBUTE_UNUSED,
+ RegLocation rl_src1 ATTRIBUTE_UNUSED,
+ int32_t constant ATTRIBUTE_UNUSED) {
// TODO: need x86 implementation.
- UNUSED(rl_dest, rl_src1, constant);
LOG(FATAL) << "Unimplemented GenMultiplyByConstantFloat in x86";
}
-void X86Mir2Lir::GenMultiplyByConstantDouble(RegLocation rl_dest, RegLocation rl_src1,
- int64_t constant) {
+void X86Mir2Lir::GenMultiplyByConstantDouble(RegLocation rl_dest ATTRIBUTE_UNUSED,
+ RegLocation rl_src1 ATTRIBUTE_UNUSED,
+ int64_t constant ATTRIBUTE_UNUSED) {
// TODO: need x86 implementation.
- UNUSED(rl_dest, rl_src1, constant);
LOG(FATAL) << "Unimplemented GenMultiplyByConstantDouble in x86";
}
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index ecd23e9ef0..a8706c3b09 100755
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -270,8 +270,7 @@ void X86Mir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, Condi
}
}
-void X86Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
- UNUSED(bb);
+void X86Mir2Lir::GenSelect(BasicBlock* bb ATTRIBUTE_UNUSED, MIR* mir) {
RegLocation rl_result;
RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
RegLocation rl_dest = mir_graph_->GetDest(mir);
@@ -597,8 +596,10 @@ void X86Mir2Lir::CalculateMagicAndShift(int64_t divisor, int64_t& magic, int& sh
shift = (is_long) ? p - 64 : p - 32;
}
-RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div) {
- UNUSED(rl_dest, reg_lo, lit, is_div);
+RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest ATTRIBUTE_UNUSED,
+ RegStorage reg_lo ATTRIBUTE_UNUSED,
+ int lit ATTRIBUTE_UNUSED,
+ bool is_div ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of GenDivRemLit for x86";
UNREACHABLE();
}
@@ -766,16 +767,19 @@ RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src,
return rl_result;
}
-RegLocation X86Mir2Lir::GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi,
- bool is_div) {
- UNUSED(rl_dest, reg_lo, reg_hi, is_div);
+RegLocation X86Mir2Lir::GenDivRem(RegLocation rl_dest ATTRIBUTE_UNUSED,
+ RegStorage reg_lo ATTRIBUTE_UNUSED,
+ RegStorage reg_hi ATTRIBUTE_UNUSED,
+ bool is_div ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of GenDivRem for x86";
UNREACHABLE();
}
-RegLocation X86Mir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2, bool is_div, int flags) {
- UNUSED(rl_dest);
+RegLocation X86Mir2Lir::GenDivRem(RegLocation rl_dest ATTRIBUTE_UNUSED,
+ RegLocation rl_src1,
+ RegLocation rl_src2,
+ bool is_div,
+ int flags) {
// We have to use fixed registers, so flush all the temps.
// Prepare for explicit register usage.
@@ -1449,22 +1453,21 @@ void X86Mir2Lir::OpPcRelDexCacheArrayLoad(const DexFile* dex_file, int offset, R
}
}
-LIR* X86Mir2Lir::OpVldm(RegStorage r_base, int count) {
- UNUSED(r_base, count);
+LIR* X86Mir2Lir::OpVldm(RegStorage r_base ATTRIBUTE_UNUSED, int count ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpVldm for x86";
UNREACHABLE();
}
-LIR* X86Mir2Lir::OpVstm(RegStorage r_base, int count) {
- UNUSED(r_base, count);
+LIR* X86Mir2Lir::OpVstm(RegStorage r_base ATTRIBUTE_UNUSED, int count ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpVstm for x86";
UNREACHABLE();
}
void X86Mir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
- RegLocation rl_result, int lit,
- int first_bit, int second_bit) {
- UNUSED(lit);
+ RegLocation rl_result,
+ int lit ATTRIBUTE_UNUSED,
+ int first_bit,
+ int second_bit) {
RegStorage t_reg = AllocTemp();
OpRegRegImm(kOpLsl, t_reg, rl_src.reg, second_bit - first_bit);
OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, t_reg);
@@ -1595,27 +1598,28 @@ LIR* X86Mir2Lir::OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* targe
return OpCondBranch(c_code, target);
}
-bool X86Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
- RegLocation rl_src, RegLocation rl_dest, int lit) {
- UNUSED(dalvik_opcode, is_div, rl_src, rl_dest, lit);
- LOG(FATAL) << "Unexpected use of smallLiteralDive in x86";
+bool X86Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode ATTRIBUTE_UNUSED,
+ bool is_div ATTRIBUTE_UNUSED,
+ RegLocation rl_src ATTRIBUTE_UNUSED,
+ RegLocation rl_dest ATTRIBUTE_UNUSED,
+ int lit ATTRIBUTE_UNUSED) {
+ LOG(FATAL) << "Unexpected use of smallLiteralDivRem in x86";
UNREACHABLE();
}
-bool X86Mir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
- UNUSED(rl_src, rl_dest, lit);
+bool X86Mir2Lir::EasyMultiply(RegLocation rl_src ATTRIBUTE_UNUSED,
+ RegLocation rl_dest ATTRIBUTE_UNUSED,
+ int lit ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of easyMultiply in x86";
UNREACHABLE();
}
-LIR* X86Mir2Lir::OpIT(ConditionCode cond, const char* guide) {
- UNUSED(cond, guide);
+LIR* X86Mir2Lir::OpIT(ConditionCode cond ATTRIBUTE_UNUSED, const char* guide ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpIT in x86";
UNREACHABLE();
}
-void X86Mir2Lir::OpEndIT(LIR* it) {
- UNUSED(it);
+void X86Mir2Lir::OpEndIT(LIR* it ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpEndIT in x86";
UNREACHABLE();
}
@@ -1634,8 +1638,10 @@ void X86Mir2Lir::GenImulRegImm(RegStorage dest, RegStorage src, int val) {
}
}
-void X86Mir2Lir::GenImulMemImm(RegStorage dest, int sreg, int displacement, int val) {
- UNUSED(sreg);
+void X86Mir2Lir::GenImulMemImm(RegStorage dest,
+ int sreg ATTRIBUTE_UNUSED,
+ int displacement,
+ int val) {
// All memory accesses below reference dalvik regs.
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
@@ -2548,9 +2554,11 @@ void X86Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
}
}
-RegLocation X86Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src, int shift_amount, int flags) {
- UNUSED(flags);
+RegLocation X86Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode,
+ RegLocation rl_dest,
+ RegLocation rl_src,
+ int shift_amount,
+ int flags ATTRIBUTE_UNUSED) {
RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
if (cu_->target64) {
OpKind op = static_cast<OpKind>(0); /* Make gcc happy */
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index c62cd47315..25fb8869b6 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -254,8 +254,7 @@ RegStorage X86Mir2Lir::TargetReg32(SpecialTargetRegister reg) const {
: RegStorage32FromSpecialTargetRegister_Target32[reg];
}
-RegStorage X86Mir2Lir::TargetReg(SpecialTargetRegister reg) {
- UNUSED(reg);
+RegStorage X86Mir2Lir::TargetReg(SpecialTargetRegister reg ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Do not use this function!!!";
UNREACHABLE();
}
@@ -861,8 +860,7 @@ Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
}
// Not used in x86(-64)
-RegStorage X86Mir2Lir::LoadHelper(QuickEntrypointEnum trampoline) {
- UNUSED(trampoline);
+RegStorage X86Mir2Lir::LoadHelper(QuickEntrypointEnum trampoline ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of LoadHelper in x86";
UNREACHABLE();
}
@@ -2323,13 +2321,11 @@ void X86Mir2Lir::GenSetVector(MIR* mir) {
}
}
-void X86Mir2Lir::GenPackedArrayGet(BasicBlock* bb, MIR* mir) {
- UNUSED(bb, mir);
+void X86Mir2Lir::GenPackedArrayGet(BasicBlock* bb ATTRIBUTE_UNUSED, MIR* mir ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL) << "Extended opcode kMirOpPackedArrayGet not supported.";
}
-void X86Mir2Lir::GenPackedArrayPut(BasicBlock* bb, MIR* mir) {
- UNUSED(bb, mir);
+void X86Mir2Lir::GenPackedArrayPut(BasicBlock* bb ATTRIBUTE_UNUSED, MIR* mir ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL) << "Extended opcode kMirOpPackedArrayPut not supported.";
}
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index b16ae982f2..61354dfc53 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -57,8 +57,7 @@ LIR* X86Mir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
return res;
}
-bool X86Mir2Lir::InexpensiveConstantInt(int32_t value) {
- UNUSED(value);
+bool X86Mir2Lir::InexpensiveConstantInt(int32_t value ATTRIBUTE_UNUSED) {
return true;
}
@@ -66,8 +65,7 @@ bool X86Mir2Lir::InexpensiveConstantFloat(int32_t value) {
return value == 0;
}
-bool X86Mir2Lir::InexpensiveConstantLong(int64_t value) {
- UNUSED(value);
+bool X86Mir2Lir::InexpensiveConstantLong(int64_t value ATTRIBUTE_UNUSED) {
return true;
}
@@ -942,9 +940,14 @@ LIR* X86Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r
return store;
}
-LIR* X86Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
- int offset, int check_value, LIR* target, LIR** compare) {
- UNUSED(temp_reg); // Comparison performed directly with memory.
+LIR* X86Mir2Lir::OpCmpMemImmBranch(ConditionCode cond,
+ // Comparison performed directly with memory.
+ RegStorage temp_reg ATTRIBUTE_UNUSED,
+ RegStorage base_reg,
+ int offset,
+ int check_value,
+ LIR* target,
+ LIR** compare) {
LIR* inst = NewLIR3(IS_SIMM8(check_value) ? kX86Cmp32MI8 : kX86Cmp32MI, base_reg.GetReg(),
offset, check_value);
if (compare != nullptr) {
@@ -1114,8 +1117,11 @@ RegLocation X86Mir2Lir::UpdateLocWideTyped(RegLocation loc) {
return loc;
}
-LIR* X86Mir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
- UNUSED(r_tgt); // Call to absolute memory location doesn't need a temporary target register.
+LIR* X86Mir2Lir::InvokeTrampoline(OpKind op,
+ // Call to absolute memory location doesn't
+ // need a temporary target register.
+ RegStorage r_tgt ATTRIBUTE_UNUSED,
+ QuickEntrypointEnum trampoline) {
if (cu_->target64) {
return OpThreadMem(op, GetThreadOffset<8>(trampoline));
} else {
diff --git a/compiler/dex/verification_results.cc b/compiler/dex/verification_results.cc
index 6f2b2341e0..65b0ad6400 100644
--- a/compiler/dex/verification_results.cc
+++ b/compiler/dex/verification_results.cc
@@ -34,7 +34,6 @@ VerificationResults::VerificationResults(const CompilerOptions* compiler_options
verified_methods_(),
rejected_classes_lock_("compiler rejected classes lock"),
rejected_classes_() {
- UNUSED(compiler_options);
}
VerificationResults::~VerificationResults() {
diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h
index e535afd272..1a7dbe3a9f 100644
--- a/compiler/driver/compiler_driver-inl.h
+++ b/compiler/driver/compiler_driver-inl.h
@@ -370,7 +370,9 @@ inline int CompilerDriver::IsFastInvoke(
nullptr, kVirtual);
} else {
StackHandleScope<1> hs(soa.Self());
- auto target_dex_cache(hs.NewHandle(class_linker->RegisterDexFile(*devirt_target->dex_file)));
+ auto target_dex_cache(hs.NewHandle(class_linker->RegisterDexFile(
+ *devirt_target->dex_file,
+ class_linker->GetOrCreateAllocatorForClassLoader(class_loader.Get()))));
called_method = class_linker->ResolveMethod(
*devirt_target->dex_file, devirt_target->dex_method_index, target_dex_cache,
class_loader, nullptr, kVirtual);
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index f74b079aed..8324bf30d6 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -456,14 +456,6 @@ CompilerDriver::~CompilerDriver() {
type ## _ENTRYPOINT_OFFSET(4, offset)); \
}
-const std::vector<uint8_t>* CompilerDriver::CreateInterpreterToInterpreterBridge() const {
- CREATE_TRAMPOLINE(INTERPRETER, kInterpreterAbi, pInterpreterToInterpreterBridge)
-}
-
-const std::vector<uint8_t>* CompilerDriver::CreateInterpreterToCompiledCodeBridge() const {
- CREATE_TRAMPOLINE(INTERPRETER, kInterpreterAbi, pInterpreterToCompiledCodeBridge)
-}
-
const std::vector<uint8_t>* CompilerDriver::CreateJniDlsymLookup() const {
CREATE_TRAMPOLINE(JNI, kJniAbi, pDlsymLookup)
}
@@ -961,7 +953,9 @@ void CompilerDriver::LoadImageClasses(TimingLogger* timings) {
uint16_t exception_type_idx = exception_type.first;
const DexFile* dex_file = exception_type.second;
StackHandleScope<2> hs2(self);
- Handle<mirror::DexCache> dex_cache(hs2.NewHandle(class_linker->RegisterDexFile(*dex_file)));
+ Handle<mirror::DexCache> dex_cache(hs2.NewHandle(class_linker->RegisterDexFile(
+ *dex_file,
+ Runtime::Current()->GetLinearAlloc())));
Handle<mirror::Class> klass(hs2.NewHandle(
class_linker->ResolveType(*dex_file, exception_type_idx, dex_cache,
NullHandle<mirror::ClassLoader>())));
@@ -2018,9 +2012,11 @@ class ResolveTypeVisitor : public CompilationVisitor {
ClassLinker* class_linker = manager_->GetClassLinker();
const DexFile& dex_file = *manager_->GetDexFile();
StackHandleScope<2> hs(soa.Self());
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->RegisterDexFile(dex_file)));
Handle<mirror::ClassLoader> class_loader(
hs.NewHandle(soa.Decode<mirror::ClassLoader*>(manager_->GetClassLoader())));
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->RegisterDexFile(
+ dex_file,
+ class_linker->GetOrCreateAllocatorForClassLoader(class_loader.Get()))));
mirror::Class* klass = class_linker->ResolveType(dex_file, type_idx, dex_cache, class_loader);
if (klass == nullptr) {
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 11e782f437..0dc8261aac 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -155,10 +155,6 @@ class CompilerDriver {
}
// Generate the trampolines that are invoked by unresolved direct methods.
- const std::vector<uint8_t>* CreateInterpreterToInterpreterBridge() const
- SHARED_REQUIRES(Locks::mutator_lock_);
- const std::vector<uint8_t>* CreateInterpreterToCompiledCodeBridge() const
- SHARED_REQUIRES(Locks::mutator_lock_);
const std::vector<uint8_t>* CreateJniDlsymLookup() const
SHARED_REQUIRES(Locks::mutator_lock_);
const std::vector<uint8_t>* CreateQuickGenericJniTrampoline() const
diff --git a/compiler/dwarf/debug_frame_opcode_writer.h b/compiler/dwarf/debug_frame_opcode_writer.h
index d8077d5da9..60241f722c 100644
--- a/compiler/dwarf/debug_frame_opcode_writer.h
+++ b/compiler/dwarf/debug_frame_opcode_writer.h
@@ -31,8 +31,10 @@ namespace dwarf {
// * Choose the most compact encoding of a given opcode.
// * Keep track of current state and convert absolute values to deltas.
// * Divide by header-defined factors as appropriate.
-template<typename Allocator = std::allocator<uint8_t> >
-class DebugFrameOpCodeWriter : private Writer<Allocator> {
+template<typename Vector = std::vector<uint8_t> >
+class DebugFrameOpCodeWriter : private Writer<Vector> {
+ static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type");
+
public:
// To save space, DWARF divides most offsets by header-defined factors.
// They are used in integer divisions, so we make them constants.
@@ -288,11 +290,12 @@ class DebugFrameOpCodeWriter : private Writer<Allocator> {
void SetCurrentCFAOffset(int offset) { current_cfa_offset_ = offset; }
- using Writer<Allocator>::data;
+ using Writer<Vector>::data;
DebugFrameOpCodeWriter(bool enabled = true,
- const Allocator& alloc = Allocator())
- : Writer<Allocator>(&opcodes_),
+ const typename Vector::allocator_type& alloc =
+ typename Vector::allocator_type())
+ : Writer<Vector>(&opcodes_),
enabled_(enabled),
opcodes_(alloc),
current_cfa_offset_(0),
@@ -318,7 +321,7 @@ class DebugFrameOpCodeWriter : private Writer<Allocator> {
}
bool enabled_; // If disabled all writes are no-ops.
- std::vector<uint8_t, Allocator> opcodes_;
+ Vector opcodes_;
int current_cfa_offset_;
int current_pc_;
bool uses_dwarf3_features_;
diff --git a/compiler/dwarf/debug_info_entry_writer.h b/compiler/dwarf/debug_info_entry_writer.h
index f5b9ca5b64..d9b367bdf1 100644
--- a/compiler/dwarf/debug_info_entry_writer.h
+++ b/compiler/dwarf/debug_info_entry_writer.h
@@ -29,9 +29,11 @@ namespace dwarf {
// 32-bit FNV-1a hash function which we use to find duplicate abbreviations.
// See http://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function
-template< typename Allocator >
+template <typename Vector>
struct FNVHash {
- size_t operator()(const std::vector<uint8_t, Allocator>& v) const {
+ static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type");
+
+ size_t operator()(const Vector& v) const {
uint32_t hash = 2166136261u;
for (size_t i = 0; i < v.size(); i++) {
hash = (hash ^ v[i]) * 16777619u;
@@ -52,8 +54,10 @@ struct FNVHash {
* EndTag();
* EndTag();
*/
-template< typename Allocator = std::allocator<uint8_t> >
-class DebugInfoEntryWriter FINAL : private Writer<Allocator> {
+template <typename Vector = std::vector<uint8_t>>
+class DebugInfoEntryWriter FINAL : private Writer<Vector> {
+ static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type");
+
public:
// Start debugging information entry.
void StartTag(Tag tag, Children children) {
@@ -176,12 +180,13 @@ class DebugInfoEntryWriter FINAL : private Writer<Allocator> {
return patch_locations_;
}
- using Writer<Allocator>::data;
+ using Writer<Vector>::data;
DebugInfoEntryWriter(bool is64bitArch,
- std::vector<uint8_t, Allocator>* debug_abbrev,
- const Allocator& alloc = Allocator())
- : Writer<Allocator>(&entries_),
+ Vector* debug_abbrev,
+ const typename Vector::allocator_type& alloc =
+ typename Vector::allocator_type())
+ : Writer<Vector>(&entries_),
debug_abbrev_(debug_abbrev),
current_abbrev_(alloc),
abbrev_codes_(alloc),
@@ -221,7 +226,7 @@ class DebugInfoEntryWriter FINAL : private Writer<Allocator> {
NextAbbrevCode()));
int abbrev_code = it.first->second;
if (UNLIKELY(it.second)) { // Inserted new entry.
- const std::vector<uint8_t, Allocator>& abbrev = it.first->first;
+ const Vector& abbrev = it.first->first;
debug_abbrev_.Pop(); // Remove abbrev table terminator.
debug_abbrev_.PushUleb128(abbrev_code);
debug_abbrev_.PushData(abbrev.data(), abbrev.size());
@@ -234,13 +239,13 @@ class DebugInfoEntryWriter FINAL : private Writer<Allocator> {
private:
// Fields for writing and deduplication of abbrevs.
- Writer<Allocator> debug_abbrev_;
- std::vector<uint8_t, Allocator> current_abbrev_;
- std::unordered_map<std::vector<uint8_t, Allocator>, int,
- FNVHash<Allocator> > abbrev_codes_;
+ Writer<Vector> debug_abbrev_;
+ Vector current_abbrev_;
+ std::unordered_map<Vector, int,
+ FNVHash<Vector> > abbrev_codes_;
// Fields for writing of debugging information entries.
- std::vector<uint8_t, Allocator> entries_;
+ Vector entries_;
bool is64bit_;
int depth_ = 0;
size_t abbrev_code_offset_ = 0; // Location to patch once we know the code.
diff --git a/compiler/dwarf/debug_line_opcode_writer.h b/compiler/dwarf/debug_line_opcode_writer.h
index bdc25e4a30..201f0b40bc 100644
--- a/compiler/dwarf/debug_line_opcode_writer.h
+++ b/compiler/dwarf/debug_line_opcode_writer.h
@@ -30,8 +30,10 @@ namespace dwarf {
// * Choose the most compact encoding of a given opcode.
// * Keep track of current state and convert absolute values to deltas.
// * Divide by header-defined factors as appropriate.
-template<typename Allocator = std::allocator<uint8_t>>
-class DebugLineOpCodeWriter FINAL : private Writer<Allocator> {
+template<typename Vector = std::vector<uint8_t>>
+class DebugLineOpCodeWriter FINAL : private Writer<Vector> {
+ static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type");
+
public:
static constexpr int kOpcodeBase = 13;
static constexpr bool kDefaultIsStmt = true;
@@ -212,12 +214,13 @@ class DebugLineOpCodeWriter FINAL : private Writer<Allocator> {
return patch_locations_;
}
- using Writer<Allocator>::data;
+ using Writer<Vector>::data;
DebugLineOpCodeWriter(bool use64bitAddress,
int codeFactorBits,
- const Allocator& alloc = Allocator())
- : Writer<Allocator>(&opcodes_),
+ const typename Vector::allocator_type& alloc =
+ typename Vector::allocator_type())
+ : Writer<Vector>(&opcodes_),
opcodes_(alloc),
uses_dwarf3_features_(false),
use_64bit_address_(use64bitAddress),
@@ -234,7 +237,7 @@ class DebugLineOpCodeWriter FINAL : private Writer<Allocator> {
return offset >> code_factor_bits_;
}
- std::vector<uint8_t, Allocator> opcodes_;
+ Vector opcodes_;
bool uses_dwarf3_features_;
bool use_64bit_address_;
int code_factor_bits_;
diff --git a/compiler/dwarf/headers.h b/compiler/dwarf/headers.h
index ae57755f43..b7eff19bed 100644
--- a/compiler/dwarf/headers.h
+++ b/compiler/dwarf/headers.h
@@ -36,21 +36,23 @@ namespace dwarf {
// In particular, it is not related to machine architecture.
// Write common information entry (CIE) to .debug_frame or .eh_frame section.
-template<typename Allocator>
+template<typename Vector>
void WriteDebugFrameCIE(bool is64bit,
ExceptionHeaderValueApplication address_type,
Reg return_address_register,
- const DebugFrameOpCodeWriter<Allocator>& opcodes,
+ const DebugFrameOpCodeWriter<Vector>& opcodes,
CFIFormat format,
std::vector<uint8_t>* debug_frame) {
+ static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type");
+
Writer<> writer(debug_frame);
size_t cie_header_start_ = writer.data()->size();
writer.PushUint32(0); // Length placeholder.
writer.PushUint32((format == DW_EH_FRAME_FORMAT) ? 0 : 0xFFFFFFFF); // CIE id.
writer.PushUint8(1); // Version.
writer.PushString("zR");
- writer.PushUleb128(DebugFrameOpCodeWriter<Allocator>::kCodeAlignmentFactor);
- writer.PushSleb128(DebugFrameOpCodeWriter<Allocator>::kDataAlignmentFactor);
+ writer.PushUleb128(DebugFrameOpCodeWriter<Vector>::kCodeAlignmentFactor);
+ writer.PushSleb128(DebugFrameOpCodeWriter<Vector>::kDataAlignmentFactor);
writer.PushUleb128(return_address_register.num()); // ubyte in DWARF2.
writer.PushUleb128(1); // z: Augmentation data size.
if (is64bit) {
@@ -74,13 +76,15 @@ void WriteDebugFrameCIE(bool is64bit,
}
// Write frame description entry (FDE) to .debug_frame or .eh_frame section.
-template<typename Allocator>
+template<typename Vector>
void WriteDebugFrameFDE(bool is64bit, size_t cie_offset,
uint64_t initial_address, uint64_t address_range,
- const std::vector<uint8_t, Allocator>* opcodes,
+ const Vector* opcodes,
CFIFormat format,
std::vector<uint8_t>* debug_frame,
std::vector<uintptr_t>* debug_frame_patches) {
+ static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type");
+
Writer<> writer(debug_frame);
size_t fde_header_start = writer.data()->size();
writer.PushUint32(0); // Length placeholder.
@@ -107,11 +111,13 @@ void WriteDebugFrameFDE(bool is64bit, size_t cie_offset,
}
// Write compilation unit (CU) to .debug_info section.
-template<typename Allocator>
+template<typename Vector>
void WriteDebugInfoCU(uint32_t debug_abbrev_offset,
- const DebugInfoEntryWriter<Allocator>& entries,
+ const DebugInfoEntryWriter<Vector>& entries,
std::vector<uint8_t>* debug_info,
std::vector<uintptr_t>* debug_info_patches) {
+ static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type");
+
Writer<> writer(debug_info);
size_t start = writer.data()->size();
writer.PushUint32(0); // Length placeholder.
@@ -135,12 +141,14 @@ struct FileEntry {
};
// Write line table to .debug_line section.
-template<typename Allocator>
+template<typename Vector>
void WriteDebugLineTable(const std::vector<std::string>& include_directories,
const std::vector<FileEntry>& files,
- const DebugLineOpCodeWriter<Allocator>& opcodes,
+ const DebugLineOpCodeWriter<Vector>& opcodes,
std::vector<uint8_t>* debug_line,
std::vector<uintptr_t>* debug_line_patches) {
+ static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type");
+
Writer<> writer(debug_line);
size_t header_start = writer.data()->size();
writer.PushUint32(0); // Section-length placeholder.
@@ -151,13 +159,13 @@ void WriteDebugLineTable(const std::vector<std::string>& include_directories,
size_t header_length_pos = writer.data()->size();
writer.PushUint32(0); // Header-length placeholder.
writer.PushUint8(1 << opcodes.GetCodeFactorBits());
- writer.PushUint8(DebugLineOpCodeWriter<Allocator>::kDefaultIsStmt ? 1 : 0);
- writer.PushInt8(DebugLineOpCodeWriter<Allocator>::kLineBase);
- writer.PushUint8(DebugLineOpCodeWriter<Allocator>::kLineRange);
- writer.PushUint8(DebugLineOpCodeWriter<Allocator>::kOpcodeBase);
- static const int opcode_lengths[DebugLineOpCodeWriter<Allocator>::kOpcodeBase] = {
+ writer.PushUint8(DebugLineOpCodeWriter<Vector>::kDefaultIsStmt ? 1 : 0);
+ writer.PushInt8(DebugLineOpCodeWriter<Vector>::kLineBase);
+ writer.PushUint8(DebugLineOpCodeWriter<Vector>::kLineRange);
+ writer.PushUint8(DebugLineOpCodeWriter<Vector>::kOpcodeBase);
+ static const int opcode_lengths[DebugLineOpCodeWriter<Vector>::kOpcodeBase] = {
0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1 };
- for (int i = 1; i < DebugLineOpCodeWriter<Allocator>::kOpcodeBase; i++) {
+ for (int i = 1; i < DebugLineOpCodeWriter<Vector>::kOpcodeBase; i++) {
writer.PushUint8(opcode_lengths[i]);
}
for (const std::string& directory : include_directories) {
diff --git a/compiler/dwarf/writer.h b/compiler/dwarf/writer.h
index e703aeea2d..42c32c4303 100644
--- a/compiler/dwarf/writer.h
+++ b/compiler/dwarf/writer.h
@@ -26,8 +26,10 @@ namespace art {
namespace dwarf {
// The base class for all DWARF writers.
-template<typename Allocator = std::allocator<uint8_t>>
+template <typename Vector = std::vector<uint8_t>>
class Writer {
+ static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type");
+
public:
void PushUint8(int value) {
DCHECK_GE(value, 0);
@@ -116,8 +118,9 @@ class Writer {
data_->insert(data_->end(), p, p + size);
}
- template<typename Allocator2>
- void PushData(const std::vector<uint8_t, Allocator2>* buffer) {
+ template<typename Vector2>
+ void PushData(const Vector2* buffer) {
+ static_assert(std::is_same<typename Vector2::value_type, uint8_t>::value, "Invalid value type");
data_->insert(data_->end(), buffer->begin(), buffer->end());
}
@@ -155,14 +158,14 @@ class Writer {
data_->resize(RoundUp(data_->size(), alignment), 0);
}
- const std::vector<uint8_t, Allocator>* data() const {
+ const Vector* data() const {
return data_;
}
- explicit Writer(std::vector<uint8_t, Allocator>* buffer) : data_(buffer) { }
+ explicit Writer(Vector* buffer) : data_(buffer) { }
private:
- std::vector<uint8_t, Allocator>* data_;
+ Vector* const data_;
DISALLOW_COPY_AND_ASSIGN(Writer);
};
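
The DWARF writers are no longer parameterized on an allocator but on the byte-vector type itself, with a static_assert rejecting any container whose value_type is not uint8_t. A minimal sketch of that shape — an illustration of the pattern, not the ART class:

    #include <cstdint>
    #include <type_traits>
    #include <vector>

    template <typename Vector = std::vector<uint8_t>>
    class ByteWriter {
      static_assert(std::is_same<typename Vector::value_type, uint8_t>::value,
                    "Invalid value type");
     public:
      explicit ByteWriter(Vector* buffer) : data_(buffer) {}
      void PushUint8(uint8_t value) { data_->push_back(value); }
     private:
      Vector* const data_;  // The writer never owns the buffer.
    };

    // Usage: std::vector<uint8_t> buf; ByteWriter<> w(&buf); w.PushUint8(0x42);
    // Any uint8_t container with a vector-like interface (e.g. an arena-backed
    // vector) can now be plugged in without threading an allocator type through.
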
diff --git a/compiler/gc_map_builder.h b/compiler/gc_map_builder.h
index 45e3fc5589..2ef7f1a659 100644
--- a/compiler/gc_map_builder.h
+++ b/compiler/gc_map_builder.h
@@ -26,14 +26,16 @@ namespace art {
class GcMapBuilder {
public:
- template <typename Alloc>
- GcMapBuilder(std::vector<uint8_t, Alloc>* table, size_t entries, uint32_t max_native_offset,
+ template <typename Vector>
+ GcMapBuilder(Vector* table, size_t entries, uint32_t max_native_offset,
size_t references_width)
: entries_(entries), references_width_(entries != 0u ? references_width : 0u),
native_offset_width_(entries != 0 && max_native_offset != 0
? sizeof(max_native_offset) - CLZ(max_native_offset) / 8u
: 0u),
in_use_(entries) {
+ static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type");
+
// Resize table and set up header.
table->resize((EntryWidth() * entries) + sizeof(uint32_t));
table_ = table->data();
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index d9f8fcb43a..4310be6464 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -55,6 +55,7 @@
#include "mirror/string-inl.h"
#include "oat.h"
#include "oat_file.h"
+#include "oat_file_manager.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
#include "handle_scope-inl.h"
@@ -126,8 +127,6 @@ bool ImageWriter::Write(const std::string& image_filename,
const std::string& oat_location) {
CHECK(!image_filename.empty());
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
-
std::unique_ptr<File> oat_file(OS::OpenFileReadWrite(oat_filename.c_str()));
if (oat_file.get() == nullptr) {
PLOG(ERROR) << "Failed to open oat file " << oat_filename << " for " << oat_location;
@@ -141,7 +140,8 @@ bool ImageWriter::Write(const std::string& image_filename,
oat_file->Erase();
return false;
}
- CHECK_EQ(class_linker->RegisterOatFile(oat_file_), oat_file_);
+ Runtime::Current()->GetOatFileManager().RegisterOatFile(
+ std::unique_ptr<const OatFile>(oat_file_));
interpreter_to_interpreter_bridge_offset_ =
oat_file_->GetOatHeader().GetInterpreterToInterpreterBridgeOffset();
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index b6a40a203b..a45df95d1d 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -19,6 +19,7 @@
#include "art_method-inl.h"
#include "arch/instruction_set.h"
#include "arch/instruction_set_features.h"
+#include "base/stringpiece.h"
#include "base/time_utils.h"
#include "base/timing_logger.h"
#include "compiler_callbacks.h"
@@ -86,7 +87,37 @@ JitCompiler::JitCompiler() : total_time_(0) {
/* init_failure_output */ nullptr,
/* abort_on_hard_verifier_failure */ false));
const InstructionSet instruction_set = kRuntimeISA;
- instruction_set_features_.reset(InstructionSetFeatures::FromCppDefines());
+ for (const StringPiece option : Runtime::Current()->GetCompilerOptions()) {
+ VLOG(compiler) << "JIT compiler option " << option;
+ std::string error_msg;
+ if (option.starts_with("--instruction-set-variant=")) {
+ StringPiece str = option.substr(strlen("--instruction-set-variant=")).data();
+ VLOG(compiler) << "JIT instruction set variant " << str;
+ instruction_set_features_.reset(InstructionSetFeatures::FromVariant(
+ instruction_set, str.as_string(), &error_msg));
+ if (instruction_set_features_ == nullptr) {
+ LOG(WARNING) << "Error parsing " << option << " message=" << error_msg;
+ }
+ } else if (option.starts_with("--instruction-set-features=")) {
+ StringPiece str = option.substr(strlen("--instruction-set-features=")).data();
+ VLOG(compiler) << "JIT instruction set features " << str;
+ if (instruction_set_features_.get() == nullptr) {
+ instruction_set_features_.reset(InstructionSetFeatures::FromVariant(
+ instruction_set, "default", &error_msg));
+ if (instruction_set_features_ == nullptr) {
+ LOG(WARNING) << "Error parsing " << option << " message=" << error_msg;
+ }
+ }
+ instruction_set_features_.reset(
+ instruction_set_features_->AddFeaturesFromString(str.as_string(), &error_msg));
+ if (instruction_set_features_ == nullptr) {
+ LOG(WARNING) << "Error parsing " << option << " message=" << error_msg;
+ }
+ }
+ }
+ if (instruction_set_features_ == nullptr) {
+ instruction_set_features_.reset(InstructionSetFeatures::FromCppDefines());
+ }
cumulative_logger_.reset(new CumulativeLogger("jit times"));
verification_results_.reset(new VerificationResults(compiler_options_.get()));
method_inliner_map_.reset(new DexFileToMethodInlinerMap);
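
The new constructor loop gives explicit runtime compiler options precedence over the compile-time defaults: --instruction-set-variant= seeds the feature set, --instruction-set-features= appends to it (seeding with "default" first if nothing was set), and FromCppDefines() is only the fallback. A simplified sketch of that precedence using plain std::string instead of StringPiece; the function name is illustrative, not ART's API:

    #include <string>
    #include <vector>

    std::string ResolveIsaFeatures(const std::vector<std::string>& options) {
      const std::string kVariant = "--instruction-set-variant=";
      const std::string kFeatures = "--instruction-set-features=";
      std::string features;  // Empty means "not configured yet".
      for (const std::string& option : options) {
        if (option.compare(0, kVariant.size(), kVariant) == 0) {
          features = option.substr(kVariant.size());           // Variant seeds the set.
        } else if (option.compare(0, kFeatures.size(), kFeatures) == 0) {
          if (features.empty()) {
            features = "default";                              // Seed before appending.
          }
          features += "," + option.substr(kFeatures.size());   // Add the extra features.
        }
      }
      return features.empty() ? "cpp-defines" : features;      // Fallback mirrors FromCppDefines().
    }
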
diff --git a/compiler/jni/jni_cfi_test_expected.inc b/compiler/jni/jni_cfi_test_expected.inc
index 8b5fdc3fcb..16b4386938 100644
--- a/compiler/jni/jni_cfi_test_expected.inc
+++ b/compiler/jni/jni_cfi_test_expected.inc
@@ -325,156 +325,146 @@ static constexpr uint8_t expected_cfi_kX86_64[] = {
// 0x0000007f: .cfi_def_cfa_offset: 128
static constexpr uint8_t expected_asm_kMips[] = {
- 0xC0, 0xFF, 0xBD, 0x27, 0x3C, 0x00, 0xBF, 0xAF, 0x38, 0x00, 0xB8, 0xAF,
- 0x34, 0x00, 0xAF, 0xAF, 0x30, 0x00, 0xAE, 0xAF, 0x2C, 0x00, 0xAD, 0xAF,
- 0x28, 0x00, 0xAC, 0xAF, 0x24, 0x00, 0xAB, 0xAF, 0x20, 0x00, 0xAA, 0xAF,
- 0x1C, 0x00, 0xA9, 0xAF, 0x18, 0x00, 0xA8, 0xAF, 0x00, 0x00, 0xA4, 0xAF,
- 0x44, 0x00, 0xA5, 0xAF, 0x48, 0x00, 0xAC, 0xE7, 0x4C, 0x00, 0xA6, 0xAF,
- 0x50, 0x00, 0xA7, 0xAF, 0xE0, 0xFF, 0xBD, 0x27, 0x20, 0x00, 0xBD, 0x27,
- 0x18, 0x00, 0xA8, 0x8F, 0x1C, 0x00, 0xA9, 0x8F, 0x20, 0x00, 0xAA, 0x8F,
- 0x24, 0x00, 0xAB, 0x8F, 0x28, 0x00, 0xAC, 0x8F, 0x2C, 0x00, 0xAD, 0x8F,
- 0x30, 0x00, 0xAE, 0x8F, 0x34, 0x00, 0xAF, 0x8F, 0x38, 0x00, 0xB8, 0x8F,
- 0x3C, 0x00, 0xBF, 0x8F, 0x40, 0x00, 0xBD, 0x27, 0x09, 0x00, 0xE0, 0x03,
- 0x00, 0x00, 0x00, 0x00,
+ 0xC0, 0xFF, 0xBD, 0x27, 0x3C, 0x00, 0xBF, 0xAF, 0x38, 0x00, 0xBE, 0xAF,
+ 0x34, 0x00, 0xB7, 0xAF, 0x30, 0x00, 0xB6, 0xAF, 0x2C, 0x00, 0xB5, 0xAF,
+ 0x28, 0x00, 0xB4, 0xAF, 0x24, 0x00, 0xB3, 0xAF, 0x20, 0x00, 0xB2, 0xAF,
+ 0x00, 0x00, 0xA4, 0xAF, 0x44, 0x00, 0xA5, 0xAF, 0x48, 0x00, 0xAC, 0xE7,
+ 0x4C, 0x00, 0xA6, 0xAF, 0x50, 0x00, 0xA7, 0xAF, 0xE0, 0xFF, 0xBD, 0x27,
+ 0x20, 0x00, 0xBD, 0x27, 0x20, 0x00, 0xB2, 0x8F, 0x24, 0x00, 0xB3, 0x8F,
+ 0x28, 0x00, 0xB4, 0x8F, 0x2C, 0x00, 0xB5, 0x8F, 0x30, 0x00, 0xB6, 0x8F,
+ 0x34, 0x00, 0xB7, 0x8F, 0x38, 0x00, 0xBE, 0x8F, 0x3C, 0x00, 0xBF, 0x8F,
+ 0x40, 0x00, 0xBD, 0x27, 0x09, 0x00, 0xE0, 0x03, 0x00, 0x00, 0x00, 0x00,
};
static constexpr uint8_t expected_cfi_kMips[] = {
- 0x44, 0x0E, 0x40, 0x44, 0x9F, 0x01, 0x44, 0x98, 0x02, 0x44, 0x8F, 0x03,
- 0x44, 0x8E, 0x04, 0x44, 0x8D, 0x05, 0x44, 0x8C, 0x06, 0x44, 0x8B, 0x07,
- 0x44, 0x8A, 0x08, 0x44, 0x89, 0x09, 0x44, 0x88, 0x0A, 0x58, 0x0E, 0x60,
- 0x44, 0x0E, 0x40, 0x0A, 0x44, 0xC8, 0x44, 0xC9, 0x44, 0xCA, 0x44, 0xCB,
- 0x44, 0xCC, 0x44, 0xCD, 0x44, 0xCE, 0x44, 0xCF, 0x44, 0xD8, 0x44, 0xDF,
- 0x44, 0x0E, 0x00, 0x48, 0x0B, 0x0E, 0x40,
+ 0x44, 0x0E, 0x40, 0x44, 0x9F, 0x01, 0x44, 0x9E, 0x02, 0x44, 0x97, 0x03,
+ 0x44, 0x96, 0x04, 0x44, 0x95, 0x05, 0x44, 0x94, 0x06, 0x44, 0x93, 0x07,
+ 0x44, 0x92, 0x08, 0x58, 0x0E, 0x60, 0x44, 0x0E, 0x40, 0x0A, 0x44, 0xD2,
+ 0x44, 0xD3, 0x44, 0xD4, 0x44, 0xD5, 0x44, 0xD6, 0x44, 0xD7, 0x44, 0xDE,
+ 0x44, 0xDF, 0x44, 0x0E, 0x00, 0x48, 0x0B, 0x0E, 0x40,
};
// 0x00000000: addiu r29, r29, -64
// 0x00000004: .cfi_def_cfa_offset: 64
// 0x00000004: sw r31, +60(r29)
// 0x00000008: .cfi_offset: r31 at cfa-4
-// 0x00000008: sw r24, +56(r29)
-// 0x0000000c: .cfi_offset: r24 at cfa-8
-// 0x0000000c: sw r15, +52(r29)
-// 0x00000010: .cfi_offset: r15 at cfa-12
-// 0x00000010: sw r14, +48(r29)
-// 0x00000014: .cfi_offset: r14 at cfa-16
-// 0x00000014: sw r13, +44(r29)
-// 0x00000018: .cfi_offset: r13 at cfa-20
-// 0x00000018: sw r12, +40(r29)
-// 0x0000001c: .cfi_offset: r12 at cfa-24
-// 0x0000001c: sw r11, +36(r29)
-// 0x00000020: .cfi_offset: r11 at cfa-28
-// 0x00000020: sw r10, +32(r29)
-// 0x00000024: .cfi_offset: r10 at cfa-32
-// 0x00000024: sw r9, +28(r29)
-// 0x00000028: .cfi_offset: r9 at cfa-36
-// 0x00000028: sw r8, +24(r29)
-// 0x0000002c: .cfi_offset: r8 at cfa-40
-// 0x0000002c: sw r4, +0(r29)
-// 0x00000030: sw r5, +68(r29)
-// 0x00000034: swc1 f12, +72(r29)
-// 0x00000038: sw r6, +76(r29)
-// 0x0000003c: sw r7, +80(r29)
-// 0x00000040: addiu r29, r29, -32
-// 0x00000044: .cfi_def_cfa_offset: 96
-// 0x00000044: addiu r29, r29, 32
-// 0x00000048: .cfi_def_cfa_offset: 64
-// 0x00000048: .cfi_remember_state
-// 0x00000048: lw r8, +24(r29)
-// 0x0000004c: .cfi_restore: r8
-// 0x0000004c: lw r9, +28(r29)
-// 0x00000050: .cfi_restore: r9
-// 0x00000050: lw r10, +32(r29)
-// 0x00000054: .cfi_restore: r10
-// 0x00000054: lw r11, +36(r29)
-// 0x00000058: .cfi_restore: r11
-// 0x00000058: lw r12, +40(r29)
-// 0x0000005c: .cfi_restore: r12
-// 0x0000005c: lw r13, +44(r29)
-// 0x00000060: .cfi_restore: r13
-// 0x00000060: lw r14, +48(r29)
-// 0x00000064: .cfi_restore: r14
-// 0x00000064: lw r15, +52(r29)
-// 0x00000068: .cfi_restore: r15
-// 0x00000068: lw r24, +56(r29)
-// 0x0000006c: .cfi_restore: r24
-// 0x0000006c: lw r31, +60(r29)
-// 0x00000070: .cfi_restore: r31
-// 0x00000070: addiu r29, r29, 64
-// 0x00000074: .cfi_def_cfa_offset: 0
-// 0x00000074: jr r31
-// 0x00000078: nop
-// 0x0000007c: .cfi_restore_state
-// 0x0000007c: .cfi_def_cfa_offset: 64
+// 0x00000008: sw r30, +56(r29)
+// 0x0000000c: .cfi_offset: r30 at cfa-8
+// 0x0000000c: sw r23, +52(r29)
+// 0x00000010: .cfi_offset: r23 at cfa-12
+// 0x00000010: sw r22, +48(r29)
+// 0x00000014: .cfi_offset: r22 at cfa-16
+// 0x00000014: sw r21, +44(r29)
+// 0x00000018: .cfi_offset: r21 at cfa-20
+// 0x00000018: sw r20, +40(r29)
+// 0x0000001c: .cfi_offset: r20 at cfa-24
+// 0x0000001c: sw r19, +36(r29)
+// 0x00000020: .cfi_offset: r19 at cfa-28
+// 0x00000020: sw r18, +32(r29)
+// 0x00000024: .cfi_offset: r18 at cfa-32
+// 0x00000024: sw r4, +0(r29)
+// 0x00000028: sw r5, +68(r29)
+// 0x0000002c: swc1 f12, +72(r29)
+// 0x00000030: sw r6, +76(r29)
+// 0x00000034: sw r7, +80(r29)
+// 0x00000038: addiu r29, r29, -32
+// 0x0000003c: .cfi_def_cfa_offset: 96
+// 0x0000003c: addiu r29, r29, 32
+// 0x00000040: .cfi_def_cfa_offset: 64
+// 0x00000040: .cfi_remember_state
+// 0x00000040: lw r18, +32(r29)
+// 0x00000044: .cfi_restore: r18
+// 0x00000044: lw r19, +36(r29)
+// 0x00000048: .cfi_restore: r19
+// 0x00000048: lw r20, +40(r29)
+// 0x0000004c: .cfi_restore: r20
+// 0x0000004c: lw r21, +44(r29)
+// 0x00000050: .cfi_restore: r21
+// 0x00000050: lw r22, +48(r29)
+// 0x00000054: .cfi_restore: r22
+// 0x00000054: lw r23, +52(r29)
+// 0x00000058: .cfi_restore: r23
+// 0x00000058: lw r30, +56(r29)
+// 0x0000005c: .cfi_restore: r30
+// 0x0000005c: lw r31, +60(r29)
+// 0x00000060: .cfi_restore: r31
+// 0x00000060: addiu r29, r29, 64
+// 0x00000064: .cfi_def_cfa_offset: 0
+// 0x00000064: jr r31
+// 0x00000068: nop
+// 0x0000006c: .cfi_restore_state
+// 0x0000006c: .cfi_def_cfa_offset: 64
static constexpr uint8_t expected_asm_kMips64[] = {
- 0xA0, 0xFF, 0xBD, 0x67, 0x58, 0x00, 0xBF, 0xFF, 0x50, 0x00, 0xBE, 0xFF,
- 0x48, 0x00, 0xBC, 0xFF, 0x40, 0x00, 0xB7, 0xFF, 0x38, 0x00, 0xB6, 0xFF,
- 0x30, 0x00, 0xB5, 0xFF, 0x28, 0x00, 0xB4, 0xFF, 0x20, 0x00, 0xB3, 0xFF,
- 0x18, 0x00, 0xB2, 0xFF, 0x00, 0x00, 0xA4, 0xFF, 0x68, 0x00, 0xA5, 0xAF,
- 0x6C, 0x00, 0xAE, 0xE7, 0x70, 0x00, 0xA7, 0xAF, 0x74, 0x00, 0xA8, 0xAF,
- 0xE0, 0xFF, 0xBD, 0x67, 0x20, 0x00, 0xBD, 0x67, 0x18, 0x00, 0xB2, 0xDF,
- 0x20, 0x00, 0xB3, 0xDF, 0x28, 0x00, 0xB4, 0xDF, 0x30, 0x00, 0xB5, 0xDF,
- 0x38, 0x00, 0xB6, 0xDF, 0x40, 0x00, 0xB7, 0xDF, 0x48, 0x00, 0xBC, 0xDF,
- 0x50, 0x00, 0xBE, 0xDF, 0x58, 0x00, 0xBF, 0xDF, 0x60, 0x00, 0xBD, 0x67,
+ 0x90, 0xFF, 0xBD, 0x67, 0x68, 0x00, 0xBF, 0xFF, 0x60, 0x00, 0xBE, 0xFF,
+ 0x58, 0x00, 0xBC, 0xFF, 0x50, 0x00, 0xB7, 0xFF, 0x48, 0x00, 0xB6, 0xFF,
+ 0x40, 0x00, 0xB5, 0xFF, 0x38, 0x00, 0xB4, 0xFF, 0x30, 0x00, 0xB3, 0xFF,
+ 0x28, 0x00, 0xB2, 0xFF, 0x00, 0x00, 0xA4, 0xFF, 0x78, 0x00, 0xA5, 0xAF,
+ 0x7C, 0x00, 0xAE, 0xE7, 0x80, 0x00, 0xA7, 0xAF, 0x84, 0x00, 0xA8, 0xAF,
+ 0xE0, 0xFF, 0xBD, 0x67, 0x20, 0x00, 0xBD, 0x67, 0x28, 0x00, 0xB2, 0xDF,
+ 0x30, 0x00, 0xB3, 0xDF, 0x38, 0x00, 0xB4, 0xDF, 0x40, 0x00, 0xB5, 0xDF,
+ 0x48, 0x00, 0xB6, 0xDF, 0x50, 0x00, 0xB7, 0xDF, 0x58, 0x00, 0xBC, 0xDF,
+ 0x60, 0x00, 0xBE, 0xDF, 0x68, 0x00, 0xBF, 0xDF, 0x70, 0x00, 0xBD, 0x67,
0x09, 0x00, 0xE0, 0x03, 0x00, 0x00, 0x00, 0x00,
};
static constexpr uint8_t expected_cfi_kMips64[] = {
- 0x44, 0x0E, 0x60, 0x44, 0x9F, 0x02, 0x44, 0x9E, 0x04, 0x44, 0x9C, 0x06,
+ 0x44, 0x0E, 0x70, 0x44, 0x9F, 0x02, 0x44, 0x9E, 0x04, 0x44, 0x9C, 0x06,
0x44, 0x97, 0x08, 0x44, 0x96, 0x0A, 0x44, 0x95, 0x0C, 0x44, 0x94, 0x0E,
- 0x44, 0x93, 0x10, 0x44, 0x92, 0x12, 0x58, 0x0E, 0x80, 0x01, 0x44, 0x0E,
- 0x60, 0x0A, 0x44, 0xD2, 0x44, 0xD3, 0x44, 0xD4, 0x44, 0xD5, 0x44, 0xD6,
+ 0x44, 0x93, 0x10, 0x44, 0x92, 0x12, 0x58, 0x0E, 0x90, 0x01, 0x44, 0x0E,
+ 0x70, 0x0A, 0x44, 0xD2, 0x44, 0xD3, 0x44, 0xD4, 0x44, 0xD5, 0x44, 0xD6,
0x44, 0xD7, 0x44, 0xDC, 0x44, 0xDE, 0x44, 0xDF, 0x44, 0x0E, 0x00, 0x48,
- 0x0B, 0x0E, 0x60,
+ 0x0B, 0x0E, 0x70,
};
-// 0x00000000: daddiu r29, r29, -96
-// 0x00000004: .cfi_def_cfa_offset: 96
-// 0x00000004: sd r31, +88(r29)
+// 0x00000000: daddiu r29, r29, -112
+// 0x00000004: .cfi_def_cfa_offset: 112
+// 0x00000004: sd r31, +104(r29)
// 0x00000008: .cfi_offset: r31 at cfa-8
-// 0x00000008: sd r30, +80(r29)
+// 0x00000008: sd r30, +96(r29)
// 0x0000000c: .cfi_offset: r30 at cfa-16
-// 0x0000000c: sd r28, +72(r29)
+// 0x0000000c: sd r28, +88(r29)
// 0x00000010: .cfi_offset: r28 at cfa-24
-// 0x00000010: sd r23, +64(r29)
+// 0x00000010: sd r23, +80(r29)
// 0x00000014: .cfi_offset: r23 at cfa-32
-// 0x00000014: sd r22, +56(r29)
+// 0x00000014: sd r22, +72(r29)
// 0x00000018: .cfi_offset: r22 at cfa-40
-// 0x00000018: sd r21, +48(r29)
+// 0x00000018: sd r21, +64(r29)
// 0x0000001c: .cfi_offset: r21 at cfa-48
-// 0x0000001c: sd r20, +40(r29)
+// 0x0000001c: sd r20, +56(r29)
// 0x00000020: .cfi_offset: r20 at cfa-56
-// 0x00000020: sd r19, +32(r29)
+// 0x00000020: sd r19, +48(r29)
// 0x00000024: .cfi_offset: r19 at cfa-64
-// 0x00000024: sd r18, +24(r29)
+// 0x00000024: sd r18, +40(r29)
// 0x00000028: .cfi_offset: r18 at cfa-72
// 0x00000028: sd r4, +0(r29)
-// 0x0000002c: sw r5, +104(r29)
-// 0x00000030: swc1 f14, +108(r29)
-// 0x00000034: sw r7, +112(r29)
-// 0x00000038: sw r8, +116(r29)
+// 0x0000002c: sw r5, +120(r29)
+// 0x00000030: swc1 f14, +124(r29)
+// 0x00000034: sw r7, +128(r29)
+// 0x00000038: sw r8, +132(r29)
// 0x0000003c: daddiu r29, r29, -32
-// 0x00000040: .cfi_def_cfa_offset: 128
+// 0x00000040: .cfi_def_cfa_offset: 144
// 0x00000040: daddiu r29, r29, 32
-// 0x00000044: .cfi_def_cfa_offset: 96
+// 0x00000044: .cfi_def_cfa_offset: 112
// 0x00000044: .cfi_remember_state
-// 0x00000044: ld r18, +24(r29)
+// 0x00000044: ld r18, +40(r29)
// 0x00000048: .cfi_restore: r18
-// 0x00000048: ld r19, +32(r29)
+// 0x00000048: ld r19, +48(r29)
// 0x0000004c: .cfi_restore: r19
-// 0x0000004c: ld r20, +40(r29)
+// 0x0000004c: ld r20, +56(r29)
// 0x00000050: .cfi_restore: r20
-// 0x00000050: ld r21, +48(r29)
+// 0x00000050: ld r21, +64(r29)
// 0x00000054: .cfi_restore: r21
-// 0x00000054: ld r22, +56(r29)
+// 0x00000054: ld r22, +72(r29)
// 0x00000058: .cfi_restore: r22
-// 0x00000058: ld r23, +64(r29)
+// 0x00000058: ld r23, +80(r29)
// 0x0000005c: .cfi_restore: r23
-// 0x0000005c: ld r28, +72(r29)
+// 0x0000005c: ld r28, +88(r29)
// 0x00000060: .cfi_restore: r28
-// 0x00000060: ld r30, +80(r29)
+// 0x00000060: ld r30, +96(r29)
// 0x00000064: .cfi_restore: r30
-// 0x00000064: ld r31, +88(r29)
+// 0x00000064: ld r31, +104(r29)
// 0x00000068: .cfi_restore: r31
-// 0x00000068: daddiu r29, r29, 96
+// 0x00000068: daddiu r29, r29, 112
// 0x0000006c: .cfi_def_cfa_offset: 0
// 0x0000006c: jr r31
// 0x00000070: nop
// 0x00000074: .cfi_restore_state
-// 0x00000074: .cfi_def_cfa_offset: 96
+// 0x00000074: .cfi_def_cfa_offset: 112
+
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index 953dfcb2c3..34f0802444 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -67,6 +67,7 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
const bool is_synchronized = (access_flags & kAccSynchronized) != 0;
const char* shorty = dex_file.GetMethodShorty(dex_file.GetMethodId(method_idx));
InstructionSet instruction_set = driver->GetInstructionSet();
+ const InstructionSetFeatures* instruction_set_features = driver->GetInstructionSetFeatures();
const bool is_64_bit_target = Is64BitInstructionSet(instruction_set);
// Calling conventions used to iterate over parameters to method
std::unique_ptr<JniCallingConvention> main_jni_conv(
@@ -93,7 +94,7 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
JniCallingConvention::Create(is_static, is_synchronized, jni_end_shorty, instruction_set));
// Assembler that holds generated instructions
- std::unique_ptr<Assembler> jni_asm(Assembler::Create(instruction_set));
+ std::unique_ptr<Assembler> jni_asm(Assembler::Create(instruction_set, instruction_set_features));
jni_asm->cfi().SetEnabled(driver->GetCompilerOptions().GetGenerateDebugInfo());
// Offsets into data structures
diff --git a/compiler/jni/quick/mips/calling_convention_mips.cc b/compiler/jni/quick/mips/calling_convention_mips.cc
index be2397f518..2d31a9881e 100644
--- a/compiler/jni/quick/mips/calling_convention_mips.cc
+++ b/compiler/jni/quick/mips/calling_convention_mips.cc
@@ -162,22 +162,19 @@ MipsJniCallingConvention::MipsJniCallingConvention(bool is_static, bool is_synch
}
padding_ = padding;
- callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(T0));
- callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(T1));
- callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(T2));
- callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(T3));
- callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(T4));
- callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(T5));
- callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(T6));
- callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(T7));
- callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(T8));
+ callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(S2));
+ callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(S3));
+ callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(S4));
+ callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(S5));
+ callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(S6));
+ callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(S7));
+ callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(FP));
}
uint32_t MipsJniCallingConvention::CoreSpillMask() const {
// Compute spill mask to agree with callee saves initialized in the constructor
uint32_t result = 0;
- result = 1 << T0 | 1 << T1 | 1 << T2 | 1 << T3 | 1 << T4 | 1 << T5 | 1 << T6 |
- 1 << T7 | 1 << T8 | 1 << RA;
+ result = 1 << S2 | 1 << S3 | 1 << S4 | 1 << S5 | 1 << S6 | 1 << S7 | 1 << FP | 1 << RA;
return result;
}
@@ -186,7 +183,7 @@ ManagedRegister MipsJniCallingConvention::ReturnScratchRegister() const {
}
size_t MipsJniCallingConvention::FrameSize() {
- // Method*, LR and callee save area size, local reference segment state
+ // ArtMethod*, RA and callee save area size, local reference segment state
size_t frame_data_size = kMipsPointerSize +
(2 + CalleeSaveRegisters().size()) * kFramePointerSize;
// References plus 2 words for HandleScope header
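
// Editor's note: the hunk above moves the MIPS JNI callee saves from the
// temporary registers T0..T8 to the preserved registers S2..S7 and FP, and the
// spill mask must stay in sync with that list. A minimal sketch of the mask
// arithmetic, assuming the standard MIPS O32 register numbers; this is an
// illustration, not the ART code.
#include <bitset>
#include <cassert>
#include <cstdint>
#include <vector>

enum MipsReg { S2 = 18, S3 = 19, S4 = 20, S5 = 21, S6 = 22, S7 = 23, FP = 30, RA = 31 };

uint32_t CoreSpillMask(const std::vector<MipsReg>& callee_saves) {
  uint32_t mask = 1u << RA;  // The return address is always spilled.
  for (MipsReg reg : callee_saves) {
    mask |= 1u << reg;
  }
  // Mirror of the consistency check added for MIPS64 below: one bit per
  // callee save plus one for RA.
  assert(std::bitset<32>(mask).count() == callee_saves.size() + 1);
  return mask;
}

int main() {
  uint32_t mask = CoreSpillMask({S2, S3, S4, S5, S6, S7, FP});
  return mask != 0u ? 0 : 1;  // Expected: bits 18-23, 30 and 31 set.
}
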
diff --git a/compiler/jni/quick/mips64/calling_convention_mips64.cc b/compiler/jni/quick/mips64/calling_convention_mips64.cc
index 3a11bcfe9c..807d740b4d 100644
--- a/compiler/jni/quick/mips64/calling_convention_mips64.cc
+++ b/compiler/jni/quick/mips64/calling_convention_mips64.cc
@@ -140,6 +140,7 @@ uint32_t Mips64JniCallingConvention::CoreSpillMask() const {
// Compute spill mask to agree with callee saves initialized in the constructor
uint32_t result = 0;
result = 1 << S2 | 1 << S3 | 1 << S4 | 1 << S5 | 1 << S6 | 1 << S7 | 1 << GP | 1 << S8 | 1 << RA;
+ DCHECK_EQ(static_cast<size_t>(POPCOUNT(result)), callee_save_regs_.size() + 1);
return result;
}
@@ -148,9 +149,9 @@ ManagedRegister Mips64JniCallingConvention::ReturnScratchRegister() const {
}
size_t Mips64JniCallingConvention::FrameSize() {
- // Mehtod* and callee save area size, local reference segment state
+ // ArtMethod*, RA and callee save area size, local reference segment state
size_t frame_data_size = kFramePointerSize +
- CalleeSaveRegisters().size() * kFramePointerSize + sizeof(uint32_t);
+ (CalleeSaveRegisters().size() + 1) * kFramePointerSize + sizeof(uint32_t);
// References plus 2 words for HandleScope header
size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount());
// Plus return value spill area size
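
// Editor's note: the MIPS64 hunk above fixes FrameSize() so the managed frame
// reserves a slot for RA in addition to the callee saves. A simplified sketch
// of that accounting; kStackAlignment, the handle scope size and the pointer
// sizes are stand-in values, not the ART definitions.
#include <cstddef>
#include <cstdio>

constexpr size_t RoundUp(size_t x, size_t n) { return (x + n - 1) / n * n; }

size_t FrameSize(size_t num_callee_saves, size_t handle_scope_size) {
  constexpr size_t kFramePointerSize = 8;  // 64-bit target.
  constexpr size_t kStackAlignment = 16;   // Assumed stack alignment.
  // ArtMethod*, RA plus callee saves, plus the local reference segment state.
  size_t frame_data_size = kFramePointerSize +
      (num_callee_saves + 1) * kFramePointerSize + sizeof(uint32_t);
  return RoundUp(frame_data_size + handle_scope_size, kStackAlignment);
}

int main() {
  // Eight callee saves (S2..S7, GP, S8) -> with RA that is nine 8-byte slots.
  std::printf("frame = %zu bytes\n", FrameSize(8, 40));
  return 0;
}
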
diff --git a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
index 9c7eab1cc7..b6b11ca51f 100644
--- a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
+++ b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
@@ -38,8 +38,7 @@ ManagedRegister X86_64JniCallingConvention::ReturnScratchRegister() const {
return ManagedRegister::NoRegister(); // No free regs, so assembler uses push/pop
}
-static ManagedRegister ReturnRegisterForShorty(const char* shorty, bool jni) {
- UNUSED(jni);
+static ManagedRegister ReturnRegisterForShorty(const char* shorty, bool jni ATTRIBUTE_UNUSED) {
if (shorty[0] == 'F' || shorty[0] == 'D') {
return X86_64ManagedRegister::FromXmmRegister(XMM0);
} else if (shorty[0] == 'J') {
diff --git a/compiler/linker/arm/relative_patcher_thumb2.cc b/compiler/linker/arm/relative_patcher_thumb2.cc
index a3e889f0f6..5f4f760c14 100644
--- a/compiler/linker/arm/relative_patcher_thumb2.cc
+++ b/compiler/linker/arm/relative_patcher_thumb2.cc
@@ -110,8 +110,9 @@ uint32_t Thumb2RelativePatcher::GetInsn32(ArrayRef<const uint8_t> code, uint32_t
(static_cast<uint32_t>(addr[3]) << 8);
}
-template <typename Alloc>
-uint32_t Thumb2RelativePatcher::GetInsn32(std::vector<uint8_t, Alloc>* code, uint32_t offset) {
+template <typename Vector>
+uint32_t Thumb2RelativePatcher::GetInsn32(Vector* code, uint32_t offset) {
+ static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type");
return GetInsn32(ArrayRef<const uint8_t>(*code), offset);
}
diff --git a/compiler/linker/arm/relative_patcher_thumb2.h b/compiler/linker/arm/relative_patcher_thumb2.h
index 2d474c2db0..006d6fb9d5 100644
--- a/compiler/linker/arm/relative_patcher_thumb2.h
+++ b/compiler/linker/arm/relative_patcher_thumb2.h
@@ -37,8 +37,8 @@ class Thumb2RelativePatcher FINAL : public ArmBaseRelativePatcher {
void SetInsn32(std::vector<uint8_t>* code, uint32_t offset, uint32_t value);
static uint32_t GetInsn32(ArrayRef<const uint8_t> code, uint32_t offset);
- template <typename Alloc>
- static uint32_t GetInsn32(std::vector<uint8_t, Alloc>* code, uint32_t offset);
+ template <typename Vector>
+ static uint32_t GetInsn32(Vector* code, uint32_t offset);
// PC displacement from patch location; Thumb2 PC is always at instruction address + 4.
static constexpr int32_t kPcDisplacement = 4;
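
// Editor's note: the two patcher hunks above generalize GetInsn32() from
// std::vector<uint8_t, Alloc> to any byte container and guard the template
// with a static_assert on the element type. A generic sketch of that pattern;
// for simplicity it reads a plain little-endian word rather than the Thumb2
// halfword order used by the real patcher.
#include <array>
#include <cstdint>
#include <type_traits>
#include <vector>

template <typename Vector>
uint32_t GetInsn32(const Vector& code, uint32_t offset) {
  static_assert(std::is_same<typename Vector::value_type, uint8_t>::value,
                "Invalid value type");
  return static_cast<uint32_t>(code[offset]) |
         (static_cast<uint32_t>(code[offset + 1]) << 8) |
         (static_cast<uint32_t>(code[offset + 2]) << 16) |
         (static_cast<uint32_t>(code[offset + 3]) << 24);
}

int main() {
  std::vector<uint8_t> vec = {0x78, 0x56, 0x34, 0x12};
  std::array<uint8_t, 4> arr = {0x78, 0x56, 0x34, 0x12};
  // Both containers satisfy the static_assert; an ArenaVector<uint8_t> would too.
  return (GetInsn32(vec, 0) == 0x12345678u && GetInsn32(arr, 0) == 0x12345678u) ? 0 : 1;
}
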
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 4ddd457ac5..a78a5b3644 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -31,6 +31,7 @@
#include "dex/verification_results.h"
#include "driver/compiler_driver.h"
#include "driver/compiler_options.h"
+#include "gc/space/image_space.h"
#include "gc/space/space.h"
#include "image_writer.h"
#include "linker/relative_patcher.h"
@@ -43,6 +44,7 @@
#include "safe_map.h"
#include "scoped_thread_state_change.h"
#include "handle_scope-inl.h"
+#include "utils/dex_cache_arrays_layout-inl.h"
#include "verifier/method_verifier.h"
namespace art {
@@ -143,6 +145,18 @@ OatWriter::OatWriter(const std::vector<const DexFile*>& dex_files,
}
size_ = offset;
+ if (!HasImage()) {
+ // Allocate space for app dex cache arrays in the .bss section.
+ size_t bss_start = RoundUp(size_, kPageSize);
+ size_t pointer_size = GetInstructionSetPointerSize(instruction_set);
+ bss_size_ = 0u;
+ for (const DexFile* dex_file : dex_files) {
+ dex_cache_arrays_offsets_.Put(dex_file, bss_start + bss_size_);
+ DexCacheArraysLayout layout(pointer_size, dex_file);
+ bss_size_ += layout.Size();
+ }
+ }
+
CHECK_EQ(dex_files_->size(), oat_dex_files_.size());
CHECK_EQ(compiler->IsImage(), image_writer_ != nullptr);
CHECK_EQ(compiler->IsImage(),
@@ -655,10 +669,10 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
no_thread_suspension_(soa_.Self(), "OatWriter patching"),
class_linker_(Runtime::Current()->GetClassLinker()),
dex_cache_(nullptr) {
- if (writer_->image_writer_ != nullptr) {
+ patched_code_.reserve(16 * KB);
+ if (writer_->HasImage()) {
// If we're creating the image, the address space must be ready so that we can apply patches.
CHECK(writer_->image_writer_->IsImageAddressSpaceReady());
- patched_code_.reserve(16 * KB);
}
}
@@ -841,24 +855,28 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
}
uint32_t GetDexCacheOffset(const LinkerPatch& patch) SHARED_REQUIRES(Locks::mutator_lock_) {
- if (writer_->image_writer_ != nullptr) {
+ if (writer_->HasImage()) {
auto* element = writer_->image_writer_->GetDexCacheArrayElementImageAddress<const uint8_t*>(
patch.TargetDexCacheDexFile(), patch.TargetDexCacheElementOffset());
const uint8_t* oat_data = writer_->image_writer_->GetOatFileBegin() + file_offset_;
return element - oat_data;
} else {
- LOG(FATAL) << "Unimplemented.";
- UNREACHABLE();
+ size_t start = writer_->dex_cache_arrays_offsets_.Get(patch.TargetDexCacheDexFile());
+ return start + patch.TargetDexCacheElementOffset();
}
}
void PatchObjectAddress(std::vector<uint8_t>* code, uint32_t offset, mirror::Object* object)
SHARED_REQUIRES(Locks::mutator_lock_) {
- // NOTE: Direct method pointers across oat files don't use linker patches. However, direct
- // type pointers across oat files do. (TODO: Investigate why.)
- if (writer_->image_writer_ != nullptr) {
+ if (writer_->HasImage()) {
object = writer_->image_writer_->GetImageAddress(object);
+ } else {
+ // NOTE: We're using linker patches for app->boot references when the image can
+ // be relocated and therefore we need to emit .oat_patches. We're not using this
+ // for app->app references, so check that the object is in the image space.
+ DCHECK(Runtime::Current()->GetHeap()->FindSpaceFromObject(object, false)->IsImageSpace());
}
+ // Note: We only patch targeting Objects in image which is in the low 4gb.
uint32_t address = PointerToLowMemUInt32(object);
DCHECK_LE(offset + 4, code->size());
uint8_t* data = &(*code)[offset];
@@ -870,12 +888,17 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
void PatchMethodAddress(std::vector<uint8_t>* code, uint32_t offset, ArtMethod* method)
SHARED_REQUIRES(Locks::mutator_lock_) {
- // NOTE: Direct method pointers across oat files don't use linker patches. However, direct
- // type pointers across oat files do. (TODO: Investigate why.)
- if (writer_->image_writer_ != nullptr) {
+ if (writer_->HasImage()) {
method = writer_->image_writer_->GetImageMethodAddress(method);
+ } else if (kIsDebugBuild) {
+ // NOTE: We're using linker patches for app->boot references when the image can
+ // be relocated and therefore we need to emit .oat_patches. We're not using this
+ // for app->app references, so check that the method is an image method.
+ gc::space::ImageSpace* image_space = Runtime::Current()->GetHeap()->GetImageSpace();
+ size_t method_offset = reinterpret_cast<const uint8_t*>(method) - image_space->Begin();
+ CHECK(image_space->GetImageHeader().GetMethodsSection().Contains(method_offset));
}
- // Note: We only patch ArtMethods to low 4gb since thats where the image is.
+ // Note: We only patch targeting ArtMethods in image which is in the low 4gb.
uint32_t address = PointerToLowMemUInt32(method);
DCHECK_LE(offset + 4, code->size());
uint8_t* data = &(*code)[offset];
@@ -887,9 +910,11 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor {
void PatchCodeAddress(std::vector<uint8_t>* code, uint32_t offset, uint32_t target_offset)
SHARED_REQUIRES(Locks::mutator_lock_) {
- uint32_t address = writer_->image_writer_ == nullptr ? target_offset :
- PointerToLowMemUInt32(writer_->image_writer_->GetOatFileBegin() +
- writer_->oat_data_offset_ + target_offset);
+ uint32_t address = target_offset;
+ if (writer_->HasImage()) {
+ address = PointerToLowMemUInt32(writer_->image_writer_->GetOatFileBegin() +
+ writer_->oat_data_offset_ + target_offset);
+ }
DCHECK_LE(offset + 4, code->size());
uint8_t* data = &(*code)[offset];
data[0] = address & 0xffu;
@@ -1091,8 +1116,6 @@ size_t OatWriter::InitOatCode(size_t offset) {
field.reset(compiler_driver_->Create ## fn_name()); \
offset += field->size();
- DO_TRAMPOLINE(interpreter_to_interpreter_bridge_, InterpreterToInterpreterBridge);
- DO_TRAMPOLINE(interpreter_to_compiled_code_bridge_, InterpreterToCompiledCodeBridge);
DO_TRAMPOLINE(jni_dlsym_lookup_, JniDlsymLookup);
DO_TRAMPOLINE(quick_generic_jni_trampoline_, QuickGenericJniTrampoline);
DO_TRAMPOLINE(quick_imt_conflict_trampoline_, QuickImtConflictTrampoline);
@@ -1350,8 +1373,6 @@ size_t OatWriter::WriteCode(OutputStream* out, const size_t file_offset, size_t
DCHECK_OFFSET(); \
} while (false)
- DO_TRAMPOLINE(interpreter_to_interpreter_bridge_);
- DO_TRAMPOLINE(interpreter_to_compiled_code_bridge_);
DO_TRAMPOLINE(jni_dlsym_lookup_);
DO_TRAMPOLINE(quick_generic_jni_trampoline_);
DO_TRAMPOLINE(quick_imt_conflict_trampoline_);
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index 760fb7c12c..a82d09eedd 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -90,6 +90,13 @@ class OatWriter {
TimingLogger* timings,
SafeMap<std::string, std::string>* key_value_store);
+ // Returns whether the oat file has an associated image.
+ bool HasImage() const {
+ // Since the image is being created at the same time as the oat file,
+ // check if there's an image writer.
+ return image_writer_ != nullptr;
+ }
+
const OatHeader& GetOatHeader() const {
return *oat_header_;
}
@@ -272,6 +279,10 @@ class OatWriter {
// The size of the required .bss section holding the DexCache data.
size_t bss_size_;
+ // Offsets of the dex cache arrays for each app dex file. For the
+ // boot image, this information is provided by the ImageWriter.
+ SafeMap<const DexFile*, size_t> dex_cache_arrays_offsets_;
+
// Offset of the oat data from the start of the mmapped region of the elf file.
size_t oat_data_offset_;
@@ -285,8 +296,6 @@ class OatWriter {
OatHeader* oat_header_;
std::vector<OatDexFile*> oat_dex_files_;
std::vector<OatClass*> oat_classes_;
- std::unique_ptr<const std::vector<uint8_t>> interpreter_to_interpreter_bridge_;
- std::unique_ptr<const std::vector<uint8_t>> interpreter_to_compiled_code_bridge_;
std::unique_ptr<const std::vector<uint8_t>> jni_dlsym_lookup_;
std::unique_ptr<const std::vector<uint8_t>> quick_generic_jni_trampoline_;
std::unique_ptr<const std::vector<uint8_t>> quick_imt_conflict_trampoline_;
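
// Editor's note: the oat_writer hunks above reserve .bss space for each app
// dex file's dex cache arrays when no boot image is being written, recording
// where each file's arrays start. A self-contained sketch of that bookkeeping;
// DexFileDesc and its size field are stand-ins for DexFile and
// DexCacheArraysLayout, not the ART types.
#include <cstddef>
#include <map>
#include <string>
#include <vector>

constexpr size_t kPageSize = 4096;
constexpr size_t RoundUp(size_t x, size_t n) { return (x + n - 1) / n * n; }

struct DexFileDesc {
  std::string name;
  size_t dex_cache_arrays_size;  // What DexCacheArraysLayout::Size() would return.
};

int main() {
  std::vector<DexFileDesc> dex_files = {{"classes.dex", 8192}, {"classes2.dex", 4096}};
  size_t oat_size = 123456;  // Size of the oat data written so far.

  // Start the .bss on a page boundary after the oat data, then hand out
  // consecutive ranges, one per dex file, so a later dex cache patch can be
  // resolved to "start of that file's range + element offset".
  size_t bss_start = RoundUp(oat_size, kPageSize);
  size_t bss_size = 0;
  std::map<const DexFileDesc*, size_t> dex_cache_arrays_offsets;
  for (const DexFileDesc& dex_file : dex_files) {
    dex_cache_arrays_offsets[&dex_file] = bss_start + bss_size;
    bss_size += dex_file.dex_cache_arrays_size;
  }
  return bss_size == 12288 ? 0 : 1;
}
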
diff --git a/compiler/optimizing/boolean_simplifier.cc b/compiler/optimizing/boolean_simplifier.cc
index b0e83b0058..f985745e7a 100644
--- a/compiler/optimizing/boolean_simplifier.cc
+++ b/compiler/optimizing/boolean_simplifier.cc
@@ -42,8 +42,8 @@ void HBooleanSimplifier::TryRemovingNegatedCondition(HBasicBlock* block) {
// successor and the successor can only be reached from them.
static bool BlocksDoMergeTogether(HBasicBlock* block1, HBasicBlock* block2) {
if (!block1->IsSingleGoto() || !block2->IsSingleGoto()) return false;
- HBasicBlock* succ1 = block1->GetSuccessor(0);
- HBasicBlock* succ2 = block2->GetSuccessor(0);
+ HBasicBlock* succ1 = block1->GetSuccessors()[0];
+ HBasicBlock* succ2 = block2->GetSuccessors()[0];
return succ1 == succ2 && succ1->GetPredecessors().size() == 2u;
}
@@ -69,19 +69,17 @@ static HInstruction* GetOppositeCondition(HInstruction* cond) {
if (cond->IsCondition()) {
HInstruction* lhs = cond->InputAt(0);
HInstruction* rhs = cond->InputAt(1);
- if (cond->IsEqual()) {
- return new (allocator) HNotEqual(lhs, rhs);
- } else if (cond->IsNotEqual()) {
- return new (allocator) HEqual(lhs, rhs);
- } else if (cond->IsLessThan()) {
- return new (allocator) HGreaterThanOrEqual(lhs, rhs);
- } else if (cond->IsLessThanOrEqual()) {
- return new (allocator) HGreaterThan(lhs, rhs);
- } else if (cond->IsGreaterThan()) {
- return new (allocator) HLessThanOrEqual(lhs, rhs);
- } else {
- DCHECK(cond->IsGreaterThanOrEqual());
- return new (allocator) HLessThan(lhs, rhs);
+ switch (cond->AsCondition()->GetOppositeCondition()) { // get *opposite*
+ case kCondEQ: return new (allocator) HEqual(lhs, rhs);
+ case kCondNE: return new (allocator) HNotEqual(lhs, rhs);
+ case kCondLT: return new (allocator) HLessThan(lhs, rhs);
+ case kCondLE: return new (allocator) HLessThanOrEqual(lhs, rhs);
+ case kCondGT: return new (allocator) HGreaterThan(lhs, rhs);
+ case kCondGE: return new (allocator) HGreaterThanOrEqual(lhs, rhs);
+ case kCondB: return new (allocator) HBelow(lhs, rhs);
+ case kCondBE: return new (allocator) HBelowOrEqual(lhs, rhs);
+ case kCondA: return new (allocator) HAbove(lhs, rhs);
+ case kCondAE: return new (allocator) HAboveOrEqual(lhs, rhs);
}
} else if (cond->IsIntConstant()) {
HIntConstant* int_const = cond->AsIntConstant();
@@ -91,11 +89,10 @@ static HInstruction* GetOppositeCondition(HInstruction* cond) {
DCHECK(int_const->IsOne());
return graph->GetIntConstant(0);
}
- } else {
- // General case when 'cond' is another instruction of type boolean,
- // as verified by SSAChecker.
- return new (allocator) HBooleanNot(cond);
}
+ // General case when 'cond' is another instruction of type boolean,
+ // as verified by SSAChecker.
+ return new (allocator) HBooleanNot(cond);
}
void HBooleanSimplifier::TryRemovingBooleanSelection(HBasicBlock* block) {
@@ -108,7 +105,7 @@ void HBooleanSimplifier::TryRemovingBooleanSelection(HBasicBlock* block) {
if (!BlocksDoMergeTogether(true_block, false_block)) {
return;
}
- HBasicBlock* merge_block = true_block->GetSuccessor(0);
+ HBasicBlock* merge_block = true_block->GetSuccessors()[0];
if (!merge_block->HasSinglePhi()) {
return;
}
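
// Editor's note: the boolean simplifier hunk above swaps an if/else chain for
// a switch over the opposite condition and picks up the new unsigned
// comparisons (Below/BelowOrEqual/Above/AboveOrEqual). A self-contained sketch
// of the mapping itself; the enum mirrors the kCond* names but is not the ART
// definition.
#include <cassert>

enum IfCondition { kCondEQ, kCondNE, kCondLT, kCondLE, kCondGT, kCondGE,
                   kCondB, kCondBE, kCondA, kCondAE };

IfCondition GetOppositeCondition(IfCondition cond) {
  switch (cond) {
    case kCondEQ: return kCondNE;
    case kCondNE: return kCondEQ;
    case kCondLT: return kCondGE;
    case kCondLE: return kCondGT;
    case kCondGT: return kCondLE;
    case kCondGE: return kCondLT;
    case kCondB:  return kCondAE;  // unsigned <   ->  unsigned >=
    case kCondBE: return kCondA;   // unsigned <=  ->  unsigned >
    case kCondA:  return kCondBE;  // unsigned >   ->  unsigned <=
    case kCondAE: return kCondB;   // unsigned >=  ->  unsigned <
  }
  return cond;  // Unreachable for valid input.
}

int main() {
  // Negating twice gets back the original condition.
  for (int c = kCondEQ; c <= kCondAE; ++c) {
    IfCondition cond = static_cast<IfCondition>(c);
    assert(GetOppositeCondition(GetOppositeCondition(cond)) == cond);
  }
  return 0;
}
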
diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index 960f4d9b7c..bcc32403d3 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -797,8 +797,8 @@ class MonotonicValueRange : public ValueRange {
HBasicBlock* new_pre_header = header->GetDominator();
DCHECK(new_pre_header == header->GetLoopInformation()->GetPreHeader());
HBasicBlock* if_block = new_pre_header->GetDominator();
- HBasicBlock* dummy_block = if_block->GetSuccessor(0); // True successor.
- HBasicBlock* deopt_block = if_block->GetSuccessor(1); // False successor.
+ HBasicBlock* dummy_block = if_block->GetSuccessors()[0]; // True successor.
+ HBasicBlock* deopt_block = if_block->GetSuccessors()[1]; // False successor.
dummy_block->AddInstruction(new (graph->GetArena()) HGoto());
deopt_block->AddInstruction(new (graph->GetArena()) HGoto());
@@ -845,14 +845,14 @@ class MonotonicValueRange : public ValueRange {
DCHECK(header->IsLoopHeader());
HBasicBlock* pre_header = header->GetDominator();
if (loop_entry_test_block_added) {
- DCHECK(deopt_block->GetSuccessor(0) == pre_header);
+ DCHECK(deopt_block->GetSuccessors()[0] == pre_header);
} else {
DCHECK(deopt_block == pre_header);
}
HGraph* graph = header->GetGraph();
HSuspendCheck* suspend_check = header->GetLoopInformation()->GetSuspendCheck();
if (loop_entry_test_block_added) {
- DCHECK_EQ(deopt_block, header->GetDominator()->GetDominator()->GetSuccessor(1));
+ DCHECK_EQ(deopt_block, header->GetDominator()->GetDominator()->GetSuccessors()[1]);
}
HIntConstant* const_instr = graph->GetIntConstant(constant);
@@ -926,7 +926,7 @@ class MonotonicValueRange : public ValueRange {
DCHECK(header->IsLoopHeader());
HBasicBlock* pre_header = header->GetDominator();
if (loop_entry_test_block_added) {
- DCHECK(deopt_block->GetSuccessor(0) == pre_header);
+ DCHECK(deopt_block->GetSuccessors()[0] == pre_header);
} else {
DCHECK(deopt_block == pre_header);
}
@@ -965,7 +965,8 @@ class MonotonicValueRange : public ValueRange {
suspend_check->GetEnvironment(), header);
}
- HArrayLength* new_array_length = new (graph->GetArena()) HArrayLength(array);
+ HArrayLength* new_array_length
+ = new (graph->GetArena()) HArrayLength(array, array->GetDexPc());
deopt_block->InsertInstructionBefore(new_array_length, deopt_block->GetLastInstruction());
if (loop_entry_test_block_added) {
@@ -1145,7 +1146,6 @@ class BCEVisitor : public HGraphVisitor {
return nullptr;
}
uint32_t block_id = basic_block->GetBlockId();
- DCHECK_LT(block_id, maps_.size());
return &maps_[block_id];
}
@@ -1495,10 +1495,10 @@ class BCEVisitor : public HGraphVisitor {
// Start with input 1. Input 0 is from the incoming block.
HInstruction* input1 = phi->InputAt(1);
DCHECK(phi->GetBlock()->GetLoopInformation()->IsBackEdge(
- *phi->GetBlock()->GetPredecessor(1)));
+ *phi->GetBlock()->GetPredecessors()[1]));
for (size_t i = 2, e = phi->InputCount(); i < e; ++i) {
DCHECK(phi->GetBlock()->GetLoopInformation()->IsBackEdge(
- *phi->GetBlock()->GetPredecessor(i)));
+ *phi->GetBlock()->GetPredecessors()[i]));
if (input1 != phi->InputAt(i)) {
return false;
}
diff --git a/compiler/optimizing/bounds_check_elimination_test.cc b/compiler/optimizing/bounds_check_elimination_test.cc
index 08e1e3682b..ce6dc75741 100644
--- a/compiler/optimizing/bounds_check_elimination_test.cc
+++ b/compiler/optimizing/bounds_check_elimination_test.cc
@@ -91,7 +91,7 @@ TEST_F(BoundsCheckEliminationTest, NarrowingRangeArrayBoundsElimination) {
HBasicBlock* block2 = new (&allocator_) HBasicBlock(graph_);
graph_->AddBlock(block2);
HNullCheck* null_check = new (&allocator_) HNullCheck(parameter1, 0);
- HArrayLength* array_length = new (&allocator_) HArrayLength(null_check);
+ HArrayLength* array_length = new (&allocator_) HArrayLength(null_check, 0);
HBoundsCheck* bounds_check2 = new (&allocator_)
HBoundsCheck(parameter2, array_length, 0);
HArraySet* array_set = new (&allocator_) HArraySet(
@@ -104,7 +104,7 @@ TEST_F(BoundsCheckEliminationTest, NarrowingRangeArrayBoundsElimination) {
HBasicBlock* block3 = new (&allocator_) HBasicBlock(graph_);
graph_->AddBlock(block3);
null_check = new (&allocator_) HNullCheck(parameter1, 0);
- array_length = new (&allocator_) HArrayLength(null_check);
+ array_length = new (&allocator_) HArrayLength(null_check, 0);
cmp = new (&allocator_) HLessThan(parameter2, array_length);
if_inst = new (&allocator_) HIf(cmp);
block3->AddInstruction(null_check);
@@ -115,7 +115,7 @@ TEST_F(BoundsCheckEliminationTest, NarrowingRangeArrayBoundsElimination) {
HBasicBlock* block4 = new (&allocator_) HBasicBlock(graph_);
graph_->AddBlock(block4);
null_check = new (&allocator_) HNullCheck(parameter1, 0);
- array_length = new (&allocator_) HArrayLength(null_check);
+ array_length = new (&allocator_) HArrayLength(null_check, 0);
HBoundsCheck* bounds_check4 = new (&allocator_)
HBoundsCheck(parameter2, array_length, 0);
array_set = new (&allocator_) HArraySet(
@@ -128,7 +128,7 @@ TEST_F(BoundsCheckEliminationTest, NarrowingRangeArrayBoundsElimination) {
HBasicBlock* block5 = new (&allocator_) HBasicBlock(graph_);
graph_->AddBlock(block5);
null_check = new (&allocator_) HNullCheck(parameter1, 0);
- array_length = new (&allocator_) HArrayLength(null_check);
+ array_length = new (&allocator_) HArrayLength(null_check, 0);
HBoundsCheck* bounds_check5 = new (&allocator_)
HBoundsCheck(parameter2, array_length, 0);
array_set = new (&allocator_) HArraySet(
@@ -190,7 +190,7 @@ TEST_F(BoundsCheckEliminationTest, OverflowArrayBoundsElimination) {
graph_->AddBlock(block2);
HInstruction* add = new (&allocator_) HAdd(Primitive::kPrimInt, parameter2, constant_max_int);
HNullCheck* null_check = new (&allocator_) HNullCheck(parameter1, 0);
- HArrayLength* array_length = new (&allocator_) HArrayLength(null_check);
+ HArrayLength* array_length = new (&allocator_) HArrayLength(null_check, 0);
HInstruction* cmp2 = new (&allocator_) HGreaterThanOrEqual(add, array_length);
if_inst = new (&allocator_) HIf(cmp2);
block2->AddInstruction(add);
@@ -245,7 +245,7 @@ TEST_F(BoundsCheckEliminationTest, UnderflowArrayBoundsElimination) {
HBasicBlock* block1 = new (&allocator_) HBasicBlock(graph_);
graph_->AddBlock(block1);
HNullCheck* null_check = new (&allocator_) HNullCheck(parameter1, 0);
- HArrayLength* array_length = new (&allocator_) HArrayLength(null_check);
+ HArrayLength* array_length = new (&allocator_) HArrayLength(null_check, 0);
HInstruction* cmp = new (&allocator_) HGreaterThanOrEqual(parameter2, array_length);
HIf* if_inst = new (&allocator_) HIf(cmp);
block1->AddInstruction(null_check);
@@ -308,7 +308,7 @@ TEST_F(BoundsCheckEliminationTest, ConstantArrayBoundsElimination) {
entry->AddSuccessor(block);
HNullCheck* null_check = new (&allocator_) HNullCheck(parameter, 0);
- HArrayLength* array_length = new (&allocator_) HArrayLength(null_check);
+ HArrayLength* array_length = new (&allocator_) HArrayLength(null_check, 0);
HBoundsCheck* bounds_check6 = new (&allocator_)
HBoundsCheck(constant_6, array_length, 0);
HInstruction* array_set = new (&allocator_) HArraySet(
@@ -319,7 +319,7 @@ TEST_F(BoundsCheckEliminationTest, ConstantArrayBoundsElimination) {
block->AddInstruction(array_set);
null_check = new (&allocator_) HNullCheck(parameter, 0);
- array_length = new (&allocator_) HArrayLength(null_check);
+ array_length = new (&allocator_) HArrayLength(null_check, 0);
HBoundsCheck* bounds_check5 = new (&allocator_)
HBoundsCheck(constant_5, array_length, 0);
array_set = new (&allocator_) HArraySet(
@@ -330,7 +330,7 @@ TEST_F(BoundsCheckEliminationTest, ConstantArrayBoundsElimination) {
block->AddInstruction(array_set);
null_check = new (&allocator_) HNullCheck(parameter, 0);
- array_length = new (&allocator_) HArrayLength(null_check);
+ array_length = new (&allocator_) HArrayLength(null_check, 0);
HBoundsCheck* bounds_check4 = new (&allocator_)
HBoundsCheck(constant_4, array_length, 0);
array_set = new (&allocator_) HArraySet(
@@ -389,7 +389,7 @@ static HInstruction* BuildSSAGraph1(HGraph* graph,
HPhi* phi = new (allocator) HPhi(allocator, 0, 0, Primitive::kPrimInt);
HInstruction* null_check = new (allocator) HNullCheck(parameter, 0);
- HInstruction* array_length = new (allocator) HArrayLength(null_check);
+ HInstruction* array_length = new (allocator) HArrayLength(null_check, 0);
HInstruction* cmp = nullptr;
if (cond == kCondGE) {
cmp = new (allocator) HGreaterThanOrEqual(phi, array_length);
@@ -406,7 +406,7 @@ static HInstruction* BuildSSAGraph1(HGraph* graph,
phi->AddInput(constant_initial);
null_check = new (allocator) HNullCheck(parameter, 0);
- array_length = new (allocator) HArrayLength(null_check);
+ array_length = new (allocator) HArrayLength(null_check, 0);
HInstruction* bounds_check = new (allocator) HBoundsCheck(phi, array_length, 0);
HInstruction* array_set = new (allocator) HArraySet(
null_check, bounds_check, constant_10, Primitive::kPrimInt, 0);
@@ -489,7 +489,7 @@ static HInstruction* BuildSSAGraph2(HGraph *graph,
graph->AddBlock(block);
entry->AddSuccessor(block);
HInstruction* null_check = new (allocator) HNullCheck(parameter, 0);
- HInstruction* array_length = new (allocator) HArrayLength(null_check);
+ HInstruction* array_length = new (allocator) HArrayLength(null_check, 0);
block->AddInstruction(null_check);
block->AddInstruction(array_length);
block->AddInstruction(new (allocator) HGoto());
@@ -522,7 +522,7 @@ static HInstruction* BuildSSAGraph2(HGraph *graph,
HInstruction* add = new (allocator) HAdd(Primitive::kPrimInt, phi, constant_minus_1);
null_check = new (allocator) HNullCheck(parameter, 0);
- array_length = new (allocator) HArrayLength(null_check);
+ array_length = new (allocator) HArrayLength(null_check, 0);
HInstruction* bounds_check = new (allocator) HBoundsCheck(add, array_length, 0);
HInstruction* array_set = new (allocator) HArraySet(
null_check, bounds_check, constant_10, Primitive::kPrimInt, 0);
@@ -631,7 +631,7 @@ static HInstruction* BuildSSAGraph3(HGraph* graph,
phi->AddInput(constant_initial);
HNullCheck* null_check = new (allocator) HNullCheck(new_array, 0);
- HArrayLength* array_length = new (allocator) HArrayLength(null_check);
+ HArrayLength* array_length = new (allocator) HArrayLength(null_check, 0);
HInstruction* bounds_check = new (allocator) HBoundsCheck(phi, array_length, 0);
HInstruction* array_set = new (allocator) HArraySet(
null_check, bounds_check, constant_10, Primitive::kPrimInt, 0);
@@ -716,7 +716,7 @@ static HInstruction* BuildSSAGraph4(HGraph* graph,
HPhi* phi = new (allocator) HPhi(allocator, 0, 0, Primitive::kPrimInt);
HInstruction* null_check = new (allocator) HNullCheck(parameter, 0);
- HInstruction* array_length = new (allocator) HArrayLength(null_check);
+ HInstruction* array_length = new (allocator) HArrayLength(null_check, 0);
HInstruction* cmp = nullptr;
if (cond == kCondGE) {
cmp = new (allocator) HGreaterThanOrEqual(phi, array_length);
@@ -732,7 +732,7 @@ static HInstruction* BuildSSAGraph4(HGraph* graph,
phi->AddInput(constant_initial);
null_check = new (allocator) HNullCheck(parameter, 0);
- array_length = new (allocator) HArrayLength(null_check);
+ array_length = new (allocator) HArrayLength(null_check, 0);
HInstruction* sub = new (allocator) HSub(Primitive::kPrimInt, array_length, phi);
HInstruction* add_minus_1 = new (allocator)
HAdd(Primitive::kPrimInt, sub, constant_minus_1);
@@ -811,7 +811,7 @@ TEST_F(BoundsCheckEliminationTest, BubbleSortArrayBoundsElimination) {
graph_->AddBlock(outer_header);
HPhi* phi_i = new (&allocator_) HPhi(&allocator_, 0, 0, Primitive::kPrimInt);
HNullCheck* null_check = new (&allocator_) HNullCheck(parameter, 0);
- HArrayLength* array_length = new (&allocator_) HArrayLength(null_check);
+ HArrayLength* array_length = new (&allocator_) HArrayLength(null_check, 0);
HAdd* add = new (&allocator_) HAdd(Primitive::kPrimInt, array_length, constant_minus_1);
HInstruction* cmp = new (&allocator_) HGreaterThanOrEqual(phi_i, add);
HIf* if_inst = new (&allocator_) HIf(cmp);
@@ -827,7 +827,7 @@ TEST_F(BoundsCheckEliminationTest, BubbleSortArrayBoundsElimination) {
graph_->AddBlock(inner_header);
HPhi* phi_j = new (&allocator_) HPhi(&allocator_, 0, 0, Primitive::kPrimInt);
null_check = new (&allocator_) HNullCheck(parameter, 0);
- array_length = new (&allocator_) HArrayLength(null_check);
+ array_length = new (&allocator_) HArrayLength(null_check, 0);
HSub* sub = new (&allocator_) HSub(Primitive::kPrimInt, array_length, phi_i);
add = new (&allocator_) HAdd(Primitive::kPrimInt, sub, constant_minus_1);
cmp = new (&allocator_) HGreaterThanOrEqual(phi_j, add);
@@ -844,20 +844,20 @@ TEST_F(BoundsCheckEliminationTest, BubbleSortArrayBoundsElimination) {
HBasicBlock* inner_body_compare = new (&allocator_) HBasicBlock(graph_);
graph_->AddBlock(inner_body_compare);
null_check = new (&allocator_) HNullCheck(parameter, 0);
- array_length = new (&allocator_) HArrayLength(null_check);
+ array_length = new (&allocator_) HArrayLength(null_check, 0);
HBoundsCheck* bounds_check1 = new (&allocator_) HBoundsCheck(phi_j, array_length, 0);
HArrayGet* array_get_j = new (&allocator_)
- HArrayGet(null_check, bounds_check1, Primitive::kPrimInt);
+ HArrayGet(null_check, bounds_check1, Primitive::kPrimInt, 0);
inner_body_compare->AddInstruction(null_check);
inner_body_compare->AddInstruction(array_length);
inner_body_compare->AddInstruction(bounds_check1);
inner_body_compare->AddInstruction(array_get_j);
HInstruction* j_plus_1 = new (&allocator_) HAdd(Primitive::kPrimInt, phi_j, constant_1);
null_check = new (&allocator_) HNullCheck(parameter, 0);
- array_length = new (&allocator_) HArrayLength(null_check);
+ array_length = new (&allocator_) HArrayLength(null_check, 0);
HBoundsCheck* bounds_check2 = new (&allocator_) HBoundsCheck(j_plus_1, array_length, 0);
HArrayGet* array_get_j_plus_1 = new (&allocator_)
- HArrayGet(null_check, bounds_check2, Primitive::kPrimInt);
+ HArrayGet(null_check, bounds_check2, Primitive::kPrimInt, 0);
cmp = new (&allocator_) HGreaterThanOrEqual(array_get_j, array_get_j_plus_1);
if_inst = new (&allocator_) HIf(cmp);
inner_body_compare->AddInstruction(j_plus_1);
@@ -873,10 +873,10 @@ TEST_F(BoundsCheckEliminationTest, BubbleSortArrayBoundsElimination) {
j_plus_1 = new (&allocator_) HAdd(Primitive::kPrimInt, phi_j, constant_1);
// temp = array[j+1]
null_check = new (&allocator_) HNullCheck(parameter, 0);
- array_length = new (&allocator_) HArrayLength(null_check);
+ array_length = new (&allocator_) HArrayLength(null_check, 0);
HInstruction* bounds_check3 = new (&allocator_) HBoundsCheck(j_plus_1, array_length, 0);
array_get_j_plus_1 = new (&allocator_)
- HArrayGet(null_check, bounds_check3, Primitive::kPrimInt);
+ HArrayGet(null_check, bounds_check3, Primitive::kPrimInt, 0);
inner_body_swap->AddInstruction(j_plus_1);
inner_body_swap->AddInstruction(null_check);
inner_body_swap->AddInstruction(array_length);
@@ -884,16 +884,16 @@ TEST_F(BoundsCheckEliminationTest, BubbleSortArrayBoundsElimination) {
inner_body_swap->AddInstruction(array_get_j_plus_1);
// array[j+1] = array[j]
null_check = new (&allocator_) HNullCheck(parameter, 0);
- array_length = new (&allocator_) HArrayLength(null_check);
+ array_length = new (&allocator_) HArrayLength(null_check, 0);
HInstruction* bounds_check4 = new (&allocator_) HBoundsCheck(phi_j, array_length, 0);
array_get_j = new (&allocator_)
- HArrayGet(null_check, bounds_check4, Primitive::kPrimInt);
+ HArrayGet(null_check, bounds_check4, Primitive::kPrimInt, 0);
inner_body_swap->AddInstruction(null_check);
inner_body_swap->AddInstruction(array_length);
inner_body_swap->AddInstruction(bounds_check4);
inner_body_swap->AddInstruction(array_get_j);
null_check = new (&allocator_) HNullCheck(parameter, 0);
- array_length = new (&allocator_) HArrayLength(null_check);
+ array_length = new (&allocator_) HArrayLength(null_check, 0);
HInstruction* bounds_check5 = new (&allocator_) HBoundsCheck(j_plus_1, array_length, 0);
HArraySet* array_set_j_plus_1 = new (&allocator_)
HArraySet(null_check, bounds_check5, array_get_j, Primitive::kPrimInt, 0);
@@ -903,7 +903,7 @@ TEST_F(BoundsCheckEliminationTest, BubbleSortArrayBoundsElimination) {
inner_body_swap->AddInstruction(array_set_j_plus_1);
// array[j] = temp
null_check = new (&allocator_) HNullCheck(parameter, 0);
- array_length = new (&allocator_) HArrayLength(null_check);
+ array_length = new (&allocator_) HArrayLength(null_check, 0);
HInstruction* bounds_check6 = new (&allocator_) HBoundsCheck(phi_j, array_length, 0);
HArraySet* array_set_j = new (&allocator_)
HArraySet(null_check, bounds_check6, array_get_j_plus_1, Primitive::kPrimInt, 0);
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index ebbfb14190..21540e8ed7 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -375,7 +375,7 @@ void HGraphBuilder::InsertTryBoundaryBlocks(const DexFile::CodeItem& code_item)
// We do not split each edge separately, but rather create one boundary block
// that all predecessors are relinked to. This preserves loop headers (b/23895756).
for (auto entry : try_block_info) {
- HBasicBlock* try_block = graph_->GetBlock(entry.first);
+ HBasicBlock* try_block = graph_->GetBlocks()[entry.first];
for (HBasicBlock* predecessor : try_block->GetPredecessors()) {
if (GetTryItem(predecessor, try_block_info) != entry.second) {
// Found a predecessor not covered by the same TryItem. Insert entering
@@ -392,10 +392,10 @@ void HGraphBuilder::InsertTryBoundaryBlocks(const DexFile::CodeItem& code_item)
// Do a second pass over the try blocks and insert exit TryBoundaries where
// the successor is not in the same TryItem.
for (auto entry : try_block_info) {
- HBasicBlock* try_block = graph_->GetBlock(entry.first);
+ HBasicBlock* try_block = graph_->GetBlocks()[entry.first];
// NOTE: Do not use iterators because SplitEdge would invalidate them.
for (size_t i = 0, e = try_block->GetSuccessors().size(); i < e; ++i) {
- HBasicBlock* successor = try_block->GetSuccessor(i);
+ HBasicBlock* successor = try_block->GetSuccessors()[i];
// If the successor is a try block, all of its predecessors must be
// covered by the same TryItem. Otherwise the previous pass would have
@@ -581,7 +581,6 @@ bool HGraphBuilder::ComputeBranchTargets(const uint16_t* code_ptr,
HBasicBlock* HGraphBuilder::FindBlockStartingAt(int32_t dex_pc) const {
DCHECK_GE(dex_pc, 0);
- DCHECK_LT(static_cast<size_t>(dex_pc), branch_targets_.size());
return branch_targets_[dex_pc];
}
@@ -940,7 +939,8 @@ HClinitCheck* HGraphBuilder::ProcessClinitCheckForInvoke(
storage_index,
*dex_compilation_unit_->GetDexFile(),
is_outer_class,
- dex_pc);
+ dex_pc,
+ /*needs_access_check*/ false);
current_block_->AddInstruction(load_class);
clinit_check = new (arena_) HClinitCheck(load_class, dex_pc);
current_block_->AddInstruction(clinit_check);
@@ -1175,10 +1175,9 @@ void HGraphBuilder::PotentiallySimplifyFakeString(uint16_t original_dex_register
verified_method->GetStringInitPcRegMap();
auto map_it = string_init_map.find(dex_pc);
if (map_it != string_init_map.end()) {
- std::set<uint32_t> reg_set = map_it->second;
- for (auto set_it = reg_set.begin(); set_it != reg_set.end(); ++set_it) {
+ for (uint32_t reg : map_it->second) {
HInstruction* load_local = LoadLocal(original_dex_register, Primitive::kPrimNot, dex_pc);
- UpdateLocal(*set_it, load_local, dex_pc);
+ UpdateLocal(reg, load_local, dex_pc);
}
}
} else {
@@ -1302,7 +1301,13 @@ bool HGraphBuilder::IsOutermostCompilingClass(uint16_t type_index) const {
soa, dex_cache, class_loader, type_index, dex_compilation_unit_)));
Handle<mirror::Class> outer_class(hs.NewHandle(GetOutermostCompilingClass()));
- return outer_class.Get() == cls.Get();
+ // GetOutermostCompilingClass returns null when the class is unresolved
+ // (e.g. if it derives from an unresolved class). This is bogus knowing that
+ // we are compiling it.
+ // When this happens we cannot establish a direct relation between the current
+ // class and the outer class, so we return false.
+ // (Note that this is only used for optimizing invokes and field accesses)
+ return (cls.Get() != nullptr) && (outer_class.Get() == cls.Get());
}
void HGraphBuilder::BuildUnresolvedStaticFieldAccess(const Instruction& instruction,
@@ -1384,7 +1389,8 @@ bool HGraphBuilder::BuildStaticFieldAccess(const Instruction& instruction,
storage_index,
*dex_compilation_unit_->GetDexFile(),
is_outer_class,
- dex_pc);
+ dex_pc,
+ /*needs_access_check*/ false);
current_block_->AddInstruction(constant);
HInstruction* cls = constant;
@@ -1615,7 +1621,9 @@ void HGraphBuilder::BuildFillWideArrayData(HInstruction* object,
static TypeCheckKind ComputeTypeCheckKind(Handle<mirror::Class> cls)
SHARED_REQUIRES(Locks::mutator_lock_) {
- if (cls->IsInterface()) {
+ if (cls.Get() == nullptr) {
+ return TypeCheckKind::kUnresolvedCheck;
+ } else if (cls->IsInterface()) {
return TypeCheckKind::kInterfaceCheck;
} else if (cls->IsArrayClass()) {
if (cls->GetComponentType()->IsObjectClass()) {
@@ -1634,11 +1642,20 @@ static TypeCheckKind ComputeTypeCheckKind(Handle<mirror::Class> cls)
}
}
-bool HGraphBuilder::BuildTypeCheck(const Instruction& instruction,
+void HGraphBuilder::BuildTypeCheck(const Instruction& instruction,
uint8_t destination,
uint8_t reference,
uint16_t type_index,
uint32_t dex_pc) {
+ bool type_known_final, type_known_abstract, use_declaring_class;
+ bool can_access = compiler_driver_->CanAccessTypeWithoutChecks(
+ dex_compilation_unit_->GetDexMethodIndex(),
+ *dex_compilation_unit_->GetDexFile(),
+ type_index,
+ &type_known_final,
+ &type_known_abstract,
+ &use_declaring_class);
+
ScopedObjectAccess soa(Thread::Current());
StackHandleScope<2> hs(soa.Self());
Handle<mirror::DexCache> dex_cache(hs.NewHandle(
@@ -1646,22 +1663,14 @@ bool HGraphBuilder::BuildTypeCheck(const Instruction& instruction,
soa.Self(), *dex_compilation_unit_->GetDexFile())));
Handle<mirror::Class> resolved_class(hs.NewHandle(dex_cache->GetResolvedType(type_index)));
- if ((resolved_class.Get() == nullptr) ||
- // TODO: Remove this check once the compiler actually knows which
- // ArtMethod it is compiling.
- (GetCompilingClass() == nullptr) ||
- !GetCompilingClass()->CanAccess(resolved_class.Get())) {
- MaybeRecordStat(MethodCompilationStat::kNotCompiledCantAccesType);
- return false;
- }
-
HInstruction* object = LoadLocal(reference, Primitive::kPrimNot, dex_pc);
HLoadClass* cls = new (arena_) HLoadClass(
graph_->GetCurrentMethod(),
type_index,
*dex_compilation_unit_->GetDexFile(),
IsOutermostCompilingClass(type_index),
- dex_pc);
+ dex_pc,
+ !can_access);
current_block_->AddInstruction(cls);
// The class needs a temporary before being used by the type check.
@@ -1676,7 +1685,6 @@ bool HGraphBuilder::BuildTypeCheck(const Instruction& instruction,
DCHECK_EQ(instruction.Opcode(), Instruction::CHECK_CAST);
current_block_->AddInstruction(new (arena_) HCheckCast(object, cls, check_kind, dex_pc));
}
- return true;
}
bool HGraphBuilder::NeedsAccessCheck(uint32_t type_index) const {
@@ -2791,16 +2799,13 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
bool can_access = compiler_driver_->CanAccessTypeWithoutChecks(
dex_compilation_unit_->GetDexMethodIndex(), *dex_file_, type_index,
&type_known_final, &type_known_abstract, &dont_use_is_referrers_class);
- if (!can_access) {
- MaybeRecordStat(MethodCompilationStat::kNotCompiledCantAccesType);
- return false;
- }
current_block_->AddInstruction(new (arena_) HLoadClass(
graph_->GetCurrentMethod(),
type_index,
*dex_compilation_unit_->GetDexFile(),
IsOutermostCompilingClass(type_index),
- dex_pc));
+ dex_pc,
+ !can_access));
UpdateLocal(instruction.VRegA_21c(), current_block_->GetLastInstruction(), dex_pc);
break;
}
@@ -2827,18 +2832,14 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
uint8_t destination = instruction.VRegA_22c();
uint8_t reference = instruction.VRegB_22c();
uint16_t type_index = instruction.VRegC_22c();
- if (!BuildTypeCheck(instruction, destination, reference, type_index, dex_pc)) {
- return false;
- }
+ BuildTypeCheck(instruction, destination, reference, type_index, dex_pc);
break;
}
case Instruction::CHECK_CAST: {
uint8_t reference = instruction.VRegA_21c();
uint16_t type_index = instruction.VRegB_21c();
- if (!BuildTypeCheck(instruction, -1, reference, type_index, dex_pc)) {
- return false;
- }
+ BuildTypeCheck(instruction, -1, reference, type_index, dex_pc);
break;
}
@@ -2880,7 +2881,6 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
} // NOLINT(readability/fn_size)
HLocal* HGraphBuilder::GetLocalAt(uint32_t register_index) const {
- DCHECK_LT(register_index, locals_.size());
return locals_[register_index];
}
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index b2dc24169e..6910d5195c 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -236,8 +236,7 @@ class HGraphBuilder : public ValueObject {
uint32_t dex_pc);
// Builds a `HInstanceOf`, or a `HCheckCast` instruction.
- // Returns whether we succeeded in building the instruction.
- bool BuildTypeCheck(const Instruction& instruction,
+ void BuildTypeCheck(const Instruction& instruction,
uint8_t destination,
uint8_t reference,
uint16_t type_index,
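
// Editor's note: the builder hunks above stop aborting compilation when a type
// cannot be accessed without checks; the access-check requirement is instead
// recorded on the HLoadClass node and handled at code generation time. A
// minimal sketch of that shape; LoadClassNode and BuildLoadClass are
// illustrative stand-ins, not the ART classes.
#include <cstdint>
#include <vector>

struct LoadClassNode {
  uint16_t type_index;
  bool needs_access_check;  // If set, codegen calls into the runtime instead.
};

// Previously: give up on the whole method when access is not statically
// provable. Now: always build the node and just remember the check.
LoadClassNode BuildLoadClass(uint16_t type_index, bool can_access_without_checks) {
  return LoadClassNode{type_index, !can_access_without_checks};
}

int main() {
  std::vector<LoadClassNode> graph;
  graph.push_back(BuildLoadClass(42, /*can_access_without_checks=*/false));
  // The node stays in the graph; the backend picks a fast path or a runtime
  // call based on needs_access_check.
  return graph.back().needs_access_check ? 0 : 1;
}
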
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 8254277f96..6a743ebbc9 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -41,6 +41,7 @@
#include "driver/dex_compilation_unit.h"
#include "gc_map_builder.h"
#include "graph_visualizer.h"
+#include "intrinsics.h"
#include "leb128.h"
#include "mapping_table.h"
#include "mirror/array-inl.h"
@@ -155,7 +156,6 @@ void CodeGenerator::CompileBaseline(CodeAllocator* allocator, bool is_leaf) {
}
bool CodeGenerator::GoesToNextBlock(HBasicBlock* current, HBasicBlock* next) const {
- DCHECK_LT(current_block_index_, block_order_->size());
DCHECK_EQ((*block_order_)[current_block_index_], current);
return GetNextBlockToEmit() == FirstNonEmptyBlock(next);
}
@@ -172,7 +172,7 @@ HBasicBlock* CodeGenerator::GetNextBlockToEmit() const {
HBasicBlock* CodeGenerator::FirstNonEmptyBlock(HBasicBlock* block) const {
while (block->IsSingleJump()) {
- block = block->GetSuccessor(0);
+ block = block->GetSuccessors()[0];
}
return block;
}
@@ -537,6 +537,27 @@ void CodeGenerator::GenerateUnresolvedFieldAccess(
}
}
+void CodeGenerator::CreateLoadClassLocationSummary(HLoadClass* cls,
+ Location runtime_type_index_location,
+ Location runtime_return_location) {
+ ArenaAllocator* allocator = cls->GetBlock()->GetGraph()->GetArena();
+ LocationSummary::CallKind call_kind = cls->NeedsAccessCheck()
+ ? LocationSummary::kCall
+ : (cls->CanCallRuntime()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall);
+ LocationSummary* locations = new (allocator) LocationSummary(cls, call_kind);
+ if (cls->NeedsAccessCheck()) {
+ locations->SetInAt(0, Location::NoLocation());
+ locations->AddTemp(runtime_type_index_location);
+ locations->SetOut(runtime_return_location);
+ } else {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister());
+ }
+}
+
+
void CodeGenerator::BlockIfInRegister(Location location, bool is_out) const {
// The DCHECKS below check that a register is not specified twice in
// the summary. The out location can overlap with an input, so we need
@@ -873,7 +894,7 @@ void CodeGenerator::BuildMappingTable(ArenaVector<uint8_t>* data) const {
}
void CodeGenerator::BuildVMapTable(ArenaVector<uint8_t>* data) const {
- Leb128Encoder<ArenaAllocatorAdapter<uint8_t>> vmap_encoder(data);
+ Leb128Encoder<ArenaVector<uint8_t>> vmap_encoder(data);
// We currently don't use callee-saved registers.
size_t size = 0 + 1 /* marker */ + 0;
vmap_encoder.Reserve(size + 1u); // All values are likely to be one byte in ULEB128 (<128).
@@ -1361,4 +1382,57 @@ void SlowPathCode::RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary*
}
}
+void CodeGenerator::CreateSystemArrayCopyLocationSummary(HInvoke* invoke) {
+ // Check to see if we have known failures that will cause us to have to bail out
+ // to the runtime, and just generate the runtime call directly.
+ HIntConstant* src_pos = invoke->InputAt(1)->AsIntConstant();
+ HIntConstant* dest_pos = invoke->InputAt(3)->AsIntConstant();
+
+ // The positions must be non-negative.
+ if ((src_pos != nullptr && src_pos->GetValue() < 0) ||
+ (dest_pos != nullptr && dest_pos->GetValue() < 0)) {
+ // We will have to fail anyways.
+ return;
+ }
+
+ // The length must be >= 0.
+ HIntConstant* length = invoke->InputAt(4)->AsIntConstant();
+ if (length != nullptr) {
+ int32_t len = length->GetValue();
+ if (len < 0) {
+ // Just call as normal.
+ return;
+ }
+ }
+
+ SystemArrayCopyOptimizations optimizations(invoke);
+
+ if (optimizations.GetDestinationIsSource()) {
+ if (src_pos != nullptr && dest_pos != nullptr && src_pos->GetValue() < dest_pos->GetValue()) {
+ // We only support backward copying if source and destination are the same.
+ return;
+ }
+ }
+
+ if (optimizations.GetDestinationIsPrimitiveArray() || optimizations.GetSourceIsPrimitiveArray()) {
+ // We currently don't intrinsify primitive copying.
+ return;
+ }
+
+ ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetArena();
+ LocationSummary* locations = new (allocator) LocationSummary(invoke,
+ LocationSummary::kCallOnSlowPath,
+ kIntrinsified);
+ // arraycopy(Object src, int src_pos, Object dest, int dest_pos, int length).
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
+ locations->SetInAt(2, Location::RequiresRegister());
+ locations->SetInAt(3, Location::RegisterOrConstant(invoke->InputAt(3)));
+ locations->SetInAt(4, Location::RegisterOrConstant(invoke->InputAt(4)));
+
+ locations->AddTemp(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+}
+
} // namespace art
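
// Editor's note: CreateSystemArrayCopyLocationSummary() above declines to set
// up the intrinsic fast path whenever compile-time constants already guarantee
// a runtime failure (negative position or length) or an overlapping forward
// copy. A standalone sketch of those guards, using plain pointers where a null
// value means "not a compile-time constant"; this is illustrative, not the ART
// logic verbatim.
#include <cstdio>

// Returns true if the intrinsic fast path is worth setting up.
bool ShouldIntrinsifyArrayCopy(const int* src_pos, const int* dest_pos,
                               const int* length, bool dest_is_source) {
  if ((src_pos != nullptr && *src_pos < 0) ||
      (dest_pos != nullptr && *dest_pos < 0)) {
    return false;  // Positions must be non-negative; we would fail anyway.
  }
  if (length != nullptr && *length < 0) {
    return false;  // Negative length: just call the runtime as normal.
  }
  if (dest_is_source && src_pos != nullptr && dest_pos != nullptr &&
      *src_pos < *dest_pos) {
    return false;  // Only backward copying is supported when src == dest.
  }
  return true;
}

int main() {
  int src = 0, dest = 4, len = -1;
  std::printf("%d %d\n",
              ShouldIntrinsifyArrayCopy(&src, &dest, &len, false),     // 0: bad length
              ShouldIntrinsifyArrayCopy(&src, &dest, nullptr, true));  // 0: forward overlap
  return 0;
}
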
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index a3ebc43f11..b04dfc00b2 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -416,6 +416,13 @@ class CodeGenerator {
uint32_t dex_pc,
const FieldAccessCallingConvention& calling_convention);
+ // TODO: This overlaps a bit with MoveFromReturnRegister. Refactor for a better design.
+ static void CreateLoadClassLocationSummary(HLoadClass* cls,
+ Location runtime_type_index_location,
+ Location runtime_return_location);
+
+ static void CreateSystemArrayCopyLocationSummary(HInvoke* invoke);
+
void SetDisassemblyInformation(DisassemblyInformation* info) { disasm_info_ = info; }
DisassemblyInformation* GetDisassemblyInformation() const { return disasm_info_; }
@@ -526,6 +533,8 @@ class CodeGenerator {
template <typename LabelType>
LabelType* CommonInitializeLabels() {
+ // We use raw array allocations instead of ArenaVector<> because Labels are
+ // non-constructible and non-movable and as such cannot be held in a vector.
size_t size = GetGraph()->GetBlocks().size();
LabelType* labels = GetGraph()->GetArena()->AllocArray<LabelType>(size,
kArenaAllocCodeGenerator);
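
// Editor's note: the comment added to CommonInitializeLabels() above explains
// why labels live in a raw arena array rather than an ArenaVector: assembler
// labels can be neither copied nor moved, so containers that relocate their
// elements cannot hold them. A minimal sketch with a stand-in Label type and
// plain new[] in place of the arena allocator.
#include <type_traits>

struct Label {
  Label() = default;
  Label(const Label&) = delete;             // Copying would break references
  Label& operator=(const Label&) = delete;  // held by emitted branches.
  int position = -1;
};

static_assert(!std::is_copy_constructible<Label>::value &&
              !std::is_move_constructible<Label>::value,
              "Label must stay where it was allocated");

int main() {
  const int num_blocks = 8;
  // Default-constructing the whole array up front is fine; what would not
  // work is growing a vector, since that needs to move existing elements.
  Label* labels = new Label[num_blocks];
  labels[3].position = 42;
  int result = labels[3].position == 42 ? 0 : 1;
  delete[] labels;
  return result;
}
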
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 299350b879..3e6cad83fa 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -409,7 +409,7 @@ class ArraySetSlowPathARM : public SlowPathCode {
#undef __
#define __ down_cast<ArmAssembler*>(GetAssembler())->
-inline Condition ARMSignedOrFPCondition(IfCondition cond) {
+inline Condition ARMCondition(IfCondition cond) {
switch (cond) {
case kCondEQ: return EQ;
case kCondNE: return NE;
@@ -417,19 +417,30 @@ inline Condition ARMSignedOrFPCondition(IfCondition cond) {
case kCondLE: return LE;
case kCondGT: return GT;
case kCondGE: return GE;
+ case kCondB: return LO;
+ case kCondBE: return LS;
+ case kCondA: return HI;
+ case kCondAE: return HS;
}
LOG(FATAL) << "Unreachable";
UNREACHABLE();
}
+// Maps signed condition to unsigned condition.
inline Condition ARMUnsignedCondition(IfCondition cond) {
switch (cond) {
case kCondEQ: return EQ;
case kCondNE: return NE;
+ // Signed to unsigned.
case kCondLT: return LO;
case kCondLE: return LS;
case kCondGT: return HI;
case kCondGE: return HS;
+ // Unsigned remain unchanged.
+ case kCondB: return LO;
+ case kCondBE: return LS;
+ case kCondA: return HI;
+ case kCondAE: return HS;
}
LOG(FATAL) << "Unreachable";
UNREACHABLE();
@@ -1130,8 +1141,7 @@ void LocationsBuilderARM::VisitExit(HExit* exit) {
exit->SetLocations(nullptr);
}
-void InstructionCodeGeneratorARM::VisitExit(HExit* exit) {
- UNUSED(exit);
+void InstructionCodeGeneratorARM::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
}
void InstructionCodeGeneratorARM::GenerateCompareWithImmediate(Register left, int32_t right) {
@@ -1149,12 +1159,13 @@ void InstructionCodeGeneratorARM::GenerateFPJumps(HCondition* cond,
Label* true_label,
Label* false_label) {
__ vmstat(); // transfer FP status register to ARM APSR.
+ // TODO: merge into a single branch (except "equal or unordered" and "not equal")
if (cond->IsFPConditionTrueIfNaN()) {
__ b(true_label, VS); // VS for unordered.
} else if (cond->IsFPConditionFalseIfNaN()) {
__ b(false_label, VS); // VS for unordered.
}
- __ b(true_label, ARMSignedOrFPCondition(cond->GetCondition()));
+ __ b(true_label, ARMCondition(cond->GetCondition()));
}
void InstructionCodeGeneratorARM::GenerateLongComparesAndJumps(HCondition* cond,
@@ -1169,10 +1180,11 @@ void InstructionCodeGeneratorARM::GenerateLongComparesAndJumps(HCondition* cond,
Register left_low = left.AsRegisterPairLow<Register>();
IfCondition true_high_cond = if_cond;
IfCondition false_high_cond = cond->GetOppositeCondition();
- Condition final_condition = ARMUnsignedCondition(if_cond);
+ Condition final_condition = ARMUnsignedCondition(if_cond); // unsigned on lower part
// Set the conditions for the test, remembering that == needs to be
// decided using the low words.
+ // TODO: consider avoiding jumps with temporary and CMP low+SBC high
switch (if_cond) {
case kCondEQ:
case kCondNE:
@@ -1190,6 +1202,18 @@ void InstructionCodeGeneratorARM::GenerateLongComparesAndJumps(HCondition* cond,
case kCondGE:
true_high_cond = kCondGT;
break;
+ case kCondB:
+ false_high_cond = kCondA;
+ break;
+ case kCondBE:
+ true_high_cond = kCondB;
+ break;
+ case kCondA:
+ false_high_cond = kCondB;
+ break;
+ case kCondAE:
+ true_high_cond = kCondA;
+ break;
}
if (right.IsConstant()) {
int64_t value = right.GetConstant()->AsLongConstant()->GetValue();
@@ -1198,12 +1222,12 @@ void InstructionCodeGeneratorARM::GenerateLongComparesAndJumps(HCondition* cond,
GenerateCompareWithImmediate(left_high, val_high);
if (if_cond == kCondNE) {
- __ b(true_label, ARMSignedOrFPCondition(true_high_cond));
+ __ b(true_label, ARMCondition(true_high_cond));
} else if (if_cond == kCondEQ) {
- __ b(false_label, ARMSignedOrFPCondition(false_high_cond));
+ __ b(false_label, ARMCondition(false_high_cond));
} else {
- __ b(true_label, ARMSignedOrFPCondition(true_high_cond));
- __ b(false_label, ARMSignedOrFPCondition(false_high_cond));
+ __ b(true_label, ARMCondition(true_high_cond));
+ __ b(false_label, ARMCondition(false_high_cond));
}
// Must be equal high, so compare the lows.
GenerateCompareWithImmediate(left_low, val_low);
@@ -1213,17 +1237,18 @@ void InstructionCodeGeneratorARM::GenerateLongComparesAndJumps(HCondition* cond,
__ cmp(left_high, ShifterOperand(right_high));
if (if_cond == kCondNE) {
- __ b(true_label, ARMSignedOrFPCondition(true_high_cond));
+ __ b(true_label, ARMCondition(true_high_cond));
} else if (if_cond == kCondEQ) {
- __ b(false_label, ARMSignedOrFPCondition(false_high_cond));
+ __ b(false_label, ARMCondition(false_high_cond));
} else {
- __ b(true_label, ARMSignedOrFPCondition(true_high_cond));
- __ b(false_label, ARMSignedOrFPCondition(false_high_cond));
+ __ b(true_label, ARMCondition(true_high_cond));
+ __ b(false_label, ARMCondition(false_high_cond));
}
// Must be equal high, so compare the lows.
__ cmp(left_low, ShifterOperand(right_low));
}
// The last comparison might be unsigned.
+ // TODO: optimize cases where this is always true/false
__ b(true_label, final_condition);
}
@@ -1315,7 +1340,7 @@ void InstructionCodeGeneratorARM::GenerateTestAndBranch(HInstruction* instructio
DCHECK(right.IsConstant());
GenerateCompareWithImmediate(left, CodeGenerator::GetInt32ValueOf(right.GetConstant()));
}
- __ b(true_target, ARMSignedOrFPCondition(cond->AsCondition()->GetCondition()));
+ __ b(true_target, ARMCondition(cond->AsCondition()->GetCondition()));
}
}
if (false_target != nullptr) {
@@ -1417,11 +1442,11 @@ void InstructionCodeGeneratorARM::VisitCondition(HCondition* cond) {
GenerateCompareWithImmediate(left.AsRegister<Register>(),
CodeGenerator::GetInt32ValueOf(right.GetConstant()));
}
- __ it(ARMSignedOrFPCondition(cond->GetCondition()), kItElse);
+ __ it(ARMCondition(cond->GetCondition()), kItElse);
__ mov(locations->Out().AsRegister<Register>(), ShifterOperand(1),
- ARMSignedOrFPCondition(cond->GetCondition()));
+ ARMCondition(cond->GetCondition()));
__ mov(locations->Out().AsRegister<Register>(), ShifterOperand(0),
- ARMSignedOrFPCondition(cond->GetOppositeCondition()));
+ ARMCondition(cond->GetOppositeCondition()));
return;
}
case Primitive::kPrimLong:
@@ -1500,6 +1525,38 @@ void InstructionCodeGeneratorARM::VisitGreaterThanOrEqual(HGreaterThanOrEqual* c
VisitCondition(comp);
}
+void LocationsBuilderARM::VisitBelow(HBelow* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorARM::VisitBelow(HBelow* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderARM::VisitBelowOrEqual(HBelowOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorARM::VisitBelowOrEqual(HBelowOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderARM::VisitAbove(HAbove* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorARM::VisitAbove(HAbove* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderARM::VisitAboveOrEqual(HAboveOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorARM::VisitAboveOrEqual(HAboveOrEqual* comp) {
+ VisitCondition(comp);
+}
+
void LocationsBuilderARM::VisitLocal(HLocal* local) {
local->SetLocations(nullptr);
}
@@ -1512,9 +1569,8 @@ void LocationsBuilderARM::VisitLoadLocal(HLoadLocal* load) {
load->SetLocations(nullptr);
}
-void InstructionCodeGeneratorARM::VisitLoadLocal(HLoadLocal* load) {
+void InstructionCodeGeneratorARM::VisitLoadLocal(HLoadLocal* load ATTRIBUTE_UNUSED) {
// Nothing to do, this is driven by the code generator.
- UNUSED(load);
}
void LocationsBuilderARM::VisitStoreLocal(HStoreLocal* store) {
@@ -1541,8 +1597,7 @@ void LocationsBuilderARM::VisitStoreLocal(HStoreLocal* store) {
}
}
-void InstructionCodeGeneratorARM::VisitStoreLocal(HStoreLocal* store) {
- UNUSED(store);
+void InstructionCodeGeneratorARM::VisitStoreLocal(HStoreLocal* store ATTRIBUTE_UNUSED) {
}
void LocationsBuilderARM::VisitIntConstant(HIntConstant* constant) {
@@ -1551,9 +1606,8 @@ void LocationsBuilderARM::VisitIntConstant(HIntConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorARM::VisitIntConstant(HIntConstant* constant) {
+void InstructionCodeGeneratorARM::VisitIntConstant(HIntConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderARM::VisitNullConstant(HNullConstant* constant) {
@@ -1562,9 +1616,8 @@ void LocationsBuilderARM::VisitNullConstant(HNullConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorARM::VisitNullConstant(HNullConstant* constant) {
+void InstructionCodeGeneratorARM::VisitNullConstant(HNullConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderARM::VisitLongConstant(HLongConstant* constant) {
@@ -1573,9 +1626,8 @@ void LocationsBuilderARM::VisitLongConstant(HLongConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorARM::VisitLongConstant(HLongConstant* constant) {
+void InstructionCodeGeneratorARM::VisitLongConstant(HLongConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderARM::VisitFloatConstant(HFloatConstant* constant) {
@@ -1584,9 +1636,8 @@ void LocationsBuilderARM::VisitFloatConstant(HFloatConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorARM::VisitFloatConstant(HFloatConstant* constant) {
+void InstructionCodeGeneratorARM::VisitFloatConstant(HFloatConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderARM::VisitDoubleConstant(HDoubleConstant* constant) {
@@ -1595,9 +1646,8 @@ void LocationsBuilderARM::VisitDoubleConstant(HDoubleConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorARM::VisitDoubleConstant(HDoubleConstant* constant) {
+void InstructionCodeGeneratorARM::VisitDoubleConstant(HDoubleConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderARM::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
@@ -1612,8 +1662,7 @@ void LocationsBuilderARM::VisitReturnVoid(HReturnVoid* ret) {
ret->SetLocations(nullptr);
}
-void InstructionCodeGeneratorARM::VisitReturnVoid(HReturnVoid* ret) {
- UNUSED(ret);
+void InstructionCodeGeneratorARM::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE_UNUSED) {
codegen_->GenerateFrameExit();
}
@@ -1623,8 +1672,7 @@ void LocationsBuilderARM::VisitReturn(HReturn* ret) {
locations->SetInAt(0, parameter_visitor_.GetReturnLocation(ret->InputAt(0)->GetType()));
}
-void InstructionCodeGeneratorARM::VisitReturn(HReturn* ret) {
- UNUSED(ret);
+void InstructionCodeGeneratorARM::VisitReturn(HReturn* ret ATTRIBUTE_UNUSED) {
codegen_->GenerateFrameExit();
}
@@ -1645,6 +1693,7 @@ void LocationsBuilderARM::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invok
DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
IntrinsicLocationsBuilderARM intrinsic(GetGraph()->GetArena(),
+ codegen_->GetAssembler(),
codegen_->GetInstructionSetFeatures());
if (intrinsic.TryDispatch(invoke)) {
return;
@@ -1684,6 +1733,7 @@ void LocationsBuilderARM::HandleInvoke(HInvoke* invoke) {
void LocationsBuilderARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
IntrinsicLocationsBuilderARM intrinsic(GetGraph()->GetArena(),
+ codegen_->GetAssembler(),
codegen_->GetInstructionSetFeatures());
if (intrinsic.TryDispatch(invoke)) {
return;
@@ -3268,8 +3318,7 @@ void LocationsBuilderARM::VisitPhi(HPhi* instruction) {
locations->SetOut(Location::Any());
}
-void InstructionCodeGeneratorARM::VisitPhi(HPhi* instruction) {
- UNUSED(instruction);
+void InstructionCodeGeneratorARM::VisitPhi(HPhi* instruction ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unreachable";
}
@@ -3512,6 +3561,47 @@ void LocationsBuilderARM::HandleFieldGet(HInstruction* instruction, const FieldI
}
}
+Location LocationsBuilderARM::ArmEncodableConstantOrRegister(HInstruction* constant,
+ Opcode opcode) {
+ DCHECK(!Primitive::IsFloatingPointType(constant->GetType()));
+ if (constant->IsConstant() &&
+ CanEncodeConstantAsImmediate(constant->AsConstant(), opcode)) {
+ return Location::ConstantLocation(constant->AsConstant());
+ }
+ return Location::RequiresRegister();
+}
+
+bool LocationsBuilderARM::CanEncodeConstantAsImmediate(HConstant* input_cst,
+ Opcode opcode) {
+ uint64_t value = static_cast<uint64_t>(Int64FromConstant(input_cst));
+ if (Primitive::Is64BitType(input_cst->GetType())) {
+ return CanEncodeConstantAsImmediate(Low32Bits(value), opcode) &&
+ CanEncodeConstantAsImmediate(High32Bits(value), opcode);
+ } else {
+ return CanEncodeConstantAsImmediate(Low32Bits(value), opcode);
+ }
+}
+
+bool LocationsBuilderARM::CanEncodeConstantAsImmediate(uint32_t value, Opcode opcode) {
+ ShifterOperand so;
+ ArmAssembler* assembler = codegen_->GetAssembler();
+ if (assembler->ShifterOperandCanHold(kNoRegister, kNoRegister, opcode, value, &so)) {
+ return true;
+ }
+ Opcode neg_opcode = kNoOperand;
+ switch (opcode) {
+ case AND:
+ neg_opcode = BIC;
+ break;
+ case ORR:
+ neg_opcode = ORN;
+ break;
+ default:
+ return false;
+ }
+ return assembler->ShifterOperandCanHold(kNoRegister, kNoRegister, neg_opcode, ~value, &so);
+}
+
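A minimal standalone sketch of the fallback above (plain C++; the can_encode callback stands in for ArmAssembler::ShifterOperandCanHold and is an assumption, not the real signature):

    #include <cstdint>
    #include <functional>

    enum class BitwiseOp { kAnd, kOrr, kEor };

    // Returns true if `value` can be used as an immediate for `op`, either directly
    // or via the complementing instruction (AND -> BIC, ORR -> ORN) with ~value.
    bool CanUseImmediate(BitwiseOp op, uint32_t value,
                         const std::function<bool(uint32_t)>& can_encode) {
      if (can_encode(value)) {
        return true;                                        // e.g. and rd, rn, #value
      }
      switch (op) {
        case BitwiseOp::kAnd: return can_encode(~value);    // bic rd, rn, #~value
        case BitwiseOp::kOrr: return can_encode(~value);    // orn rd, rn, #~value
        default:              return false;                 // EOR has no complemented form.
      }
    }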
void InstructionCodeGeneratorARM::HandleFieldGet(HInstruction* instruction,
const FieldInfo& field_info) {
DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
@@ -4189,13 +4279,11 @@ void LocationsBuilderARM::VisitTemporary(HTemporary* temp) {
temp->SetLocations(nullptr);
}
-void InstructionCodeGeneratorARM::VisitTemporary(HTemporary* temp) {
+void InstructionCodeGeneratorARM::VisitTemporary(HTemporary* temp ATTRIBUTE_UNUSED) {
// Nothing to do, this is driven by the code generator.
- UNUSED(temp);
}
-void LocationsBuilderARM::VisitParallelMove(HParallelMove* instruction) {
- UNUSED(instruction);
+void LocationsBuilderARM::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unreachable";
}
@@ -4253,7 +4341,6 @@ ArmAssembler* ParallelMoveResolverARM::GetAssembler() const {
}
void ParallelMoveResolverARM::EmitMove(size_t index) {
- DCHECK_LT(index, moves_.size());
MoveOperands* move = moves_[index];
Location source = move->GetSource();
Location destination = move->GetDestination();
@@ -4386,7 +4473,6 @@ void ParallelMoveResolverARM::Exchange(int mem1, int mem2) {
}
void ParallelMoveResolverARM::EmitSwap(size_t index) {
- DCHECK_LT(index, moves_.size());
MoveOperands* move = moves_[index];
Location source = move->GetSource();
Location destination = move->GetDestination();
@@ -4468,17 +4554,24 @@ void ParallelMoveResolverARM::RestoreScratch(int reg) {
}
void LocationsBuilderARM::VisitLoadClass(HLoadClass* cls) {
- LocationSummary::CallKind call_kind = cls->CanCallRuntime()
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall;
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetOut(Location::RequiresRegister());
+ InvokeRuntimeCallingConvention calling_convention;
+ CodeGenerator::CreateLoadClassLocationSummary(
+ cls,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ Location::RegisterLocation(R0));
}
void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) {
LocationSummary* locations = cls->GetLocations();
+ if (cls->NeedsAccessCheck()) {
+ codegen_->MoveConstant(locations->GetTemp(0), cls->GetTypeIndex());
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pInitializeTypeAndVerifyAccess),
+ cls,
+ cls->GetDexPc(),
+ nullptr);
+ return;
+ }
+
Register out = locations->Out().AsRegister<Register>();
Register current_method = locations->InAt(0).AsRegister<Register>();
if (cls->IsReferrersClass()) {
@@ -4604,6 +4697,7 @@ void LocationsBuilderARM::VisitInstanceOf(HInstanceOf* instruction) {
case TypeCheckKind::kArrayObjectCheck:
call_kind = LocationSummary::kNoCall;
break;
+ case TypeCheckKind::kUnresolvedCheck:
case TypeCheckKind::kInterfaceCheck:
call_kind = LocationSummary::kCall;
break;
@@ -4644,10 +4738,11 @@ void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) {
__ CompareAndBranchIfZero(obj, &zero);
}
- // In case of an interface check, we put the object class into the object register.
+ // In case of an interface/unresolved check, we put the object class into the object register.
// This is safe, as the register is caller-save, and the object must be in another
// register if it survives the runtime call.
- Register target = (instruction->GetTypeCheckKind() == TypeCheckKind::kInterfaceCheck)
+ Register target = (instruction->GetTypeCheckKind() == TypeCheckKind::kInterfaceCheck) ||
+ (instruction->GetTypeCheckKind() == TypeCheckKind::kUnresolvedCheck)
? obj
: out;
__ LoadFromOffset(kLoadWord, target, obj, class_offset);
@@ -4728,7 +4823,7 @@ void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) {
}
break;
}
-
+ case TypeCheckKind::kUnresolvedCheck:
case TypeCheckKind::kInterfaceCheck:
default: {
codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
@@ -4769,6 +4864,7 @@ void LocationsBuilderARM::VisitCheckCast(HCheckCast* instruction) {
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
break;
+ case TypeCheckKind::kUnresolvedCheck:
case TypeCheckKind::kInterfaceCheck:
call_kind = LocationSummary::kCall;
break;
@@ -4873,6 +4969,7 @@ void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) {
__ CompareAndBranchIfNonZero(temp, slow_path->GetEntryLabel());
break;
}
+ case TypeCheckKind::kUnresolvedCheck:
case TypeCheckKind::kInterfaceCheck:
default:
codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast),
@@ -4903,17 +5000,18 @@ void InstructionCodeGeneratorARM::VisitMonitorOperation(HMonitorOperation* instr
nullptr);
}
-void LocationsBuilderARM::VisitAnd(HAnd* instruction) { HandleBitwiseOperation(instruction); }
-void LocationsBuilderARM::VisitOr(HOr* instruction) { HandleBitwiseOperation(instruction); }
-void LocationsBuilderARM::VisitXor(HXor* instruction) { HandleBitwiseOperation(instruction); }
+void LocationsBuilderARM::VisitAnd(HAnd* instruction) { HandleBitwiseOperation(instruction, AND); }
+void LocationsBuilderARM::VisitOr(HOr* instruction) { HandleBitwiseOperation(instruction, ORR); }
+void LocationsBuilderARM::VisitXor(HXor* instruction) { HandleBitwiseOperation(instruction, EOR); }
-void LocationsBuilderARM::HandleBitwiseOperation(HBinaryOperation* instruction) {
+void LocationsBuilderARM::HandleBitwiseOperation(HBinaryOperation* instruction, Opcode opcode) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
DCHECK(instruction->GetResultType() == Primitive::kPrimInt
|| instruction->GetResultType() == Primitive::kPrimLong);
+ // Note: GVN reorders commutative operations to have the constant on the right hand side.
locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetInAt(1, ArmEncodableConstantOrRegister(instruction->InputAt(1), opcode));
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
@@ -4929,48 +5027,131 @@ void InstructionCodeGeneratorARM::VisitXor(HXor* instruction) {
HandleBitwiseOperation(instruction);
}
+void InstructionCodeGeneratorARM::GenerateAndConst(Register out, Register first, uint32_t value) {
+ // Optimize special cases for individual halves of `and-long` (`and` is simplified earlier).
+ if (value == 0xffffffffu) {
+ if (out != first) {
+ __ mov(out, ShifterOperand(first));
+ }
+ return;
+ }
+ if (value == 0u) {
+ __ mov(out, ShifterOperand(0));
+ return;
+ }
+ ShifterOperand so;
+ if (__ ShifterOperandCanHold(kNoRegister, kNoRegister, AND, value, &so)) {
+ __ and_(out, first, so);
+ } else {
+ DCHECK(__ ShifterOperandCanHold(kNoRegister, kNoRegister, BIC, ~value, &so));
+ __ bic(out, first, ShifterOperand(~value));
+ }
+}
+
+void InstructionCodeGeneratorARM::GenerateOrrConst(Register out, Register first, uint32_t value) {
+ // Optimize special cases for individual halves of `or-long` (`or` is simplified earlier).
+ if (value == 0u) {
+ if (out != first) {
+ __ mov(out, ShifterOperand(first));
+ }
+ return;
+ }
+ if (value == 0xffffffffu) {
+ __ mvn(out, ShifterOperand(0));
+ return;
+ }
+ ShifterOperand so;
+ if (__ ShifterOperandCanHold(kNoRegister, kNoRegister, ORR, value, &so)) {
+ __ orr(out, first, so);
+ } else {
+ DCHECK(__ ShifterOperandCanHold(kNoRegister, kNoRegister, ORN, ~value, &so));
+ __ orn(out, first, ShifterOperand(~value));
+ }
+}
+
+void InstructionCodeGeneratorARM::GenerateEorConst(Register out, Register first, uint32_t value) {
+ // Optimize special case for individual halves of `xor-long` (`xor` is simplified earlier).
+ if (value == 0u) {
+ if (out != first) {
+ __ mov(out, ShifterOperand(first));
+ }
+ return;
+ }
+ __ eor(out, first, ShifterOperand(value));
+}
+
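For reference, the per-half shortcuts used by the three helpers above reduce to the identities below (a sketch in plain C++, not ART code; kAllOnes abbreviates 0xffffffff):

    #include <cstdint>

    constexpr uint32_t kAllOnes = 0xffffffffu;

    uint32_t AndConst(uint32_t first, uint32_t value) {
      if (value == kAllOnes) return first;     // x & ~0 == x   -> plain register move.
      if (value == 0u)       return 0u;        // x & 0  == 0   -> move of #0.
      return first & value;                    // Otherwise AND (or BIC with ~value).
    }

    uint32_t OrrConst(uint32_t first, uint32_t value) {
      if (value == 0u)       return first;     // x | 0  == x   -> plain register move.
      if (value == kAllOnes) return kAllOnes;  // x | ~0 == ~0  -> MVN of #0.
      return first | value;                    // Otherwise ORR (or ORN with ~value).
    }

    uint32_t EorConst(uint32_t first, uint32_t value) {
      if (value == 0u) return first;           // x ^ 0  == x   -> plain register move.
      return first ^ value;                    // No other shortcut is applied.
    }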
void InstructionCodeGeneratorARM::HandleBitwiseOperation(HBinaryOperation* instruction) {
LocationSummary* locations = instruction->GetLocations();
+ Location first = locations->InAt(0);
+ Location second = locations->InAt(1);
+ Location out = locations->Out();
+
+ if (second.IsConstant()) {
+ uint64_t value = static_cast<uint64_t>(Int64FromConstant(second.GetConstant()));
+ uint32_t value_low = Low32Bits(value);
+ if (instruction->GetResultType() == Primitive::kPrimInt) {
+ Register first_reg = first.AsRegister<Register>();
+ Register out_reg = out.AsRegister<Register>();
+ if (instruction->IsAnd()) {
+ GenerateAndConst(out_reg, first_reg, value_low);
+ } else if (instruction->IsOr()) {
+ GenerateOrrConst(out_reg, first_reg, value_low);
+ } else {
+ DCHECK(instruction->IsXor());
+ GenerateEorConst(out_reg, first_reg, value_low);
+ }
+ } else {
+ DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimLong);
+ uint32_t value_high = High32Bits(value);
+ Register first_low = first.AsRegisterPairLow<Register>();
+ Register first_high = first.AsRegisterPairHigh<Register>();
+ Register out_low = out.AsRegisterPairLow<Register>();
+ Register out_high = out.AsRegisterPairHigh<Register>();
+ if (instruction->IsAnd()) {
+ GenerateAndConst(out_low, first_low, value_low);
+ GenerateAndConst(out_high, first_high, value_high);
+ } else if (instruction->IsOr()) {
+ GenerateOrrConst(out_low, first_low, value_low);
+ GenerateOrrConst(out_high, first_high, value_high);
+ } else {
+ DCHECK(instruction->IsXor());
+ GenerateEorConst(out_low, first_low, value_low);
+ GenerateEorConst(out_high, first_high, value_high);
+ }
+ }
+ return;
+ }
if (instruction->GetResultType() == Primitive::kPrimInt) {
- Register first = locations->InAt(0).AsRegister<Register>();
- Register second = locations->InAt(1).AsRegister<Register>();
- Register out = locations->Out().AsRegister<Register>();
+ Register first_reg = first.AsRegister<Register>();
+ ShifterOperand second_reg(second.AsRegister<Register>());
+ Register out_reg = out.AsRegister<Register>();
if (instruction->IsAnd()) {
- __ and_(out, first, ShifterOperand(second));
+ __ and_(out_reg, first_reg, second_reg);
} else if (instruction->IsOr()) {
- __ orr(out, first, ShifterOperand(second));
+ __ orr(out_reg, first_reg, second_reg);
} else {
DCHECK(instruction->IsXor());
- __ eor(out, first, ShifterOperand(second));
+ __ eor(out_reg, first_reg, second_reg);
}
} else {
DCHECK_EQ(instruction->GetResultType(), Primitive::kPrimLong);
- Location first = locations->InAt(0);
- Location second = locations->InAt(1);
- Location out = locations->Out();
+ Register first_low = first.AsRegisterPairLow<Register>();
+ Register first_high = first.AsRegisterPairHigh<Register>();
+ ShifterOperand second_low(second.AsRegisterPairLow<Register>());
+ ShifterOperand second_high(second.AsRegisterPairHigh<Register>());
+ Register out_low = out.AsRegisterPairLow<Register>();
+ Register out_high = out.AsRegisterPairHigh<Register>();
if (instruction->IsAnd()) {
- __ and_(out.AsRegisterPairLow<Register>(),
- first.AsRegisterPairLow<Register>(),
- ShifterOperand(second.AsRegisterPairLow<Register>()));
- __ and_(out.AsRegisterPairHigh<Register>(),
- first.AsRegisterPairHigh<Register>(),
- ShifterOperand(second.AsRegisterPairHigh<Register>()));
+ __ and_(out_low, first_low, second_low);
+ __ and_(out_high, first_high, second_high);
} else if (instruction->IsOr()) {
- __ orr(out.AsRegisterPairLow<Register>(),
- first.AsRegisterPairLow<Register>(),
- ShifterOperand(second.AsRegisterPairLow<Register>()));
- __ orr(out.AsRegisterPairHigh<Register>(),
- first.AsRegisterPairHigh<Register>(),
- ShifterOperand(second.AsRegisterPairHigh<Register>()));
+ __ orr(out_low, first_low, second_low);
+ __ orr(out_high, first_high, second_high);
} else {
DCHECK(instruction->IsXor());
- __ eor(out.AsRegisterPairLow<Register>(),
- first.AsRegisterPairLow<Register>(),
- ShifterOperand(second.AsRegisterPairLow<Register>()));
- __ eor(out.AsRegisterPairHigh<Register>(),
- first.AsRegisterPairHigh<Register>(),
- ShifterOperand(second.AsRegisterPairHigh<Register>()));
+ __ eor(out_low, first_low, second_low);
+ __ eor(out_high, first_high, second_high);
}
}
}
@@ -5150,15 +5331,13 @@ Literal* CodeGeneratorARM::DeduplicateMethodCodeLiteral(MethodReference target_m
return DeduplicateMethodLiteral(target_method, &call_patches_);
}
-void LocationsBuilderARM::VisitBoundType(HBoundType* instruction) {
+void LocationsBuilderARM::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
// Nothing to do, this should be removed during prepare for register allocator.
- UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
-void InstructionCodeGeneratorARM::VisitBoundType(HBoundType* instruction) {
+void InstructionCodeGeneratorARM::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
// Nothing to do, this should be removed during prepare for register allocator.
- UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
@@ -5192,7 +5371,7 @@ void InstructionCodeGeneratorARM::VisitPackedSwitch(HPackedSwitch* switch_instr)
const ArenaVector<HBasicBlock*>& successors = switch_instr->GetBlock()->GetSuccessors();
for (int32_t i = 0; i < num_entries; i++) {
GenerateCompareWithImmediate(value_reg, lower_bound + i);
- __ b(codegen_->GetLabelOf(successors.at(i)), EQ);
+ __ b(codegen_->GetLabelOf(successors[i]), EQ);
}
// And the default for any other value.
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 16d1d383b4..6900933e87 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -169,11 +169,15 @@ class LocationsBuilderARM : public HGraphVisitor {
private:
void HandleInvoke(HInvoke* invoke);
- void HandleBitwiseOperation(HBinaryOperation* operation);
+ void HandleBitwiseOperation(HBinaryOperation* operation, Opcode opcode);
void HandleShift(HBinaryOperation* operation);
void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info);
void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
+ Location ArmEncodableConstantOrRegister(HInstruction* constant, Opcode opcode);
+ bool CanEncodeConstantAsImmediate(HConstant* input_cst, Opcode opcode);
+ bool CanEncodeConstantAsImmediate(uint32_t value, Opcode opcode);
+
CodeGeneratorARM* const codegen_;
InvokeDexCallingConventionVisitorARM parameter_visitor_;
@@ -205,6 +209,9 @@ class InstructionCodeGeneratorARM : public HGraphVisitor {
// the suspend call.
void GenerateSuspendCheck(HSuspendCheck* check, HBasicBlock* successor);
void GenerateClassInitializationCheck(SlowPathCode* slow_path, Register class_reg);
+ void GenerateAndConst(Register out, Register first, uint32_t value);
+ void GenerateOrrConst(Register out, Register first, uint32_t value);
+ void GenerateEorConst(Register out, Register first, uint32_t value);
void HandleBitwiseOperation(HBinaryOperation* operation);
void HandleShift(HBinaryOperation* operation);
void GenerateMemoryBarrier(MemBarrierKind kind);
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index c7ade65cd8..ffb9b794fc 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -77,6 +77,10 @@ inline Condition ARM64Condition(IfCondition cond) {
case kCondLE: return le;
case kCondGT: return gt;
case kCondGE: return ge;
+ case kCondB: return lo;
+ case kCondBE: return ls;
+ case kCondA: return hi;
+ case kCondAE: return hs;
}
LOG(FATAL) << "Unreachable";
UNREACHABLE();
@@ -658,7 +662,6 @@ void ParallelMoveResolverARM64::FreeScratchLocation(Location loc) {
}
void ParallelMoveResolverARM64::EmitMove(size_t index) {
- DCHECK_LT(index, moves_.size());
MoveOperands* move = moves_[index];
codegen_->MoveLocation(move->GetDestination(), move->GetSource(), Primitive::kPrimVoid);
}
@@ -1327,8 +1330,7 @@ enum UnimplementedInstructionBreakCode {
};
#define DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS(name) \
- void InstructionCodeGeneratorARM64::Visit##name(H##name* instr) { \
- UNUSED(instr); \
+ void InstructionCodeGeneratorARM64::Visit##name(H##name* instr ATTRIBUTE_UNUSED) { \
__ Brk(UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name)); \
} \
void LocationsBuilderARM64::Visit##name(H##name* instr) { \
@@ -1938,7 +1940,11 @@ void InstructionCodeGeneratorARM64::VisitCondition(HCondition* instruction) {
M(LessThan) \
M(LessThanOrEqual) \
M(GreaterThan) \
- M(GreaterThanOrEqual)
+ M(GreaterThanOrEqual) \
+ M(Below) \
+ M(BelowOrEqual) \
+ M(Above) \
+ M(AboveOrEqual)
#define DEFINE_CONDITION_VISITORS(Name) \
void LocationsBuilderARM64::Visit##Name(H##Name* comp) { VisitCondition(comp); } \
void InstructionCodeGeneratorARM64::Visit##Name(H##Name* comp) { VisitCondition(comp); }
@@ -2176,8 +2182,8 @@ void LocationsBuilderARM64::VisitDoubleConstant(HDoubleConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorARM64::VisitDoubleConstant(HDoubleConstant* constant) {
- UNUSED(constant);
+void InstructionCodeGeneratorARM64::VisitDoubleConstant(
+ HDoubleConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
}
@@ -2185,8 +2191,7 @@ void LocationsBuilderARM64::VisitExit(HExit* exit) {
exit->SetLocations(nullptr);
}
-void InstructionCodeGeneratorARM64::VisitExit(HExit* exit) {
- UNUSED(exit);
+void InstructionCodeGeneratorARM64::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
}
void LocationsBuilderARM64::VisitFloatConstant(HFloatConstant* constant) {
@@ -2195,8 +2200,7 @@ void LocationsBuilderARM64::VisitFloatConstant(HFloatConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorARM64::VisitFloatConstant(HFloatConstant* constant) {
- UNUSED(constant);
+void InstructionCodeGeneratorARM64::VisitFloatConstant(HFloatConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
}
@@ -2388,6 +2392,7 @@ void LocationsBuilderARM64::VisitInstanceOf(HInstanceOf* instruction) {
case TypeCheckKind::kArrayObjectCheck:
call_kind = LocationSummary::kNoCall;
break;
+ case TypeCheckKind::kUnresolvedCheck:
case TypeCheckKind::kInterfaceCheck:
call_kind = LocationSummary::kCall;
break;
@@ -2429,10 +2434,11 @@ void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) {
__ Cbz(obj, &zero);
}
- // In case of an interface check, we put the object class into the object register.
+ // In case of an interface/unresolved check, we put the object class into the object register.
// This is safe, as the register is caller-save, and the object must be in another
// register if it survives the runtime call.
- Register target = (instruction->GetTypeCheckKind() == TypeCheckKind::kInterfaceCheck)
+ Register target = (instruction->GetTypeCheckKind() == TypeCheckKind::kInterfaceCheck) ||
+ (instruction->GetTypeCheckKind() == TypeCheckKind::kUnresolvedCheck)
? obj
: out;
__ Ldr(target, HeapOperand(obj.W(), class_offset));
@@ -2513,7 +2519,7 @@ void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) {
}
break;
}
-
+ case TypeCheckKind::kUnresolvedCheck:
case TypeCheckKind::kInterfaceCheck:
default: {
codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
@@ -2554,6 +2560,7 @@ void LocationsBuilderARM64::VisitCheckCast(HCheckCast* instruction) {
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
break;
+ case TypeCheckKind::kUnresolvedCheck:
case TypeCheckKind::kInterfaceCheck:
call_kind = LocationSummary::kCall;
break;
@@ -2659,6 +2666,7 @@ void InstructionCodeGeneratorARM64::VisitCheckCast(HCheckCast* instruction) {
__ Cbnz(temp, slow_path->GetEntryLabel());
break;
}
+ case TypeCheckKind::kUnresolvedCheck:
case TypeCheckKind::kInterfaceCheck:
default:
codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast),
@@ -2679,9 +2687,8 @@ void LocationsBuilderARM64::VisitIntConstant(HIntConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorARM64::VisitIntConstant(HIntConstant* constant) {
+void InstructionCodeGeneratorARM64::VisitIntConstant(HIntConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderARM64::VisitNullConstant(HNullConstant* constant) {
@@ -2689,9 +2696,8 @@ void LocationsBuilderARM64::VisitNullConstant(HNullConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorARM64::VisitNullConstant(HNullConstant* constant) {
+void InstructionCodeGeneratorARM64::VisitNullConstant(HNullConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderARM64::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
@@ -3014,14 +3020,23 @@ void InstructionCodeGeneratorARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
}
void LocationsBuilderARM64::VisitLoadClass(HLoadClass* cls) {
- LocationSummary::CallKind call_kind = cls->CanCallRuntime() ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetOut(Location::RequiresRegister());
+ InvokeRuntimeCallingConvention calling_convention;
+ CodeGenerator::CreateLoadClassLocationSummary(
+ cls,
+ LocationFrom(calling_convention.GetRegisterAt(0)),
+ LocationFrom(vixl::x0));
}
void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) {
+ if (cls->NeedsAccessCheck()) {
+ codegen_->MoveConstant(cls->GetLocations()->GetTemp(0), cls->GetTypeIndex());
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pInitializeTypeAndVerifyAccess),
+ cls,
+ cls->GetDexPc(),
+ nullptr);
+ return;
+ }
+
Register out = OutputRegister(cls);
Register current_method = InputRegisterAt(cls, 0);
if (cls->IsReferrersClass()) {
@@ -3073,9 +3088,8 @@ void LocationsBuilderARM64::VisitLoadLocal(HLoadLocal* load) {
load->SetLocations(nullptr);
}
-void InstructionCodeGeneratorARM64::VisitLoadLocal(HLoadLocal* load) {
+void InstructionCodeGeneratorARM64::VisitLoadLocal(HLoadLocal* load ATTRIBUTE_UNUSED) {
// Nothing to do, this is driven by the code generator.
- UNUSED(load);
}
void LocationsBuilderARM64::VisitLoadString(HLoadString* load) {
@@ -3112,9 +3126,8 @@ void LocationsBuilderARM64::VisitLongConstant(HLongConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorARM64::VisitLongConstant(HLongConstant* constant) {
+void InstructionCodeGeneratorARM64::VisitLongConstant(HLongConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderARM64::VisitMonitorOperation(HMonitorOperation* instruction) {
@@ -3381,8 +3394,7 @@ void LocationsBuilderARM64::VisitPhi(HPhi* instruction) {
locations->SetOut(Location::Any());
}
-void InstructionCodeGeneratorARM64::VisitPhi(HPhi* instruction) {
- UNUSED(instruction);
+void InstructionCodeGeneratorARM64::VisitPhi(HPhi* instruction ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unreachable";
}
@@ -3452,8 +3464,7 @@ void LocationsBuilderARM64::VisitReturn(HReturn* instruction) {
locations->SetInAt(0, ARM64ReturnLocation(return_type));
}
-void InstructionCodeGeneratorARM64::VisitReturn(HReturn* instruction) {
- UNUSED(instruction);
+void InstructionCodeGeneratorARM64::VisitReturn(HReturn* instruction ATTRIBUTE_UNUSED) {
codegen_->GenerateFrameExit();
}
@@ -3461,8 +3472,7 @@ void LocationsBuilderARM64::VisitReturnVoid(HReturnVoid* instruction) {
instruction->SetLocations(nullptr);
}
-void InstructionCodeGeneratorARM64::VisitReturnVoid(HReturnVoid* instruction) {
- UNUSED(instruction);
+void InstructionCodeGeneratorARM64::VisitReturnVoid(HReturnVoid* instruction ATTRIBUTE_UNUSED) {
codegen_->GenerateFrameExit();
}
@@ -3506,8 +3516,7 @@ void LocationsBuilderARM64::VisitStoreLocal(HStoreLocal* store) {
}
}
-void InstructionCodeGeneratorARM64::VisitStoreLocal(HStoreLocal* store) {
- UNUSED(store);
+void InstructionCodeGeneratorARM64::VisitStoreLocal(HStoreLocal* store ATTRIBUTE_UNUSED) {
}
void LocationsBuilderARM64::VisitSub(HSub* instruction) {
@@ -3624,9 +3633,8 @@ void LocationsBuilderARM64::VisitTemporary(HTemporary* temp) {
temp->SetLocations(nullptr);
}
-void InstructionCodeGeneratorARM64::VisitTemporary(HTemporary* temp) {
+void InstructionCodeGeneratorARM64::VisitTemporary(HTemporary* temp ATTRIBUTE_UNUSED) {
// Nothing to do, this is driven by the code generator.
- UNUSED(temp);
}
void LocationsBuilderARM64::VisitThrow(HThrow* instruction) {
@@ -3725,15 +3733,13 @@ void InstructionCodeGeneratorARM64::VisitXor(HXor* instruction) {
HandleBinaryOp(instruction);
}
-void LocationsBuilderARM64::VisitBoundType(HBoundType* instruction) {
+void LocationsBuilderARM64::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
// Nothing to do, this should be removed during prepare for register allocator.
- UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
-void InstructionCodeGeneratorARM64::VisitBoundType(HBoundType* instruction) {
+void InstructionCodeGeneratorARM64::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
// Nothing to do, this should be removed during prepare for register allocator.
- UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
@@ -3766,7 +3772,7 @@ void InstructionCodeGeneratorARM64::VisitPackedSwitch(HPackedSwitch* switch_inst
const ArenaVector<HBasicBlock*>& successors = switch_instr->GetBlock()->GetSuccessors();
for (int32_t i = 0; i < num_entries; i++) {
int32_t case_value = lower_bound + i;
- vixl::Label* succ = codegen_->GetLabelOf(successors.at(i));
+ vixl::Label* succ = codegen_->GetLabelOf(successors[i]);
if (case_value == 0) {
__ Cbz(value_reg, succ);
} else {
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index e95d283c1a..eb20291e20 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -450,13 +450,11 @@ Mips64Assembler* ParallelMoveResolverMIPS64::GetAssembler() const {
}
void ParallelMoveResolverMIPS64::EmitMove(size_t index) {
- DCHECK_LT(index, moves_.size());
MoveOperands* move = moves_[index];
codegen_->MoveLocation(move->GetDestination(), move->GetSource(), move->GetType());
}
void ParallelMoveResolverMIPS64::EmitSwap(size_t index) {
- DCHECK_LT(index, moves_.size());
MoveOperands* move = moves_[index];
codegen_->SwapLocations(move->GetDestination(), move->GetSource(), move->GetType());
}
@@ -1780,6 +1778,9 @@ void InstructionCodeGeneratorMIPS64::VisitCondition(HCondition* instruction) {
return;
}
+ // TODO: generalize to long
+ DCHECK_NE(instruction->InputAt(0)->GetType(), Primitive::kPrimLong);
+
LocationSummary* locations = instruction->GetLocations();
GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
@@ -1857,6 +1858,48 @@ void InstructionCodeGeneratorMIPS64::VisitCondition(HCondition* instruction) {
}
}
break;
+
+ case kCondB:
+ case kCondAE:
+ if (use_imm && 0 <= rhs_imm && rhs_imm <= 0x7fff) {
+ __ Sltiu(dst, lhs, rhs_imm);
+ } else {
+ if (use_imm) {
+ rhs_reg = TMP;
+ __ LoadConst32(rhs_reg, rhs_imm);
+ }
+ __ Sltu(dst, lhs, rhs_reg);
+ }
+ if (if_cond == kCondAE) {
+ // Simulate lhs >= rhs via !(lhs < rhs) since there's
+ // only the sltu instruction but no sgeu.
+ __ Xori(dst, dst, 1);
+ }
+ break;
+
+ case kCondBE:
+ case kCondA:
+ if (use_imm && 0 <= rhs_imm && rhs_imm <= 0x7ffe) {
+ // Simulate lhs <= rhs via lhs < rhs + 1.
+ __ Sltiu(dst, lhs, rhs_imm + 1);
+ if (if_cond == kCondA) {
+ // Simulate lhs > rhs via !(lhs <= rhs) since there's
+ // only the sltiu instruction but no sgtiu.
+ __ Xori(dst, dst, 1);
+ }
+ } else {
+ if (use_imm) {
+ rhs_reg = TMP;
+ __ LoadConst32(rhs_reg, rhs_imm);
+ }
+ __ Sltu(dst, rhs_reg, lhs);
+ if (if_cond == kCondBE) {
+ // Simulate lhs <= rhs via !(rhs < lhs) since there's
+ // only the sltu instruction but no sleu.
+ __ Xori(dst, dst, 1);
+ }
+ }
+ break;
}
}
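MIPS64 only provides sltu/sltiu for unsigned set-on-condition, so the four new relations are derived by swapping operands and/or inverting the result bit; a rough C++ model of the lowering chosen above (not the emitted assembly):

    #include <cstdint>

    enum class UCond { kBelow, kBelowOrEqual, kAbove, kAboveOrEqual };

    uint32_t UnsignedSetCondition(UCond cond, uint32_t lhs, uint32_t rhs) {
      switch (cond) {
        case UCond::kBelow:        return lhs < rhs;         // sltu dst, lhs, rhs
        case UCond::kAboveOrEqual: return (lhs < rhs) ^ 1u;   // sltu, then xori dst, dst, 1
        case UCond::kAbove:        return rhs < lhs;          // sltu dst, rhs, lhs
        case UCond::kBelowOrEqual: return (rhs < lhs) ^ 1u;   // sltu, then xori dst, dst, 1
      }
      return 0u;
    }

(The immediate forms above use sltiu instead, with rhs_imm + 1 standing in for the swapped-operand cases.)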
@@ -2074,6 +2117,17 @@ void InstructionCodeGeneratorMIPS64::GenerateTestAndBranch(HInstruction* instruc
case kCondGT:
__ Bgtzc(lhs, true_target);
break;
+ case kCondB:
+ break; // always false
+ case kCondBE:
+ __ Beqzc(lhs, true_target); // <= 0 if zero
+ break;
+ case kCondA:
+ __ Bnezc(lhs, true_target); // > 0 if non-zero
+ break;
+ case kCondAE:
+ __ B(true_target); // always true
+ break;
}
} else {
if (use_imm) {
@@ -2088,12 +2142,16 @@ void InstructionCodeGeneratorMIPS64::GenerateTestAndBranch(HInstruction* instruc
case kCondEQ:
case kCondGE:
case kCondLE:
+ case kCondBE:
+ case kCondAE:
// if lhs == rhs for a positive condition, then it is a branch
__ B(true_target);
break;
case kCondNE:
case kCondLT:
case kCondGT:
+ case kCondB:
+ case kCondA:
// if lhs == rhs for a negative condition, then it is a NOP
break;
}
@@ -2117,6 +2175,18 @@ void InstructionCodeGeneratorMIPS64::GenerateTestAndBranch(HInstruction* instruc
case kCondGT:
__ Bltc(rhs_reg, lhs, true_target);
break;
+ case kCondB:
+ __ Bltuc(lhs, rhs_reg, true_target);
+ break;
+ case kCondAE:
+ __ Bgeuc(lhs, rhs_reg, true_target);
+ break;
+ case kCondBE:
+ __ Bgeuc(rhs_reg, lhs, true_target);
+ break;
+ case kCondA:
+ __ Bltuc(rhs_reg, lhs, true_target);
+ break;
}
}
}
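The branch forms follow the same pattern: MIPS64r6 exposes only bltuc (<) and bgeuc (>=) for unsigned register compares, so BelowOrEqual and Above are obtained by swapping the operands, as this small sketch illustrates:

    #include <cstdint>

    bool TakenBelow(uint32_t lhs, uint32_t rhs)        { return lhs <  rhs; }  // bltuc lhs, rhs
    bool TakenAboveOrEqual(uint32_t lhs, uint32_t rhs) { return lhs >= rhs; }  // bgeuc lhs, rhs
    bool TakenBelowOrEqual(uint32_t lhs, uint32_t rhs) { return rhs >= lhs; }  // bgeuc rhs, lhs
    bool TakenAbove(uint32_t lhs, uint32_t rhs)        { return rhs <  lhs; }  // bltuc rhs, lhs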
@@ -2590,15 +2660,24 @@ void InstructionCodeGeneratorMIPS64::VisitInvokeVirtual(HInvokeVirtual* invoke)
}
void LocationsBuilderMIPS64::VisitLoadClass(HLoadClass* cls) {
- LocationSummary::CallKind call_kind = cls->CanCallRuntime() ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall;
- LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetOut(Location::RequiresRegister());
+ InvokeRuntimeCallingConvention calling_convention;
+ CodeGenerator::CreateLoadClassLocationSummary(
+ cls,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ Location::RegisterLocation(A0));
}
void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) {
LocationSummary* locations = cls->GetLocations();
+ if (cls->NeedsAccessCheck()) {
+ codegen_->MoveConstant(locations->GetTemp(0), cls->GetTypeIndex());
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pInitializeTypeAndVerifyAccess),
+ cls,
+ cls->GetDexPc(),
+ nullptr);
+ return;
+ }
+
GpuRegister out = locations->Out().AsRegister<GpuRegister>();
GpuRegister current_method = locations->InAt(0).AsRegister<GpuRegister>();
if (cls->IsReferrersClass()) {
@@ -3455,6 +3534,38 @@ void InstructionCodeGeneratorMIPS64::VisitGreaterThanOrEqual(HGreaterThanOrEqual
VisitCondition(comp);
}
+void LocationsBuilderMIPS64::VisitBelow(HBelow* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitBelow(HBelow* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderMIPS64::VisitBelowOrEqual(HBelowOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitBelowOrEqual(HBelowOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderMIPS64::VisitAbove(HAbove* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitAbove(HAbove* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderMIPS64::VisitAboveOrEqual(HAboveOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorMIPS64::VisitAboveOrEqual(HAboveOrEqual* comp) {
+ VisitCondition(comp);
+}
+
void LocationsBuilderMIPS64::VisitFakeString(HFakeString* instruction) {
DCHECK(codegen_->IsBaseline());
LocationSummary* locations =
@@ -3485,7 +3596,7 @@ void InstructionCodeGeneratorMIPS64::VisitPackedSwitch(HPackedSwitch* switch_ins
const ArenaVector<HBasicBlock*>& successors = switch_instr->GetBlock()->GetSuccessors();
for (int32_t i = 0; i < num_entries; i++) {
int32_t case_value = lower_bound + i;
- Label* succ = codegen_->GetLabelOf(successors.at(i));
+ Label* succ = codegen_->GetLabelOf(successors[i]);
if (case_value == 0) {
__ Beqzc(value_reg, succ);
} else {
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index 5e8f9e7f30..7799437235 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -117,7 +117,7 @@ class FieldAccessCallingConventionMIPS64 : public FieldAccessCallingConvention {
return Location::RegisterLocation(A0);
}
Location GetReturnLocation(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
- return Location::RegisterLocation(A0);
+ return Location::RegisterLocation(V0);
}
Location GetSetValueLocation(
Primitive::Type type ATTRIBUTE_UNUSED, bool is_instance) const OVERRIDE {
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 5078456eb1..2aea859b7d 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -428,7 +428,7 @@ class ArraySetSlowPathX86 : public SlowPathCode {
#undef __
#define __ down_cast<X86Assembler*>(GetAssembler())->
-inline Condition X86SignedCondition(IfCondition cond) {
+inline Condition X86Condition(IfCondition cond) {
switch (cond) {
case kCondEQ: return kEqual;
case kCondNE: return kNotEqual;
@@ -436,19 +436,30 @@ inline Condition X86SignedCondition(IfCondition cond) {
case kCondLE: return kLessEqual;
case kCondGT: return kGreater;
case kCondGE: return kGreaterEqual;
+ case kCondB: return kBelow;
+ case kCondBE: return kBelowEqual;
+ case kCondA: return kAbove;
+ case kCondAE: return kAboveEqual;
}
LOG(FATAL) << "Unreachable";
UNREACHABLE();
}
+// Maps signed condition to unsigned condition and FP condition to x86 name.
inline Condition X86UnsignedOrFPCondition(IfCondition cond) {
switch (cond) {
case kCondEQ: return kEqual;
case kCondNE: return kNotEqual;
+ // Signed to unsigned, and FP to x86 name.
case kCondLT: return kBelow;
case kCondLE: return kBelowEqual;
case kCondGT: return kAbove;
case kCondGE: return kAboveEqual;
+ // Unsigned conditions remain unchanged.
+ case kCondB: return kBelow;
+ case kCondBE: return kBelowEqual;
+ case kCondA: return kAbove;
+ case kCondAE: return kAboveEqual;
}
LOG(FATAL) << "Unreachable";
UNREACHABLE();
@@ -521,7 +532,8 @@ CodeGeneratorX86::CodeGeneratorX86(HGraph* graph,
move_resolver_(graph->GetArena(), this),
isa_features_(isa_features),
method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- relative_call_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
+ relative_call_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ fixups_to_jump_tables_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
// Use a fake return address register to mimic Quick.
AddAllocatedRegister(Location::RegisterLocation(kFakeReturnRegister));
}
@@ -1040,8 +1052,7 @@ void LocationsBuilderX86::VisitExit(HExit* exit) {
exit->SetLocations(nullptr);
}
-void InstructionCodeGeneratorX86::VisitExit(HExit* exit) {
- UNUSED(exit);
+void InstructionCodeGeneratorX86::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
}
void InstructionCodeGeneratorX86::GenerateFPJumps(HCondition* cond,
@@ -1067,7 +1078,7 @@ void InstructionCodeGeneratorX86::GenerateLongComparesAndJumps(HCondition* cond,
Register left_low = left.AsRegisterPairLow<Register>();
IfCondition true_high_cond = if_cond;
IfCondition false_high_cond = cond->GetOppositeCondition();
- Condition final_condition = X86UnsignedOrFPCondition(if_cond);
+ Condition final_condition = X86UnsignedOrFPCondition(if_cond); // unsigned on lower part
// Set the conditions for the test, remembering that == needs to be
// decided using the low words.
@@ -1088,6 +1099,18 @@ void InstructionCodeGeneratorX86::GenerateLongComparesAndJumps(HCondition* cond,
case kCondGE:
true_high_cond = kCondGT;
break;
+ case kCondB:
+ false_high_cond = kCondA;
+ break;
+ case kCondBE:
+ true_high_cond = kCondB;
+ break;
+ case kCondA:
+ false_high_cond = kCondB;
+ break;
+ case kCondAE:
+ true_high_cond = kCondA;
+ break;
}
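A compact model of the 64-bit split that the adjusted high-word conditions implement (plain C++, unsigned "below" shown; the other relations follow the same shape):

    #include <cstdint>

    bool Below64(uint32_t left_high, uint32_t left_low,
                 uint32_t right_high, uint32_t right_low) {
      if (left_high != right_high) {
        return left_high < right_high;   // High words differ: they decide the result
      }                                  // (kCondA on the highs means definitely false).
      return left_low < right_low;       // Equal highs: unsigned compare of the lows.
    }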
if (right.IsConstant()) {
@@ -1101,12 +1124,12 @@ void InstructionCodeGeneratorX86::GenerateLongComparesAndJumps(HCondition* cond,
__ cmpl(left_high, Immediate(val_high));
}
if (if_cond == kCondNE) {
- __ j(X86SignedCondition(true_high_cond), true_label);
+ __ j(X86Condition(true_high_cond), true_label);
} else if (if_cond == kCondEQ) {
- __ j(X86SignedCondition(false_high_cond), false_label);
+ __ j(X86Condition(false_high_cond), false_label);
} else {
- __ j(X86SignedCondition(true_high_cond), true_label);
- __ j(X86SignedCondition(false_high_cond), false_label);
+ __ j(X86Condition(true_high_cond), true_label);
+ __ j(X86Condition(false_high_cond), false_label);
}
// Must be equal high, so compare the lows.
if (val_low == 0) {
@@ -1120,12 +1143,12 @@ void InstructionCodeGeneratorX86::GenerateLongComparesAndJumps(HCondition* cond,
__ cmpl(left_high, right_high);
if (if_cond == kCondNE) {
- __ j(X86SignedCondition(true_high_cond), true_label);
+ __ j(X86Condition(true_high_cond), true_label);
} else if (if_cond == kCondEQ) {
- __ j(X86SignedCondition(false_high_cond), false_label);
+ __ j(X86Condition(false_high_cond), false_label);
} else {
- __ j(X86SignedCondition(true_high_cond), true_label);
- __ j(X86SignedCondition(false_high_cond), false_label);
+ __ j(X86Condition(true_high_cond), true_label);
+ __ j(X86Condition(false_high_cond), false_label);
}
// Must be equal high, so compare the lows.
__ cmpl(left_low, right_low);
@@ -1214,7 +1237,7 @@ void InstructionCodeGeneratorX86::GenerateTestAndBranch(HInstruction* instructio
}
__ j(kNotEqual, true_target);
} else {
- __ j(X86SignedCondition(cond->AsCondition()->GetCondition()), true_target);
+ __ j(X86Condition(cond->AsCondition()->GetCondition()), true_target);
}
} else {
// Condition has not been materialized, use its inputs as the
@@ -1247,7 +1270,7 @@ void InstructionCodeGeneratorX86::GenerateTestAndBranch(HInstruction* instructio
} else {
__ cmpl(lhs.AsRegister<Register>(), Address(ESP, rhs.GetStackIndex()));
}
- __ j(X86SignedCondition(cond->AsCondition()->GetCondition()), true_target);
+ __ j(X86Condition(cond->AsCondition()->GetCondition()), true_target);
}
}
if (false_target != nullptr) {
@@ -1309,9 +1332,8 @@ void LocationsBuilderX86::VisitLoadLocal(HLoadLocal* local) {
local->SetLocations(nullptr);
}
-void InstructionCodeGeneratorX86::VisitLoadLocal(HLoadLocal* load) {
+void InstructionCodeGeneratorX86::VisitLoadLocal(HLoadLocal* load ATTRIBUTE_UNUSED) {
// Nothing to do, this is driven by the code generator.
- UNUSED(load);
}
void LocationsBuilderX86::VisitStoreLocal(HStoreLocal* store) {
@@ -1338,8 +1360,7 @@ void LocationsBuilderX86::VisitStoreLocal(HStoreLocal* store) {
}
}
-void InstructionCodeGeneratorX86::VisitStoreLocal(HStoreLocal* store) {
- UNUSED(store);
+void InstructionCodeGeneratorX86::VisitStoreLocal(HStoreLocal* store ATTRIBUTE_UNUSED) {
}
void LocationsBuilderX86::VisitCondition(HCondition* cond) {
@@ -1405,7 +1426,7 @@ void InstructionCodeGeneratorX86::VisitCondition(HCondition* cond) {
} else {
__ cmpl(lhs.AsRegister<Register>(), Address(ESP, rhs.GetStackIndex()));
}
- __ setb(X86SignedCondition(cond->GetCondition()), reg);
+ __ setb(X86Condition(cond->GetCondition()), reg);
return;
}
case Primitive::kPrimLong:
@@ -1483,15 +1504,46 @@ void InstructionCodeGeneratorX86::VisitGreaterThanOrEqual(HGreaterThanOrEqual* c
VisitCondition(comp);
}
+void LocationsBuilderX86::VisitBelow(HBelow* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorX86::VisitBelow(HBelow* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderX86::VisitBelowOrEqual(HBelowOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorX86::VisitBelowOrEqual(HBelowOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderX86::VisitAbove(HAbove* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorX86::VisitAbove(HAbove* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderX86::VisitAboveOrEqual(HAboveOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorX86::VisitAboveOrEqual(HAboveOrEqual* comp) {
+ VisitCondition(comp);
+}
+
void LocationsBuilderX86::VisitIntConstant(HIntConstant* constant) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorX86::VisitIntConstant(HIntConstant* constant) {
+void InstructionCodeGeneratorX86::VisitIntConstant(HIntConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderX86::VisitNullConstant(HNullConstant* constant) {
@@ -1500,9 +1552,8 @@ void LocationsBuilderX86::VisitNullConstant(HNullConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorX86::VisitNullConstant(HNullConstant* constant) {
+void InstructionCodeGeneratorX86::VisitNullConstant(HNullConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderX86::VisitLongConstant(HLongConstant* constant) {
@@ -1511,9 +1562,8 @@ void LocationsBuilderX86::VisitLongConstant(HLongConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorX86::VisitLongConstant(HLongConstant* constant) {
+void InstructionCodeGeneratorX86::VisitLongConstant(HLongConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderX86::VisitFloatConstant(HFloatConstant* constant) {
@@ -1522,9 +1572,8 @@ void LocationsBuilderX86::VisitFloatConstant(HFloatConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorX86::VisitFloatConstant(HFloatConstant* constant) {
+void InstructionCodeGeneratorX86::VisitFloatConstant(HFloatConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderX86::VisitDoubleConstant(HDoubleConstant* constant) {
@@ -1533,9 +1582,8 @@ void LocationsBuilderX86::VisitDoubleConstant(HDoubleConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorX86::VisitDoubleConstant(HDoubleConstant* constant) {
+void InstructionCodeGeneratorX86::VisitDoubleConstant(HDoubleConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderX86::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
@@ -1550,8 +1598,7 @@ void LocationsBuilderX86::VisitReturnVoid(HReturnVoid* ret) {
ret->SetLocations(nullptr);
}
-void InstructionCodeGeneratorX86::VisitReturnVoid(HReturnVoid* ret) {
- UNUSED(ret);
+void InstructionCodeGeneratorX86::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE_UNUSED) {
codegen_->GenerateFrameExit();
}
@@ -3685,8 +3732,7 @@ void LocationsBuilderX86::VisitPhi(HPhi* instruction) {
locations->SetOut(Location::Any());
}
-void InstructionCodeGeneratorX86::VisitPhi(HPhi* instruction) {
- UNUSED(instruction);
+void InstructionCodeGeneratorX86::VisitPhi(HPhi* instruction ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unreachable";
}
@@ -4684,13 +4730,11 @@ void LocationsBuilderX86::VisitTemporary(HTemporary* temp) {
temp->SetLocations(nullptr);
}
-void InstructionCodeGeneratorX86::VisitTemporary(HTemporary* temp) {
+void InstructionCodeGeneratorX86::VisitTemporary(HTemporary* temp ATTRIBUTE_UNUSED) {
// Nothing to do, this is driven by the code generator.
- UNUSED(temp);
}
-void LocationsBuilderX86::VisitParallelMove(HParallelMove* instruction) {
- UNUSED(instruction);
+void LocationsBuilderX86::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unreachable";
}
@@ -4768,7 +4812,6 @@ void ParallelMoveResolverX86::MoveMemoryToMemory64(int dst, int src) {
}
void ParallelMoveResolverX86::EmitMove(size_t index) {
- DCHECK_LT(index, moves_.size());
MoveOperands* move = moves_[index];
Location source = move->GetSource();
Location destination = move->GetDestination();
@@ -4921,7 +4964,6 @@ void ParallelMoveResolverX86::Exchange(int mem1, int mem2) {
}
void ParallelMoveResolverX86::EmitSwap(size_t index) {
- DCHECK_LT(index, moves_.size());
MoveOperands* move = moves_[index];
Location source = move->GetSource();
Location destination = move->GetDestination();
@@ -4989,17 +5031,24 @@ void ParallelMoveResolverX86::RestoreScratch(int reg) {
}
void LocationsBuilderX86::VisitLoadClass(HLoadClass* cls) {
- LocationSummary::CallKind call_kind = cls->CanCallRuntime()
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall;
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetOut(Location::RequiresRegister());
+ InvokeRuntimeCallingConvention calling_convention;
+ CodeGenerator::CreateLoadClassLocationSummary(
+ cls,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ Location::RegisterLocation(EAX));
}
void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) {
LocationSummary* locations = cls->GetLocations();
+ if (cls->NeedsAccessCheck()) {
+ codegen_->MoveConstant(locations->GetTemp(0), cls->GetTypeIndex());
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pInitializeTypeAndVerifyAccess),
+ cls,
+ cls->GetDexPc(),
+ nullptr);
+ return;
+ }
+
Register out = locations->Out().AsRegister<Register>();
Register current_method = locations->InAt(0).AsRegister<Register>();
if (cls->IsReferrersClass()) {
@@ -5121,6 +5170,7 @@ void LocationsBuilderX86::VisitInstanceOf(HInstanceOf* instruction) {
case TypeCheckKind::kArrayObjectCheck:
call_kind = LocationSummary::kNoCall;
break;
+ case TypeCheckKind::kUnresolvedCheck:
case TypeCheckKind::kInterfaceCheck:
call_kind = LocationSummary::kCall;
break;
@@ -5161,10 +5211,11 @@ void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) {
__ j(kEqual, &zero);
}
- // In case of an interface check, we put the object class into the object register.
+ // In case of an interface/unresolved check, we put the object class into the object register.
// This is safe, as the register is caller-save, and the object must be in another
// register if it survives the runtime call.
- Register target = (instruction->GetTypeCheckKind() == TypeCheckKind::kInterfaceCheck)
+ Register target = (instruction->GetTypeCheckKind() == TypeCheckKind::kInterfaceCheck) ||
+ (instruction->GetTypeCheckKind() == TypeCheckKind::kUnresolvedCheck)
? obj
: out;
__ movl(target, Address(obj, class_offset));
@@ -5273,7 +5324,7 @@ void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) {
}
break;
}
-
+ case TypeCheckKind::kUnresolvedCheck:
case TypeCheckKind::kInterfaceCheck:
default: {
codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
@@ -5315,6 +5366,7 @@ void LocationsBuilderX86::VisitCheckCast(HCheckCast* instruction) {
: LocationSummary::kNoCall;
break;
case TypeCheckKind::kInterfaceCheck:
+ case TypeCheckKind::kUnresolvedCheck:
call_kind = LocationSummary::kCall;
break;
case TypeCheckKind::kArrayCheck:
@@ -5441,6 +5493,7 @@ void InstructionCodeGeneratorX86::VisitCheckCast(HCheckCast* instruction) {
__ j(kNotEqual, slow_path->GetEntryLabel());
break;
}
+ case TypeCheckKind::kUnresolvedCheck:
case TypeCheckKind::kInterfaceCheck:
default:
codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast),
@@ -5604,15 +5657,13 @@ void InstructionCodeGeneratorX86::HandleBitwiseOperation(HBinaryOperation* instr
}
}
-void LocationsBuilderX86::VisitBoundType(HBoundType* instruction) {
+void LocationsBuilderX86::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
// Nothing to do, this should be removed during prepare for register allocator.
- UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
-void InstructionCodeGeneratorX86::VisitBoundType(HBoundType* instruction) {
+void InstructionCodeGeneratorX86::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
// Nothing to do, this should be removed during prepare for register allocator.
- UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
@@ -5651,7 +5702,7 @@ void InstructionCodeGeneratorX86::VisitPackedSwitch(HPackedSwitch* switch_instr)
} else {
__ cmpl(value_reg, Immediate(case_value));
}
- __ j(kEqual, codegen_->GetLabelOf(successors.at(i)));
+ __ j(kEqual, codegen_->GetLabelOf(successors[i]));
}
// And the default for any other value.
@@ -5660,6 +5711,51 @@ void InstructionCodeGeneratorX86::VisitPackedSwitch(HPackedSwitch* switch_instr)
}
}
+void LocationsBuilderX86::VisitX86PackedSwitch(HX86PackedSwitch* switch_instr) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
+ locations->SetInAt(0, Location::RequiresRegister());
+
+ // Constant area pointer.
+ locations->SetInAt(1, Location::RequiresRegister());
+
+ // And the temporary we need.
+ locations->AddTemp(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorX86::VisitX86PackedSwitch(HX86PackedSwitch* switch_instr) {
+ int32_t lower_bound = switch_instr->GetStartValue();
+ int32_t num_entries = switch_instr->GetNumEntries();
+ LocationSummary* locations = switch_instr->GetLocations();
+ Register value_reg = locations->InAt(0).AsRegister<Register>();
+ HBasicBlock* default_block = switch_instr->GetDefaultBlock();
+
+ // Optimizing uses a jump table stored in the constant area.
+ Register temp_reg = locations->GetTemp(0).AsRegister<Register>();
+ Register constant_area = locations->InAt(1).AsRegister<Register>();
+
+ // Remove the bias, if needed.
+ if (lower_bound != 0) {
+ __ leal(temp_reg, Address(value_reg, -lower_bound));
+ value_reg = temp_reg;
+ }
+
+ // Is the value in range?
+ DCHECK_GE(num_entries, 1);
+ __ cmpl(value_reg, Immediate(num_entries - 1));
+ __ j(kAbove, codegen_->GetLabelOf(default_block));
+
+ // We are in the range of the table.
+ // Load (target-constant_area) from the jump table, indexing by the value.
+ __ movl(temp_reg, codegen_->LiteralCaseTable(switch_instr, constant_area, value_reg));
+
+ // Compute the actual target address by adding in constant_area.
+ __ addl(temp_reg, constant_area);
+
+ // And jump.
+ __ jmp(temp_reg);
+}
+
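In effect, the sequence above performs the computation sketched below, where base is the value produced by HX86ComputeBaseMethodAddress and table holds (target - base) entries; the names are illustrative only:

    #include <cstdint>
    #include <vector>

    uintptr_t PackedSwitchTarget(int32_t value, int32_t lower_bound,
                                 const std::vector<int32_t>& table,
                                 uintptr_t base, uintptr_t default_target) {
      int32_t index = value - lower_bound;                    // leal: remove the bias.
      if (index < 0 || index >= static_cast<int32_t>(table.size())) {
        return default_target;                                // cmpl + ja to the default block.
      }
      return base + table[index];                             // movl from table, addl base, jmp.
    }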
void LocationsBuilderX86::VisitX86ComputeBaseMethodAddress(
HX86ComputeBaseMethodAddress* insn) {
LocationSummary* locations =
@@ -5743,28 +5839,18 @@ void InstructionCodeGeneratorX86::VisitX86LoadFromConstantTable(HX86LoadFromCons
}
}
-void CodeGeneratorX86::Finalize(CodeAllocator* allocator) {
- // Generate the constant area if needed.
- X86Assembler* assembler = GetAssembler();
- if (!assembler->IsConstantAreaEmpty()) {
- // Align to 4 byte boundary to reduce cache misses, as the data is 4 and 8
- // byte values.
- assembler->Align(4, 0);
- constant_area_start_ = assembler->CodeSize();
- assembler->AddConstantArea();
- }
-
- // And finish up.
- CodeGenerator::Finalize(allocator);
-}
-
/**
* Class to handle late fixup of offsets into constant area.
*/
class RIPFixup : public AssemblerFixup, public ArenaObject<kArenaAllocCodeGenerator> {
public:
- RIPFixup(const CodeGeneratorX86& codegen, int offset)
- : codegen_(codegen), offset_into_constant_area_(offset) {}
+ RIPFixup(CodeGeneratorX86& codegen, size_t offset)
+ : codegen_(&codegen), offset_into_constant_area_(offset) {}
+
+ protected:
+ void SetOffset(size_t offset) { offset_into_constant_area_ = offset; }
+
+ CodeGeneratorX86* codegen_;
private:
void Process(const MemoryRegion& region, int pos) OVERRIDE {
@@ -5772,19 +5858,77 @@ class RIPFixup : public AssemblerFixup, public ArenaObject<kArenaAllocCodeGenera
// last 4 bytes of the instruction.
// The value to patch is the distance from the offset in the constant area
// from the address computed by the HX86ComputeBaseMethodAddress instruction.
- int32_t constant_offset = codegen_.ConstantAreaStart() + offset_into_constant_area_;
- int32_t relative_position = constant_offset - codegen_.GetMethodAddressOffset();;
+ int32_t constant_offset = codegen_->ConstantAreaStart() + offset_into_constant_area_;
+ int32_t relative_position = constant_offset - codegen_->GetMethodAddressOffset();
// Patch in the right value.
region.StoreUnaligned<int32_t>(pos - 4, relative_position);
}
- const CodeGeneratorX86& codegen_;
-
// Location in constant area that the fixup refers to.
- int offset_into_constant_area_;
+ int32_t offset_into_constant_area_;
+};
+
+/**
+ * Class to handle late fixup of offsets to a jump table that will be created in the
+ * constant area.
+ */
+class JumpTableRIPFixup : public RIPFixup {
+ public:
+ JumpTableRIPFixup(CodeGeneratorX86& codegen, HX86PackedSwitch* switch_instr)
+ : RIPFixup(codegen, static_cast<size_t>(-1)), switch_instr_(switch_instr) {}
+
+ void CreateJumpTable() {
+ X86Assembler* assembler = codegen_->GetAssembler();
+
+ // Ensure that the reference to the jump table has the correct offset.
+ const int32_t offset_in_constant_table = assembler->ConstantAreaSize();
+ SetOffset(offset_in_constant_table);
+
+ // The label values in the jump table are computed relative to the
+ // instruction addressing the constant area.
+ const int32_t relative_offset = codegen_->GetMethodAddressOffset();
+
+ // Populate the jump table with the correct target offsets.
+ int32_t num_entries = switch_instr_->GetNumEntries();
+ HBasicBlock* block = switch_instr_->GetBlock();
+ const ArenaVector<HBasicBlock*>& successors = block->GetSuccessors();
+ // The value that we want is the target offset - the position of the table.
+ for (int32_t i = 0; i < num_entries; i++) {
+ HBasicBlock* b = successors[i];
+ Label* l = codegen_->GetLabelOf(b);
+ DCHECK(l->IsBound());
+ int32_t offset_to_block = l->Position() - relative_offset;
+ assembler->AppendInt32(offset_to_block);
+ }
+ }
+
+ private:
+ const HX86PackedSwitch* switch_instr_;
};
+void CodeGeneratorX86::Finalize(CodeAllocator* allocator) {
+ // Generate the constant area if needed.
+ X86Assembler* assembler = GetAssembler();
+ if (!assembler->IsConstantAreaEmpty() || !fixups_to_jump_tables_.empty()) {
+ // Align to 4 byte boundary to reduce cache misses, as the data is 4 and 8
+ // byte values.
+ assembler->Align(4, 0);
+ constant_area_start_ = assembler->CodeSize();
+
+ // Populate any jump tables.
+ for (auto jump_table : fixups_to_jump_tables_) {
+ jump_table->CreateJumpTable();
+ }
+
+ // And now add the constant area to the generated code.
+ assembler->AddConstantArea();
+ }
+
+ // And finish up.
+ CodeGenerator::Finalize(allocator);
+}
+
Address CodeGeneratorX86::LiteralDoubleAddress(double v, Register reg) {
AssemblerFixup* fixup = new (GetGraph()->GetArena()) RIPFixup(*this, __ AddDouble(v));
return Address(reg, kDummy32BitOffset, fixup);
@@ -5805,98 +5949,18 @@ Address CodeGeneratorX86::LiteralInt64Address(int64_t v, Register reg) {
return Address(reg, kDummy32BitOffset, fixup);
}
-/**
- * Finds instructions that need the constant area base as an input.
- */
-class ConstantHandlerVisitor : public HGraphVisitor {
- public:
- explicit ConstantHandlerVisitor(HGraph* graph) : HGraphVisitor(graph), base_(nullptr) {}
-
- private:
- void VisitAdd(HAdd* add) OVERRIDE {
- BinaryFP(add);
- }
-
- void VisitSub(HSub* sub) OVERRIDE {
- BinaryFP(sub);
- }
-
- void VisitMul(HMul* mul) OVERRIDE {
- BinaryFP(mul);
- }
-
- void VisitDiv(HDiv* div) OVERRIDE {
- BinaryFP(div);
- }
-
- void VisitReturn(HReturn* ret) OVERRIDE {
- HConstant* value = ret->InputAt(0)->AsConstant();
- if ((value != nullptr && Primitive::IsFloatingPointType(value->GetType()))) {
- ReplaceInput(ret, value, 0, true);
- }
- }
-
- void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE {
- HandleInvoke(invoke);
- }
-
- void VisitInvokeVirtual(HInvokeVirtual* invoke) OVERRIDE {
- HandleInvoke(invoke);
- }
+Address CodeGeneratorX86::LiteralCaseTable(HX86PackedSwitch* switch_instr,
+ Register reg,
+ Register value) {
+ // Create a fixup to be used to create and address the jump table.
+ JumpTableRIPFixup* table_fixup =
+ new (GetGraph()->GetArena()) JumpTableRIPFixup(*this, switch_instr);
- void VisitInvokeInterface(HInvokeInterface* invoke) OVERRIDE {
- HandleInvoke(invoke);
- }
-
- void BinaryFP(HBinaryOperation* bin) {
- HConstant* rhs = bin->InputAt(1)->AsConstant();
- if (rhs != nullptr && Primitive::IsFloatingPointType(bin->GetResultType())) {
- ReplaceInput(bin, rhs, 1, false);
- }
- }
-
- void InitializeConstantAreaPointer(HInstruction* user) {
- // Ensure we only initialize the pointer once.
- if (base_ != nullptr) {
- return;
- }
-
- HGraph* graph = GetGraph();
- HBasicBlock* entry = graph->GetEntryBlock();
- base_ = new (graph->GetArena()) HX86ComputeBaseMethodAddress();
- HInstruction* insert_pos = (user->GetBlock() == entry) ? user : entry->GetLastInstruction();
- entry->InsertInstructionBefore(base_, insert_pos);
- DCHECK(base_ != nullptr);
- }
-
- void ReplaceInput(HInstruction* insn, HConstant* value, int input_index, bool materialize) {
- InitializeConstantAreaPointer(insn);
- HGraph* graph = GetGraph();
- HBasicBlock* block = insn->GetBlock();
- HX86LoadFromConstantTable* load_constant =
- new (graph->GetArena()) HX86LoadFromConstantTable(base_, value, materialize);
- block->InsertInstructionBefore(load_constant, insn);
- insn->ReplaceInput(load_constant, input_index);
- }
-
- void HandleInvoke(HInvoke* invoke) {
- // Ensure that we can load FP arguments from the constant area.
- for (size_t i = 0, e = invoke->InputCount(); i < e; i++) {
- HConstant* input = invoke->InputAt(i)->AsConstant();
- if (input != nullptr && Primitive::IsFloatingPointType(input->GetType())) {
- ReplaceInput(invoke, input, i, true);
- }
- }
- }
-
- // The generated HX86ComputeBaseMethodAddress in the entry block needed as an
- // input to the HX86LoadFromConstantTable instructions.
- HX86ComputeBaseMethodAddress* base_;
-};
+ // We have to populate the jump tables.
+ fixups_to_jump_tables_.push_back(table_fixup);
-void ConstantAreaFixups::Run() {
- ConstantHandlerVisitor visitor(graph_);
- visitor.VisitInsertionOrder();
+ // We want a scaled address, as we are extracting the correct offset from the table.
+ return Address(reg, value, TIMES_4, kDummy32BitOffset, table_fixup);
}
// TODO: target as memory.
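The x86 lowering above stores one 32-bit entry per case, computed in CreateJumpTable() as the bound label position minus the method-address offset recorded for the HX86ComputeBaseMethodAddress instruction; at runtime the switch indexes the table and adds the base register back to recover the absolute target. A minimal standalone sketch of that arithmetic follows; it is not ART code, and every offset and address in it is made up for illustration.

#include <cassert>
#include <cstdint>
#include <vector>

int main() {
  // Hypothetical code offsets: the base-address instruction and three bound case labels.
  const int32_t base_pos = 0x10;
  const std::vector<int32_t> case_positions = {0x40, 0x58, 0x70};

  // CreateJumpTable(): each entry is (label position - base position).
  std::vector<int32_t> table;
  for (int32_t pos : case_positions) {
    table.push_back(pos - base_pos);
  }

  // VisitX86PackedSwitch(): movl temp, table[value]; addl temp, base; jmp temp.
  const uintptr_t code_start = 0x100000;                  // hypothetical load address of the method
  const uintptr_t base_address = code_start + base_pos;   // runtime value of the base register
  for (size_t value = 0; value < table.size(); ++value) {
    const uintptr_t target = base_address + table[value];
    assert(target == code_start + static_cast<uintptr_t>(case_positions[value]));
  }
  return 0;
}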
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index ae2d84f945..fdfc5ab69b 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -245,6 +245,8 @@ class InstructionCodeGeneratorX86 : public HGraphVisitor {
DISALLOW_COPY_AND_ASSIGN(InstructionCodeGeneratorX86);
};
+class JumpTableRIPFixup;
+
class CodeGeneratorX86 : public CodeGenerator {
public:
CodeGeneratorX86(HGraph* graph,
@@ -385,6 +387,8 @@ class CodeGeneratorX86 : public CodeGenerator {
Address LiteralInt32Address(int32_t v, Register reg);
Address LiteralInt64Address(int64_t v, Register reg);
+ Address LiteralCaseTable(HX86PackedSwitch* switch_instr, Register reg, Register value);
+
void Finalize(CodeAllocator* allocator) OVERRIDE;
private:
@@ -405,6 +409,9 @@ class CodeGeneratorX86 : public CodeGenerator {
// Used for fixups to the constant area.
int32_t constant_area_start_;
+ // Fixups for jump tables that need to be patched after the constant table is generated.
+ ArenaVector<JumpTableRIPFixup*> fixups_to_jump_tables_;
+
// If there is a HX86ComputeBaseMethodAddress instruction in the graph
// (which shall be the sole instruction of this kind), subtracting this offset
// from the value contained in the out register of this HX86ComputeBaseMethodAddress
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 791bb9e6aa..bf570f581b 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -36,9 +36,6 @@ namespace art {
namespace x86_64 {
-// Some x86_64 instructions require a register to be available as temp.
-static constexpr Register TMP = R11;
-
static constexpr int kCurrentMethodStackOffset = 0;
static constexpr Register kMethodRegisterArgument = RDI;
@@ -452,11 +449,16 @@ inline Condition X86_64IntegerCondition(IfCondition cond) {
case kCondLE: return kLessEqual;
case kCondGT: return kGreater;
case kCondGE: return kGreaterEqual;
+ case kCondB: return kBelow;
+ case kCondBE: return kBelowEqual;
+ case kCondA: return kAbove;
+ case kCondAE: return kAboveEqual;
}
LOG(FATAL) << "Unreachable";
UNREACHABLE();
}
+// Maps FP condition to x86_64 name.
inline Condition X86_64FPCondition(IfCondition cond) {
switch (cond) {
case kCondEQ: return kEqual;
@@ -465,6 +467,7 @@ inline Condition X86_64FPCondition(IfCondition cond) {
case kCondLE: return kBelowEqual;
case kCondGT: return kAbove;
case kCondGE: return kAboveEqual;
+ default: break; // should not happen
};
LOG(FATAL) << "Unreachable";
UNREACHABLE();
@@ -673,7 +676,8 @@ CodeGeneratorX86_64::CodeGeneratorX86_64(HGraph* graph,
constant_area_start_(0),
method_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
relative_call_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
- pc_rel_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
+ pc_rel_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
+ fixups_to_jump_tables_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
AddAllocatedRegister(Location::RegisterLocation(kFakeReturnRegister));
}
@@ -1046,8 +1050,7 @@ void LocationsBuilderX86_64::VisitExit(HExit* exit) {
exit->SetLocations(nullptr);
}
-void InstructionCodeGeneratorX86_64::VisitExit(HExit* exit) {
- UNUSED(exit);
+void InstructionCodeGeneratorX86_64::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
}
void InstructionCodeGeneratorX86_64::GenerateFPJumps(HCondition* cond,
@@ -1275,9 +1278,8 @@ void LocationsBuilderX86_64::VisitLoadLocal(HLoadLocal* local) {
local->SetLocations(nullptr);
}
-void InstructionCodeGeneratorX86_64::VisitLoadLocal(HLoadLocal* load) {
+void InstructionCodeGeneratorX86_64::VisitLoadLocal(HLoadLocal* load ATTRIBUTE_UNUSED) {
// Nothing to do, this is driven by the code generator.
- UNUSED(load);
}
void LocationsBuilderX86_64::VisitStoreLocal(HStoreLocal* store) {
@@ -1304,8 +1306,7 @@ void LocationsBuilderX86_64::VisitStoreLocal(HStoreLocal* store) {
}
}
-void InstructionCodeGeneratorX86_64::VisitStoreLocal(HStoreLocal* store) {
- UNUSED(store);
+void InstructionCodeGeneratorX86_64::VisitStoreLocal(HStoreLocal* store ATTRIBUTE_UNUSED) {
}
void LocationsBuilderX86_64::VisitCondition(HCondition* cond) {
@@ -1477,6 +1478,38 @@ void InstructionCodeGeneratorX86_64::VisitGreaterThanOrEqual(HGreaterThanOrEqual
VisitCondition(comp);
}
+void LocationsBuilderX86_64::VisitBelow(HBelow* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorX86_64::VisitBelow(HBelow* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderX86_64::VisitBelowOrEqual(HBelowOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorX86_64::VisitBelowOrEqual(HBelowOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderX86_64::VisitAbove(HAbove* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorX86_64::VisitAbove(HAbove* comp) {
+ VisitCondition(comp);
+}
+
+void LocationsBuilderX86_64::VisitAboveOrEqual(HAboveOrEqual* comp) {
+ VisitCondition(comp);
+}
+
+void InstructionCodeGeneratorX86_64::VisitAboveOrEqual(HAboveOrEqual* comp) {
+ VisitCondition(comp);
+}
+
void LocationsBuilderX86_64::VisitCompare(HCompare* compare) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
@@ -1578,9 +1611,8 @@ void LocationsBuilderX86_64::VisitIntConstant(HIntConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorX86_64::VisitIntConstant(HIntConstant* constant) {
+void InstructionCodeGeneratorX86_64::VisitIntConstant(HIntConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderX86_64::VisitNullConstant(HNullConstant* constant) {
@@ -1589,9 +1621,8 @@ void LocationsBuilderX86_64::VisitNullConstant(HNullConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorX86_64::VisitNullConstant(HNullConstant* constant) {
+void InstructionCodeGeneratorX86_64::VisitNullConstant(HNullConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderX86_64::VisitLongConstant(HLongConstant* constant) {
@@ -1600,9 +1631,8 @@ void LocationsBuilderX86_64::VisitLongConstant(HLongConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorX86_64::VisitLongConstant(HLongConstant* constant) {
+void InstructionCodeGeneratorX86_64::VisitLongConstant(HLongConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderX86_64::VisitFloatConstant(HFloatConstant* constant) {
@@ -1611,9 +1641,8 @@ void LocationsBuilderX86_64::VisitFloatConstant(HFloatConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorX86_64::VisitFloatConstant(HFloatConstant* constant) {
+void InstructionCodeGeneratorX86_64::VisitFloatConstant(HFloatConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderX86_64::VisitDoubleConstant(HDoubleConstant* constant) {
@@ -1622,9 +1651,9 @@ void LocationsBuilderX86_64::VisitDoubleConstant(HDoubleConstant* constant) {
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorX86_64::VisitDoubleConstant(HDoubleConstant* constant) {
+void InstructionCodeGeneratorX86_64::VisitDoubleConstant(
+ HDoubleConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderX86_64::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
@@ -1639,8 +1668,7 @@ void LocationsBuilderX86_64::VisitReturnVoid(HReturnVoid* ret) {
ret->SetLocations(nullptr);
}
-void InstructionCodeGeneratorX86_64::VisitReturnVoid(HReturnVoid* ret) {
- UNUSED(ret);
+void InstructionCodeGeneratorX86_64::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE_UNUSED) {
codegen_->GenerateFrameExit();
}
@@ -3594,8 +3622,7 @@ void LocationsBuilderX86_64::VisitPhi(HPhi* instruction) {
locations->SetOut(Location::Any());
}
-void InstructionCodeGeneratorX86_64::VisitPhi(HPhi* instruction) {
- UNUSED(instruction);
+void InstructionCodeGeneratorX86_64::VisitPhi(HPhi* instruction ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unimplemented";
}
@@ -4415,13 +4442,11 @@ void LocationsBuilderX86_64::VisitTemporary(HTemporary* temp) {
temp->SetLocations(nullptr);
}
-void InstructionCodeGeneratorX86_64::VisitTemporary(HTemporary* temp) {
+void InstructionCodeGeneratorX86_64::VisitTemporary(HTemporary* temp ATTRIBUTE_UNUSED) {
// Nothing to do, this is driven by the code generator.
- UNUSED(temp);
}
-void LocationsBuilderX86_64::VisitParallelMove(HParallelMove* instruction) {
- UNUSED(instruction);
+void LocationsBuilderX86_64::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unimplemented";
}
@@ -4479,7 +4504,6 @@ X86_64Assembler* ParallelMoveResolverX86_64::GetAssembler() const {
}
void ParallelMoveResolverX86_64::EmitMove(size_t index) {
- DCHECK_LT(index, moves_.size());
MoveOperands* move = moves_[index];
Location source = move->GetSource();
Location destination = move->GetDestination();
@@ -4638,7 +4662,6 @@ void ParallelMoveResolverX86_64::Exchange64(XmmRegister reg, int mem) {
}
void ParallelMoveResolverX86_64::EmitSwap(size_t index) {
- DCHECK_LT(index, moves_.size());
MoveOperands* move = moves_[index];
Location source = move->GetSource();
Location destination = move->GetDestination();
@@ -4694,17 +4717,24 @@ void InstructionCodeGeneratorX86_64::GenerateClassInitializationCheck(
}
void LocationsBuilderX86_64::VisitLoadClass(HLoadClass* cls) {
- LocationSummary::CallKind call_kind = cls->CanCallRuntime()
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall;
- LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetOut(Location::RequiresRegister());
+ InvokeRuntimeCallingConvention calling_convention;
+ CodeGenerator::CreateLoadClassLocationSummary(
+ cls,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ Location::RegisterLocation(RAX));
}
void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) {
LocationSummary* locations = cls->GetLocations();
+ if (cls->NeedsAccessCheck()) {
+ codegen_->MoveConstant(locations->GetTemp(0), cls->GetTypeIndex());
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pInitializeTypeAndVerifyAccess),
+ cls,
+ cls->GetDexPc(),
+ nullptr);
+ return;
+ }
+
CpuRegister out = locations->Out().AsRegister<CpuRegister>();
CpuRegister current_method = locations->InAt(0).AsRegister<CpuRegister>();
if (cls->IsReferrersClass()) {
@@ -4817,6 +4847,7 @@ void LocationsBuilderX86_64::VisitInstanceOf(HInstanceOf* instruction) {
case TypeCheckKind::kArrayObjectCheck:
call_kind = LocationSummary::kNoCall;
break;
+ case TypeCheckKind::kUnresolvedCheck:
case TypeCheckKind::kInterfaceCheck:
call_kind = LocationSummary::kCall;
break;
@@ -4857,10 +4888,11 @@ void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) {
__ j(kEqual, &zero);
}
- // In case of an interface check, we put the object class into the object register.
+ // In case of an interface/unresolved check, we put the object class into the object register.
// This is safe, as the register is caller-save, and the object must be in another
// register if it survives the runtime call.
- CpuRegister target = (instruction->GetTypeCheckKind() == TypeCheckKind::kInterfaceCheck)
+ CpuRegister target = (instruction->GetTypeCheckKind() == TypeCheckKind::kInterfaceCheck) ||
+ (instruction->GetTypeCheckKind() == TypeCheckKind::kUnresolvedCheck)
? obj
: out;
__ movl(target, Address(obj, class_offset));
@@ -4974,7 +5006,7 @@ void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) {
}
break;
}
-
+ case TypeCheckKind::kUnresolvedCheck:
case TypeCheckKind::kInterfaceCheck:
default: {
codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
@@ -5015,6 +5047,7 @@ void LocationsBuilderX86_64::VisitCheckCast(HCheckCast* instruction) {
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
break;
+ case TypeCheckKind::kUnresolvedCheck:
case TypeCheckKind::kInterfaceCheck:
call_kind = LocationSummary::kCall;
break;
@@ -5142,6 +5175,7 @@ void InstructionCodeGeneratorX86_64::VisitCheckCast(HCheckCast* instruction) {
__ j(kNotEqual, slow_path->GetEntryLabel());
break;
}
+ case TypeCheckKind::kUnresolvedCheck:
case TypeCheckKind::kInterfaceCheck:
default:
codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast),
@@ -5287,15 +5321,13 @@ void InstructionCodeGeneratorX86_64::HandleBitwiseOperation(HBinaryOperation* in
}
}
-void LocationsBuilderX86_64::VisitBoundType(HBoundType* instruction) {
+void LocationsBuilderX86_64::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
// Nothing to do, this should be removed during prepare for register allocator.
- UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
-void InstructionCodeGeneratorX86_64::VisitBoundType(HBoundType* instruction) {
+void InstructionCodeGeneratorX86_64::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
// Nothing to do, this should be removed during prepare for register allocator.
- UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
@@ -5316,31 +5348,43 @@ void LocationsBuilderX86_64::VisitPackedSwitch(HPackedSwitch* switch_instr) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
+ locations->AddTemp(Location::RequiresRegister());
}
void InstructionCodeGeneratorX86_64::VisitPackedSwitch(HPackedSwitch* switch_instr) {
int32_t lower_bound = switch_instr->GetStartValue();
int32_t num_entries = switch_instr->GetNumEntries();
LocationSummary* locations = switch_instr->GetLocations();
- CpuRegister value_reg = locations->InAt(0).AsRegister<CpuRegister>();
- HBasicBlock* default_block = switch_instr->GetDefaultBlock();
+ CpuRegister value_reg_in = locations->InAt(0).AsRegister<CpuRegister>();
+ CpuRegister temp_reg = locations->GetTemp(0).AsRegister<CpuRegister>();
+ CpuRegister base_reg = locations->GetTemp(1).AsRegister<CpuRegister>();
- // Create a series of compare/jumps.
- const ArenaVector<HBasicBlock*>& successors = switch_instr->GetBlock()->GetSuccessors();
- for (int i = 0; i < num_entries; i++) {
- int32_t case_value = lower_bound + i;
- if (case_value == 0) {
- __ testl(value_reg, value_reg);
- } else {
- __ cmpl(value_reg, Immediate(case_value));
- }
- __ j(kEqual, codegen_->GetLabelOf(successors.at(i)));
+ // Remove the bias, if needed.
+ Register value_reg_out = value_reg_in.AsRegister();
+ if (lower_bound != 0) {
+ __ leal(temp_reg, Address(value_reg_in, -lower_bound));
+ value_reg_out = temp_reg.AsRegister();
}
+ CpuRegister value_reg(value_reg_out);
- // And the default for any other value.
- if (!codegen_->GoesToNextBlock(switch_instr->GetBlock(), default_block)) {
- __ jmp(codegen_->GetLabelOf(default_block));
- }
+ // Is the value in range?
+ HBasicBlock* default_block = switch_instr->GetDefaultBlock();
+ __ cmpl(value_reg, Immediate(num_entries - 1));
+ __ j(kAbove, codegen_->GetLabelOf(default_block));
+
+ // We are in the range of the table.
+ // Load the address of the jump table in the constant area.
+ __ leaq(base_reg, codegen_->LiteralCaseTable(switch_instr));
+
+ // Load the (signed) offset from the jump table.
+ __ movsxd(temp_reg, Address(base_reg, value_reg, TIMES_4, 0));
+
+ // Add the offset to the address of the table base.
+ __ addq(temp_reg, base_reg);
+
+ // And jump.
+ __ jmp(temp_reg);
}
void CodeGeneratorX86_64::Load64BitValue(CpuRegister dest, int64_t value) {
@@ -5366,15 +5410,85 @@ void CodeGeneratorX86_64::Store64BitValueToStack(Location dest, int64_t value) {
}
}
+/**
+ * Class to handle late fixup of offsets into constant area.
+ */
+class RIPFixup : public AssemblerFixup, public ArenaObject<kArenaAllocCodeGenerator> {
+ public:
+ RIPFixup(CodeGeneratorX86_64& codegen, size_t offset)
+ : codegen_(&codegen), offset_into_constant_area_(offset) {}
+
+ protected:
+ void SetOffset(size_t offset) { offset_into_constant_area_ = offset; }
+
+ CodeGeneratorX86_64* codegen_;
+
+ private:
+ void Process(const MemoryRegion& region, int pos) OVERRIDE {
+ // Patch the correct offset for the instruction. We use the address of the
+ // 'next' instruction, which is 'pos' (patch the 4 bytes before).
+ int32_t constant_offset = codegen_->ConstantAreaStart() + offset_into_constant_area_;
+ int32_t relative_position = constant_offset - pos;
+
+ // Patch in the right value.
+ region.StoreUnaligned<int32_t>(pos - 4, relative_position);
+ }
+
+ // Location in constant area that the fixup refers to.
+ size_t offset_into_constant_area_;
+};
+
+/**
+ * Class to handle late fixup of offsets to a jump table that will be created in the
+ * constant area.
+ */
+class JumpTableRIPFixup : public RIPFixup {
+ public:
+ JumpTableRIPFixup(CodeGeneratorX86_64& codegen, HPackedSwitch* switch_instr)
+ : RIPFixup(codegen, static_cast<size_t>(-1)), switch_instr_(switch_instr) {}
+
+ void CreateJumpTable() {
+ X86_64Assembler* assembler = codegen_->GetAssembler();
+
+ // Ensure that the reference to the jump table has the correct offset.
+ const int32_t offset_in_constant_table = assembler->ConstantAreaSize();
+ SetOffset(offset_in_constant_table);
+
+ // Compute the offset from the start of the function to this jump table.
+ const int32_t current_table_offset = assembler->CodeSize() + offset_in_constant_table;
+
+ // Populate the jump table with the correct values.
+ int32_t num_entries = switch_instr_->GetNumEntries();
+ HBasicBlock* block = switch_instr_->GetBlock();
+ const ArenaVector<HBasicBlock*>& successors = block->GetSuccessors();
+ // The value that we want is the target offset - the position of the table.
+ for (int32_t i = 0; i < num_entries; i++) {
+ HBasicBlock* b = successors[i];
+ Label* l = codegen_->GetLabelOf(b);
+ DCHECK(l->IsBound());
+ int32_t offset_to_block = l->Position() - current_table_offset;
+ assembler->AppendInt32(offset_to_block);
+ }
+ }
+
+ private:
+ const HPackedSwitch* switch_instr_;
+};
+
void CodeGeneratorX86_64::Finalize(CodeAllocator* allocator) {
// Generate the constant area if needed.
X86_64Assembler* assembler = GetAssembler();
- if (!assembler->IsConstantAreaEmpty()) {
- // Align to 4 byte boundary to reduce cache misses, as the data is 4 and 8
- // byte values. If used for vectors at a later time, this will need to be
- // updated to 16 bytes with the appropriate offset.
+ if (!assembler->IsConstantAreaEmpty() || !fixups_to_jump_tables_.empty()) {
+ // Align to 4 byte boundary to reduce cache misses, as the data is 4 and 8 byte values.
assembler->Align(4, 0);
constant_area_start_ = assembler->CodeSize();
+
+ // Populate any jump tables.
+ for (auto jump_table : fixups_to_jump_tables_) {
+ jump_table->CreateJumpTable();
+ }
+
+ // And now add the constant area to the generated code.
assembler->AddConstantArea();
}
@@ -5382,31 +5496,6 @@ void CodeGeneratorX86_64::Finalize(CodeAllocator* allocator) {
CodeGenerator::Finalize(allocator);
}
-/**
- * Class to handle late fixup of offsets into constant area.
- */
-class RIPFixup : public AssemblerFixup, public ArenaObject<kArenaAllocCodeGenerator> {
- public:
- RIPFixup(const CodeGeneratorX86_64& codegen, int offset)
- : codegen_(codegen), offset_into_constant_area_(offset) {}
-
- private:
- void Process(const MemoryRegion& region, int pos) OVERRIDE {
- // Patch the correct offset for the instruction. We use the address of the
- // 'next' instruction, which is 'pos' (patch the 4 bytes before).
- int constant_offset = codegen_.ConstantAreaStart() + offset_into_constant_area_;
- int relative_position = constant_offset - pos;
-
- // Patch in the right value.
- region.StoreUnaligned<int32_t>(pos - 4, relative_position);
- }
-
- const CodeGeneratorX86_64& codegen_;
-
- // Location in constant area that the fixup refers to.
- int offset_into_constant_area_;
-};
-
Address CodeGeneratorX86_64::LiteralDoubleAddress(double v) {
AssemblerFixup* fixup = new (GetGraph()->GetArena()) RIPFixup(*this, __ AddDouble(v));
return Address::RIP(fixup);
@@ -5447,6 +5536,16 @@ void CodeGeneratorX86_64::MoveFromReturnRegister(Location trg, Primitive::Type t
GetMoveResolver()->EmitNativeCode(&parallel_move);
}
+Address CodeGeneratorX86_64::LiteralCaseTable(HPackedSwitch* switch_instr) {
+ // Create a fixup to be used to create and address the jump table.
+ JumpTableRIPFixup* table_fixup =
+ new (GetGraph()->GetArena()) JumpTableRIPFixup(*this, switch_instr);
+
+ // We have to populate the jump tables.
+ fixups_to_jump_tables_.push_back(table_fixup);
+ return Address::RIP(table_fixup);
+}
+
#undef __
} // namespace x86_64
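For the x86_64 side above, a minimal standalone model of the arithmetic behind RIPFixup::Process(), again not ART code: the displacement patched into the last four bytes of the instruction is (constant area start + offset into the area) - pos, where pos is the assembler offset of the next instruction, so adding the runtime RIP lands exactly on the constant. All values below are made up.

#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t code_start = 0x100000;       // hypothetical load address of the method
  const int32_t constant_area_start = 0x200;   // recorded in Finalize() after Align(4, 0)
  const int32_t offset_into_area = 0x10;       // e.g. what AddDouble()/AddInt64() returned
  const int32_t pos = 0x84;                    // offset of the instruction following the use

  const int32_t displacement = (constant_area_start + offset_into_area) - pos;
  const uintptr_t rip = code_start + pos;      // RIP-relative addressing uses the next instruction
  assert(rip + displacement == code_start + constant_area_start + offset_into_area);
  return 0;
}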
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index ecc8630e6b..dc86a48ce7 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -30,6 +30,9 @@ namespace x86_64 {
// Use a local definition to prevent copying mistakes.
static constexpr size_t kX86_64WordSize = kX86_64PointerSize;
+// Some x86_64 instructions require a register to be available as temp.
+static constexpr Register TMP = R11;
+
static constexpr Register kParameterCoreRegisters[] = { RSI, RDX, RCX, R8, R9 };
static constexpr FloatRegister kParameterFloatRegisters[] =
{ XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7 };
@@ -231,6 +234,9 @@ class InstructionCodeGeneratorX86_64 : public HGraphVisitor {
DISALLOW_COPY_AND_ASSIGN(InstructionCodeGeneratorX86_64);
};
+// Class for fixups to jump tables.
+class JumpTableRIPFixup;
+
class CodeGeneratorX86_64 : public CodeGenerator {
public:
CodeGeneratorX86_64(HGraph* graph,
@@ -351,6 +357,7 @@ class CodeGeneratorX86_64 : public CodeGenerator {
// Load a 64 bit value into a register in the most efficient manner.
void Load64BitValue(CpuRegister dest, int64_t value);
+ Address LiteralCaseTable(HPackedSwitch* switch_instr);
// Store a 64 bit value into a DoubleStackSlot in the most efficient manner.
void Store64BitValueToStack(Location dest, int64_t value);
@@ -388,6 +395,9 @@ class CodeGeneratorX86_64 : public CodeGenerator {
// We will fix this up in the linker later to have the right value.
static constexpr int32_t kDummy32BitOffset = 256;
+ // Fixups for jump tables need to be handled specially.
+ ArenaVector<JumpTableRIPFixup*> fixups_to_jump_tables_;
+
DISALLOW_COPY_AND_ASSIGN(CodeGeneratorX86_64);
};
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index 5fc305cd34..fe5af2fc5e 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -561,7 +561,7 @@ TEST(CodegenTest, NonMaterializedCondition) {
ASSERT_FALSE(equal->NeedsMaterialization());
auto hook_before_codegen = [](HGraph* graph_in) {
- HBasicBlock* block = graph_in->GetEntryBlock()->GetSuccessor(0);
+ HBasicBlock* block = graph_in->GetEntryBlock()->GetSuccessors()[0];
HParallelMove* move = new (graph_in->GetArena()) HParallelMove(graph_in->GetArena());
block->InsertInstructionBefore(move, block->GetLastInstruction());
};
@@ -667,7 +667,7 @@ TEST(CodegenTest, MaterializedCondition1) {
code_block->AddInstruction(&ret);
auto hook_before_codegen = [](HGraph* graph_in) {
- HBasicBlock* block = graph_in->GetEntryBlock()->GetSuccessor(0);
+ HBasicBlock* block = graph_in->GetEntryBlock()->GetSuccessors()[0];
HParallelMove* move = new (graph_in->GetArena()) HParallelMove(graph_in->GetArena());
block->InsertInstructionBefore(move, block->GetLastInstruction());
};
@@ -733,7 +733,7 @@ TEST(CodegenTest, MaterializedCondition2) {
if_false_block->AddInstruction(&ret_ge);
auto hook_before_codegen = [](HGraph* graph_in) {
- HBasicBlock* block = graph_in->GetEntryBlock()->GetSuccessor(0);
+ HBasicBlock* block = graph_in->GetEntryBlock()->GetSuccessors()[0];
HParallelMove* move = new (graph_in->GetArena()) HParallelMove(graph_in->GetArena());
block->InsertInstructionBefore(move, block->GetLastInstruction());
};
@@ -761,4 +761,130 @@ TEST(CodegenTest, ReturnDivInt2Addr) {
TestCode(data, true, 2);
}
+// Helper method.
+static void TestComparison(IfCondition condition, int64_t i, int64_t j, Primitive::Type type) {
+ ArenaPool pool;
+ ArenaAllocator allocator(&pool);
+ HGraph* graph = CreateGraph(&allocator);
+
+ HBasicBlock* entry_block = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(entry_block);
+ graph->SetEntryBlock(entry_block);
+ entry_block->AddInstruction(new (&allocator) HGoto());
+
+ HBasicBlock* block = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(block);
+
+ HBasicBlock* exit_block = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(exit_block);
+ graph->SetExitBlock(exit_block);
+ exit_block->AddInstruction(new (&allocator) HExit());
+
+ entry_block->AddSuccessor(block);
+ block->AddSuccessor(exit_block);
+
+ HInstruction* op1;
+ HInstruction* op2;
+ if (type == Primitive::kPrimInt) {
+ op1 = graph->GetIntConstant(i);
+ op2 = graph->GetIntConstant(j);
+ } else {
+ DCHECK_EQ(type, Primitive::kPrimLong);
+ op1 = graph->GetLongConstant(i);
+ op2 = graph->GetLongConstant(j);
+ }
+
+ HInstruction* comparison = nullptr;
+ bool expected_result = false;
+ const uint64_t x = i;
+ const uint64_t y = j;
+ switch (condition) {
+ case kCondEQ:
+ comparison = new (&allocator) HEqual(op1, op2);
+ expected_result = (i == j);
+ break;
+ case kCondNE:
+ comparison = new (&allocator) HNotEqual(op1, op2);
+ expected_result = (i != j);
+ break;
+ case kCondLT:
+ comparison = new (&allocator) HLessThan(op1, op2);
+ expected_result = (i < j);
+ break;
+ case kCondLE:
+ comparison = new (&allocator) HLessThanOrEqual(op1, op2);
+ expected_result = (i <= j);
+ break;
+ case kCondGT:
+ comparison = new (&allocator) HGreaterThan(op1, op2);
+ expected_result = (i > j);
+ break;
+ case kCondGE:
+ comparison = new (&allocator) HGreaterThanOrEqual(op1, op2);
+ expected_result = (i >= j);
+ break;
+ case kCondB:
+ comparison = new (&allocator) HBelow(op1, op2);
+ expected_result = (x < y);
+ break;
+ case kCondBE:
+ comparison = new (&allocator) HBelowOrEqual(op1, op2);
+ expected_result = (x <= y);
+ break;
+ case kCondA:
+ comparison = new (&allocator) HAbove(op1, op2);
+ expected_result = (x > y);
+ break;
+ case kCondAE:
+ comparison = new (&allocator) HAboveOrEqual(op1, op2);
+ expected_result = (x >= y);
+ break;
+ }
+ block->AddInstruction(comparison);
+ block->AddInstruction(new (&allocator) HReturn(comparison));
+
+ auto hook_before_codegen = [](HGraph*) {
+ };
+ RunCodeOptimized(graph, hook_before_codegen, true, expected_result);
+}
+
+TEST(CodegenTest, ComparisonsInt) {
+ for (int64_t i = -1; i <= 1; i++) {
+ for (int64_t j = -1; j <= 1; j++) {
+ TestComparison(kCondEQ, i, j, Primitive::kPrimInt);
+ TestComparison(kCondNE, i, j, Primitive::kPrimInt);
+ TestComparison(kCondLT, i, j, Primitive::kPrimInt);
+ TestComparison(kCondLE, i, j, Primitive::kPrimInt);
+ TestComparison(kCondGT, i, j, Primitive::kPrimInt);
+ TestComparison(kCondGE, i, j, Primitive::kPrimInt);
+ TestComparison(kCondB, i, j, Primitive::kPrimInt);
+ TestComparison(kCondBE, i, j, Primitive::kPrimInt);
+ TestComparison(kCondA, i, j, Primitive::kPrimInt);
+ TestComparison(kCondAE, i, j, Primitive::kPrimInt);
+ }
+ }
+}
+
+TEST(CodegenTest, ComparisonsLong) {
+ // TODO: make MIPS work for long
+ if (kRuntimeISA == kMips || kRuntimeISA == kMips64) {
+ return;
+ }
+
+ for (int64_t i = -1; i <= 1; i++) {
+ for (int64_t j = -1; j <= 1; j++) {
+ TestComparison(kCondEQ, i, j, Primitive::kPrimLong);
+ TestComparison(kCondNE, i, j, Primitive::kPrimLong);
+ TestComparison(kCondLT, i, j, Primitive::kPrimLong);
+ TestComparison(kCondLE, i, j, Primitive::kPrimLong);
+ TestComparison(kCondGT, i, j, Primitive::kPrimLong);
+ TestComparison(kCondGE, i, j, Primitive::kPrimLong);
+ TestComparison(kCondB, i, j, Primitive::kPrimLong);
+ TestComparison(kCondBE, i, j, Primitive::kPrimLong);
+ TestComparison(kCondA, i, j, Primitive::kPrimLong);
+ TestComparison(kCondAE, i, j, Primitive::kPrimLong);
+ }
+ }
+}
+
} // namespace art
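As a quick standalone check, separate from the tests above, of why TestComparison reinterprets i and j as uint64_t for kCondB/kCondBE/kCondA/kCondAE: HBelow, HBelowOrEqual, HAbove and HAboveOrEqual are unsigned comparisons, so -1 ranks above 1.

#include <cassert>
#include <cstdint>

int main() {
  const int64_t i = -1;
  const int64_t j = 1;
  assert(i < j);                                                // kCondLT: signed view
  assert(static_cast<uint64_t>(i) > static_cast<uint64_t>(j));  // kCondA: unsigned view, -1 wraps to UINT64_MAX
  return 0;
}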
diff --git a/compiler/optimizing/common_arm64.h b/compiler/optimizing/common_arm64.h
index f54547534f..4abe5e953c 100644
--- a/compiler/optimizing/common_arm64.h
+++ b/compiler/optimizing/common_arm64.h
@@ -206,7 +206,9 @@ static bool CanEncodeConstantAsImmediate(HConstant* constant, HInstruction* inst
if (instr->IsAdd() || instr->IsSub() || instr->IsCondition() ||
instr->IsCompare() || instr->IsBoundsCheck()) {
// Uses aliases of ADD/SUB instructions.
- return vixl::Assembler::IsImmAddSub(value);
+ // If `value` does not fit but `-value` does, VIXL will automatically use
+ // the 'opposite' instruction.
+ return vixl::Assembler::IsImmAddSub(value) || vixl::Assembler::IsImmAddSub(-value);
} else if (instr->IsAnd() || instr->IsOr() || instr->IsXor()) {
// Uses logical operations.
return vixl::Assembler::IsImmLogical(value, vixl::kXRegSize);
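The relaxed check in CanEncodeConstantAsImmediate leans on the ADD/SUB aliasing: an immediate that only encodes when negated can still be used because the assembler emits the opposite instruction. A trivial sketch of the identity involved, with a made-up value:

#include <cassert>
#include <cstdint>

int main() {
  const int64_t base = 100;
  const int64_t value = -4095;               // encodes as a SUB immediate (4095), not as an ADD immediate
  assert(base + value == base - (-value));   // so the assembler is free to pick the opposite instruction
  return 0;
}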
diff --git a/compiler/optimizing/constant_area_fixups_x86.cc b/compiler/optimizing/constant_area_fixups_x86.cc
new file mode 100644
index 0000000000..c3470002c5
--- /dev/null
+++ b/compiler/optimizing/constant_area_fixups_x86.cc
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "constant_area_fixups_x86.h"
+
+namespace art {
+namespace x86 {
+
+/**
+ * Finds instructions that need the constant area base as an input.
+ */
+class ConstantHandlerVisitor : public HGraphVisitor {
+ public:
+ explicit ConstantHandlerVisitor(HGraph* graph) : HGraphVisitor(graph), base_(nullptr) {}
+
+ private:
+ void VisitAdd(HAdd* add) OVERRIDE {
+ BinaryFP(add);
+ }
+
+ void VisitSub(HSub* sub) OVERRIDE {
+ BinaryFP(sub);
+ }
+
+ void VisitMul(HMul* mul) OVERRIDE {
+ BinaryFP(mul);
+ }
+
+ void VisitDiv(HDiv* div) OVERRIDE {
+ BinaryFP(div);
+ }
+
+ void VisitReturn(HReturn* ret) OVERRIDE {
+ HConstant* value = ret->InputAt(0)->AsConstant();
+ if ((value != nullptr && Primitive::IsFloatingPointType(value->GetType()))) {
+ ReplaceInput(ret, value, 0, true);
+ }
+ }
+
+ void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE {
+ HandleInvoke(invoke);
+ }
+
+ void VisitInvokeVirtual(HInvokeVirtual* invoke) OVERRIDE {
+ HandleInvoke(invoke);
+ }
+
+ void VisitInvokeInterface(HInvokeInterface* invoke) OVERRIDE {
+ HandleInvoke(invoke);
+ }
+
+ void BinaryFP(HBinaryOperation* bin) {
+ HConstant* rhs = bin->InputAt(1)->AsConstant();
+ if (rhs != nullptr && Primitive::IsFloatingPointType(bin->GetResultType())) {
+ ReplaceInput(bin, rhs, 1, false);
+ }
+ }
+
+ void VisitPackedSwitch(HPackedSwitch* switch_insn) OVERRIDE {
+ // We need to replace the HPackedSwitch with a HX86PackedSwitch in order to
+ // address the constant area.
+ InitializeConstantAreaPointer(switch_insn);
+ HGraph* graph = GetGraph();
+ HBasicBlock* block = switch_insn->GetBlock();
+ HX86PackedSwitch* x86_switch = new (graph->GetArena()) HX86PackedSwitch(
+ switch_insn->GetStartValue(),
+ switch_insn->GetNumEntries(),
+ switch_insn->InputAt(0),
+ base_,
+ switch_insn->GetDexPc());
+ block->ReplaceAndRemoveInstructionWith(switch_insn, x86_switch);
+ }
+
+ void InitializeConstantAreaPointer(HInstruction* user) {
+ // Ensure we only initialize the pointer once.
+ if (base_ != nullptr) {
+ return;
+ }
+
+ HGraph* graph = GetGraph();
+ HBasicBlock* entry = graph->GetEntryBlock();
+ base_ = new (graph->GetArena()) HX86ComputeBaseMethodAddress();
+ HInstruction* insert_pos = (user->GetBlock() == entry) ? user : entry->GetLastInstruction();
+ entry->InsertInstructionBefore(base_, insert_pos);
+ DCHECK(base_ != nullptr);
+ }
+
+ void ReplaceInput(HInstruction* insn, HConstant* value, int input_index, bool materialize) {
+ InitializeConstantAreaPointer(insn);
+ HGraph* graph = GetGraph();
+ HBasicBlock* block = insn->GetBlock();
+ HX86LoadFromConstantTable* load_constant =
+ new (graph->GetArena()) HX86LoadFromConstantTable(base_, value, materialize);
+ block->InsertInstructionBefore(load_constant, insn);
+ insn->ReplaceInput(load_constant, input_index);
+ }
+
+ void HandleInvoke(HInvoke* invoke) {
+ // Ensure that we can load FP arguments from the constant area.
+ for (size_t i = 0, e = invoke->InputCount(); i < e; i++) {
+ HConstant* input = invoke->InputAt(i)->AsConstant();
+ if (input != nullptr && Primitive::IsFloatingPointType(input->GetType())) {
+ ReplaceInput(invoke, input, i, true);
+ }
+ }
+ }
+
+ // The generated HX86ComputeBaseMethodAddress in the entry block needed as an
+ // input to the HX86LoadFromConstantTable instructions.
+ HX86ComputeBaseMethodAddress* base_;
+};
+
+void ConstantAreaFixups::Run() {
+ ConstantHandlerVisitor visitor(graph_);
+ visitor.VisitInsertionOrder();
+}
+
+} // namespace x86
+} // namespace art
diff --git a/compiler/optimizing/constant_folding_test.cc b/compiler/optimizing/constant_folding_test.cc
index 10e4bc98a6..b2e222f1a9 100644
--- a/compiler/optimizing/constant_folding_test.cc
+++ b/compiler/optimizing/constant_folding_test.cc
@@ -51,7 +51,7 @@ static void TestCode(const uint16_t* data,
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegenX86(graph, *features_x86.get(), CompilerOptions());
HConstantFolding(graph).Run();
- SSAChecker ssa_checker_cf(&allocator, graph);
+ SSAChecker ssa_checker_cf(graph);
ssa_checker_cf.Run();
ASSERT_TRUE(ssa_checker_cf.IsValid());
@@ -63,7 +63,7 @@ static void TestCode(const uint16_t* data,
check_after_cf(graph);
HDeadCodeElimination(graph).Run();
- SSAChecker ssa_checker_dce(&allocator, graph);
+ SSAChecker ssa_checker_dce(graph);
ssa_checker_dce.Run();
ASSERT_TRUE(ssa_checker_dce.IsValid());
@@ -113,7 +113,7 @@ TEST(ConstantFolding, IntConstantFoldingNegation) {
// Check the value of the computed constant.
auto check_after_cf = [](HGraph* graph) {
- HInstruction* inst = graph->GetBlock(1)->GetFirstInstruction()->InputAt(0);
+ HInstruction* inst = graph->GetBlocks()[1]->GetFirstInstruction()->InputAt(0);
ASSERT_TRUE(inst->IsIntConstant());
ASSERT_EQ(inst->AsIntConstant()->GetValue(), -1);
};
@@ -175,7 +175,7 @@ TEST(ConstantFolding, LongConstantFoldingNegation) {
// Check the value of the computed constant.
auto check_after_cf = [](HGraph* graph) {
- HInstruction* inst = graph->GetBlock(1)->GetFirstInstruction()->InputAt(0);
+ HInstruction* inst = graph->GetBlocks()[1]->GetFirstInstruction()->InputAt(0);
ASSERT_TRUE(inst->IsLongConstant());
ASSERT_EQ(inst->AsLongConstant()->GetValue(), INT64_C(-4294967296));
};
@@ -237,7 +237,7 @@ TEST(ConstantFolding, IntConstantFoldingOnAddition1) {
// Check the value of the computed constant.
auto check_after_cf = [](HGraph* graph) {
- HInstruction* inst = graph->GetBlock(1)->GetFirstInstruction()->InputAt(0);
+ HInstruction* inst = graph->GetBlocks()[1]->GetFirstInstruction()->InputAt(0);
ASSERT_TRUE(inst->IsIntConstant());
ASSERT_EQ(inst->AsIntConstant()->GetValue(), 3);
};
@@ -317,7 +317,7 @@ TEST(ConstantFolding, IntConstantFoldingOnAddition2) {
// Check the values of the computed constants.
auto check_after_cf = [](HGraph* graph) {
- HInstruction* inst1 = graph->GetBlock(1)->GetFirstInstruction()->InputAt(0);
+ HInstruction* inst1 = graph->GetBlocks()[1]->GetFirstInstruction()->InputAt(0);
ASSERT_TRUE(inst1->IsIntConstant());
ASSERT_EQ(inst1->AsIntConstant()->GetValue(), 12);
HInstruction* inst2 = inst1->GetPrevious();
@@ -389,7 +389,7 @@ TEST(ConstantFolding, IntConstantFoldingOnSubtraction) {
// Check the value of the computed constant.
auto check_after_cf = [](HGraph* graph) {
- HInstruction* inst = graph->GetBlock(1)->GetFirstInstruction()->InputAt(0);
+ HInstruction* inst = graph->GetBlocks()[1]->GetFirstInstruction()->InputAt(0);
ASSERT_TRUE(inst->IsIntConstant());
ASSERT_EQ(inst->AsIntConstant()->GetValue(), 1);
};
@@ -453,7 +453,7 @@ TEST(ConstantFolding, LongConstantFoldingOnAddition) {
// Check the value of the computed constant.
auto check_after_cf = [](HGraph* graph) {
- HInstruction* inst = graph->GetBlock(1)->GetFirstInstruction()->InputAt(0);
+ HInstruction* inst = graph->GetBlocks()[1]->GetFirstInstruction()->InputAt(0);
ASSERT_TRUE(inst->IsLongConstant());
ASSERT_EQ(inst->AsLongConstant()->GetValue(), 3);
};
@@ -518,7 +518,7 @@ TEST(ConstantFolding, LongConstantFoldingOnSubtraction) {
// Check the value of the computed constant.
auto check_after_cf = [](HGraph* graph) {
- HInstruction* inst = graph->GetBlock(1)->GetFirstInstruction()->InputAt(0);
+ HInstruction* inst = graph->GetBlocks()[1]->GetFirstInstruction()->InputAt(0);
ASSERT_TRUE(inst->IsLongConstant());
ASSERT_EQ(inst->AsLongConstant()->GetValue(), 1);
};
@@ -620,7 +620,7 @@ TEST(ConstantFolding, IntConstantFoldingAndJumps) {
// Check the values of the computed constants.
auto check_after_cf = [](HGraph* graph) {
- HInstruction* inst1 = graph->GetBlock(4)->GetFirstInstruction()->InputAt(0);
+ HInstruction* inst1 = graph->GetBlocks()[4]->GetFirstInstruction()->InputAt(0);
ASSERT_TRUE(inst1->IsIntConstant());
ASSERT_EQ(inst1->AsIntConstant()->GetValue(), 20);
HInstruction* inst2 = inst1->GetPrevious();
@@ -710,7 +710,7 @@ TEST(ConstantFolding, ConstantCondition) {
// Check the values of the computed constants.
auto check_after_cf = [](HGraph* graph) {
- HInstruction* inst = graph->GetBlock(1)->GetFirstInstruction()->InputAt(0);
+ HInstruction* inst = graph->GetBlocks()[1]->GetFirstInstruction()->InputAt(0);
ASSERT_TRUE(inst->IsIntConstant());
ASSERT_EQ(inst->AsIntConstant()->GetValue(), 1);
};
diff --git a/compiler/optimizing/dead_code_elimination.cc b/compiler/optimizing/dead_code_elimination.cc
index 007d0e3332..9754043f32 100644
--- a/compiler/optimizing/dead_code_elimination.cc
+++ b/compiler/optimizing/dead_code_elimination.cc
@@ -63,7 +63,7 @@ static void MarkReachableBlocks(HGraph* graph, ArenaBitVector* visited) {
static_cast<uint32_t>(switch_value) - static_cast<uint32_t>(start_value);
if (switch_index < switch_instruction->GetNumEntries()) {
live_successors = live_successors.SubArray(switch_index, 1u);
- DCHECK_EQ(live_successors[0], block->GetSuccessor(switch_index));
+ DCHECK_EQ(live_successors[0], block->GetSuccessors()[switch_index]);
} else {
live_successors = live_successors.SubArray(switch_instruction->GetNumEntries(), 1u);
DCHECK_EQ(live_successors[0], switch_instruction->GetDefaultBlock());
@@ -136,7 +136,7 @@ void HDeadCodeElimination::RemoveDeadBlocks() {
it.Advance();
continue;
}
- HBasicBlock* successor = block->GetSuccessor(0);
+ HBasicBlock* successor = block->GetSuccessors()[0];
if (successor->IsExitBlock() || successor->GetPredecessors().size() != 1u) {
it.Advance();
continue;
diff --git a/compiler/optimizing/dead_code_elimination_test.cc b/compiler/optimizing/dead_code_elimination_test.cc
index ee3a61aa0c..cf0a4acd4a 100644
--- a/compiler/optimizing/dead_code_elimination_test.cc
+++ b/compiler/optimizing/dead_code_elimination_test.cc
@@ -45,7 +45,7 @@ static void TestCode(const uint16_t* data,
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegenX86(graph, *features_x86.get(), CompilerOptions());
HDeadCodeElimination(graph).Run();
- SSAChecker ssa_checker(&allocator, graph);
+ SSAChecker ssa_checker(graph);
ssa_checker.Run();
ASSERT_TRUE(ssa_checker.IsValid());
diff --git a/compiler/optimizing/dominator_test.cc b/compiler/optimizing/dominator_test.cc
index 6b186508cd..91e4a997fd 100644
--- a/compiler/optimizing/dominator_test.cc
+++ b/compiler/optimizing/dominator_test.cc
@@ -36,16 +36,16 @@ static void TestCode(const uint16_t* data, const uint32_t* blocks, size_t blocks
ASSERT_EQ(graph->GetBlocks().size(), blocks_length);
for (size_t i = 0, e = blocks_length; i < e; ++i) {
if (blocks[i] == kInvalidBlockId) {
- if (graph->GetBlock(i) == nullptr) {
+ if (graph->GetBlocks()[i] == nullptr) {
// Dead block.
} else {
// Only the entry block has no dominator.
- ASSERT_EQ(nullptr, graph->GetBlock(i)->GetDominator());
- ASSERT_TRUE(graph->GetBlock(i)->IsEntryBlock());
+ ASSERT_EQ(nullptr, graph->GetBlocks()[i]->GetDominator());
+ ASSERT_TRUE(graph->GetBlocks()[i]->IsEntryBlock());
}
} else {
- ASSERT_NE(nullptr, graph->GetBlock(i)->GetDominator());
- ASSERT_EQ(blocks[i], graph->GetBlock(i)->GetDominator()->GetBlockId());
+ ASSERT_NE(nullptr, graph->GetBlocks()[i]->GetDominator());
+ ASSERT_EQ(blocks[i], graph->GetBlocks()[i]->GetDominator()->GetBlockId());
}
}
}
diff --git a/compiler/optimizing/find_loops_test.cc b/compiler/optimizing/find_loops_test.cc
index 9e0d352d3e..9b0eb70742 100644
--- a/compiler/optimizing/find_loops_test.cc
+++ b/compiler/optimizing/find_loops_test.cc
@@ -118,7 +118,7 @@ static void TestBlock(HGraph* graph,
uint32_t parent_loop_header_id,
const int* blocks_in_loop = nullptr,
size_t number_of_blocks = 0) {
- HBasicBlock* block = graph->GetBlock(block_id);
+ HBasicBlock* block = graph->GetBlocks()[block_id];
ASSERT_EQ(block->IsLoopHeader(), is_loop_header);
if (parent_loop_header_id == kInvalidBlockId) {
ASSERT_EQ(block->GetLoopInformation(), nullptr);
@@ -296,10 +296,10 @@ TEST(FindLoopsTest, InnerLoop) {
TestBlock(graph, 7, false, kInvalidBlockId); // exit block
TestBlock(graph, 8, false, 2); // synthesized block as pre header of inner loop
- ASSERT_TRUE(graph->GetBlock(3)->GetLoopInformation()->IsIn(
- *graph->GetBlock(2)->GetLoopInformation()));
- ASSERT_FALSE(graph->GetBlock(2)->GetLoopInformation()->IsIn(
- *graph->GetBlock(3)->GetLoopInformation()));
+ ASSERT_TRUE(graph->GetBlocks()[3]->GetLoopInformation()->IsIn(
+ *graph->GetBlocks()[2]->GetLoopInformation()));
+ ASSERT_FALSE(graph->GetBlocks()[2]->GetLoopInformation()->IsIn(
+ *graph->GetBlocks()[3]->GetLoopInformation()));
}
TEST(FindLoopsTest, TwoLoops) {
@@ -326,10 +326,10 @@ TEST(FindLoopsTest, TwoLoops) {
TestBlock(graph, 6, false, kInvalidBlockId); // return block
TestBlock(graph, 7, false, kInvalidBlockId); // exit block
- ASSERT_FALSE(graph->GetBlock(4)->GetLoopInformation()->IsIn(
- *graph->GetBlock(2)->GetLoopInformation()));
- ASSERT_FALSE(graph->GetBlock(2)->GetLoopInformation()->IsIn(
- *graph->GetBlock(4)->GetLoopInformation()));
+ ASSERT_FALSE(graph->GetBlocks()[4]->GetLoopInformation()->IsIn(
+ *graph->GetBlocks()[2]->GetLoopInformation()));
+ ASSERT_FALSE(graph->GetBlocks()[2]->GetLoopInformation()->IsIn(
+ *graph->GetBlocks()[4]->GetLoopInformation()));
}
TEST(FindLoopsTest, NonNaturalLoop) {
@@ -344,8 +344,8 @@ TEST(FindLoopsTest, NonNaturalLoop) {
ArenaPool arena;
ArenaAllocator allocator(&arena);
HGraph* graph = TestCode(data, &allocator);
- ASSERT_TRUE(graph->GetBlock(3)->IsLoopHeader());
- HLoopInformation* info = graph->GetBlock(3)->GetLoopInformation();
+ ASSERT_TRUE(graph->GetBlocks()[3]->IsLoopHeader());
+ HLoopInformation* info = graph->GetBlocks()[3]->GetLoopInformation();
ASSERT_EQ(1u, info->NumberOfBackEdges());
ASSERT_FALSE(info->GetHeader()->Dominates(info->GetBackEdges()[0]));
}
diff --git a/compiler/optimizing/graph_checker.cc b/compiler/optimizing/graph_checker.cc
index 4e1cafee66..3de96b5d84 100644
--- a/compiler/optimizing/graph_checker.cc
+++ b/compiler/optimizing/graph_checker.cc
@@ -16,10 +16,12 @@
#include "graph_checker.h"
+#include <algorithm>
#include <map>
#include <string>
#include <sstream>
+#include "base/arena_containers.h"
#include "base/bit_vector-inl.h"
#include "base/stringprintf.h"
@@ -29,19 +31,21 @@ void GraphChecker::VisitBasicBlock(HBasicBlock* block) {
current_block_ = block;
// Check consistency with respect to predecessors of `block`.
- std::map<HBasicBlock*, size_t> predecessors_count;
+ ArenaSafeMap<HBasicBlock*, size_t> predecessors_count(
+ std::less<HBasicBlock*>(), GetGraph()->GetArena()->Adapter(kArenaAllocGraphChecker));
for (HBasicBlock* p : block->GetPredecessors()) {
- ++predecessors_count[p];
+ auto it = predecessors_count.find(p);
+ if (it != predecessors_count.end()) {
+ ++it->second;
+ } else {
+ predecessors_count.Put(p, 1u);
+ }
}
for (auto& pc : predecessors_count) {
HBasicBlock* p = pc.first;
size_t p_count_in_block_predecessors = pc.second;
- size_t block_count_in_p_successors = 0;
- for (HBasicBlock* p_successor : p->GetSuccessors()) {
- if (p_successor == block) {
- ++block_count_in_p_successors;
- }
- }
+ size_t block_count_in_p_successors =
+ std::count(p->GetSuccessors().begin(), p->GetSuccessors().end(), block);
if (p_count_in_block_predecessors != block_count_in_p_successors) {
AddError(StringPrintf(
"Block %d lists %zu occurrences of block %d in its predecessors, whereas "
@@ -52,19 +56,21 @@ void GraphChecker::VisitBasicBlock(HBasicBlock* block) {
}
// Check consistency with respect to successors of `block`.
- std::map<HBasicBlock*, size_t> successors_count;
+ ArenaSafeMap<HBasicBlock*, size_t> successors_count(
+ std::less<HBasicBlock*>(), GetGraph()->GetArena()->Adapter(kArenaAllocGraphChecker));
for (HBasicBlock* s : block->GetSuccessors()) {
- ++successors_count[s];
+ auto it = successors_count.find(s);
+ if (it != successors_count.end()) {
+ ++it->second;
+ } else {
+ successors_count.Put(s, 1u);
+ }
}
for (auto& sc : successors_count) {
HBasicBlock* s = sc.first;
size_t s_count_in_block_successors = sc.second;
- size_t block_count_in_s_predecessors = 0;
- for (HBasicBlock* s_predecessor : s->GetPredecessors()) {
- if (s_predecessor == block) {
- ++block_count_in_s_predecessors;
- }
- }
+ size_t block_count_in_s_predecessors =
+ std::count(s->GetPredecessors().begin(), s->GetPredecessors().end(), block);
if (s_count_in_block_successors != block_count_in_s_predecessors) {
AddError(StringPrintf(
"Block %d lists %zu occurrences of block %d in its successors, whereas "
@@ -351,7 +357,7 @@ void SSAChecker::VisitBasicBlock(HBasicBlock* block) {
// never exceptional successors.
const size_t num_normal_successors = block->NumberOfNormalSuccessors();
for (size_t j = 0; j < num_normal_successors; ++j) {
- HBasicBlock* successor = block->GetSuccessor(j);
+ HBasicBlock* successor = block->GetSuccessors()[j];
if (successor->IsCatchBlock()) {
AddError(StringPrintf("Catch block %d is a normal successor of block %d.",
successor->GetBlockId(),
@@ -359,7 +365,7 @@ void SSAChecker::VisitBasicBlock(HBasicBlock* block) {
}
}
for (size_t j = num_normal_successors, e = block->GetSuccessors().size(); j < e; ++j) {
- HBasicBlock* successor = block->GetSuccessor(j);
+ HBasicBlock* successor = block->GetSuccessors()[j];
if (!successor->IsCatchBlock()) {
AddError(StringPrintf("Normal block %d is an exceptional successor of block %d.",
successor->GetBlockId(),
@@ -373,7 +379,7 @@ void SSAChecker::VisitBasicBlock(HBasicBlock* block) {
// not accounted for.
if (block->NumberOfNormalSuccessors() > 1) {
for (size_t j = 0, e = block->NumberOfNormalSuccessors(); j < e; ++j) {
- HBasicBlock* successor = block->GetSuccessor(j);
+ HBasicBlock* successor = block->GetSuccessors()[j];
if (successor->GetPredecessors().size() > 1) {
AddError(StringPrintf("Critical edge between blocks %d and %d.",
block->GetBlockId(),
@@ -456,14 +462,14 @@ void SSAChecker::CheckLoop(HBasicBlock* loop_header) {
id,
num_preds));
} else {
- HBasicBlock* first_predecessor = loop_header->GetPredecessor(0);
+ HBasicBlock* first_predecessor = loop_header->GetPredecessors()[0];
if (loop_information->IsBackEdge(*first_predecessor)) {
AddError(StringPrintf(
"First predecessor of loop header %d is a back edge.",
id));
}
for (size_t i = 1, e = loop_header->GetPredecessors().size(); i < e; ++i) {
- HBasicBlock* predecessor = loop_header->GetPredecessor(i);
+ HBasicBlock* predecessor = loop_header->GetPredecessors()[i];
if (!loop_information->IsBackEdge(*predecessor)) {
AddError(StringPrintf(
"Loop header %d has multiple incoming (non back edge) blocks.",
@@ -493,7 +499,7 @@ void SSAChecker::CheckLoop(HBasicBlock* loop_header) {
// Ensure all blocks in the loop are live and dominated by the loop header.
for (uint32_t i : loop_blocks.Indexes()) {
- HBasicBlock* loop_block = GetGraph()->GetBlock(i);
+ HBasicBlock* loop_block = GetGraph()->GetBlocks()[i];
if (loop_block == nullptr) {
AddError(StringPrintf("Loop defined by header %d contains a previously removed block %d.",
id,
diff --git a/compiler/optimizing/graph_checker.h b/compiler/optimizing/graph_checker.h
index 7ddffc136a..abf3659d91 100644
--- a/compiler/optimizing/graph_checker.h
+++ b/compiler/optimizing/graph_checker.h
@@ -26,12 +26,11 @@ namespace art {
// A control-flow graph visitor performing various checks.
class GraphChecker : public HGraphDelegateVisitor {
public:
- GraphChecker(ArenaAllocator* allocator, HGraph* graph,
- const char* dump_prefix = "art::GraphChecker: ")
+ explicit GraphChecker(HGraph* graph, const char* dump_prefix = "art::GraphChecker: ")
: HGraphDelegateVisitor(graph),
- allocator_(allocator),
+ errors_(graph->GetArena()->Adapter(kArenaAllocGraphChecker)),
dump_prefix_(dump_prefix),
- seen_ids_(allocator, graph->GetCurrentInstructionId(), false) {}
+ seen_ids_(graph->GetArena(), graph->GetCurrentInstructionId(), false) {}
// Check the whole graph (in insertion order).
virtual void Run() { VisitInsertionOrder(); }
@@ -65,7 +64,7 @@ class GraphChecker : public HGraphDelegateVisitor {
}
// Get the list of detected errors.
- const std::vector<std::string>& GetErrors() const {
+ const ArenaVector<std::string>& GetErrors() const {
return errors_;
}
@@ -82,11 +81,10 @@ class GraphChecker : public HGraphDelegateVisitor {
errors_.push_back(error);
}
- ArenaAllocator* const allocator_;
// The block currently visited.
HBasicBlock* current_block_ = nullptr;
// Errors encountered while checking the graph.
- std::vector<std::string> errors_;
+ ArenaVector<std::string> errors_;
private:
// String displayed before dumped errors.
@@ -102,9 +100,8 @@ class SSAChecker : public GraphChecker {
public:
typedef GraphChecker super_type;
- // TODO: There's no need to pass a separate allocator as we could get it from the graph.
- SSAChecker(ArenaAllocator* allocator, HGraph* graph)
- : GraphChecker(allocator, graph, "art::SSAChecker: ") {}
+ explicit SSAChecker(HGraph* graph)
+ : GraphChecker(graph, "art::SSAChecker: ") {}
// Check the whole graph (in reverse post-order).
void Run() OVERRIDE {
diff --git a/compiler/optimizing/graph_checker_test.cc b/compiler/optimizing/graph_checker_test.cc
index 0f6677519e..fee56c7f9e 100644
--- a/compiler/optimizing/graph_checker_test.cc
+++ b/compiler/optimizing/graph_checker_test.cc
@@ -50,7 +50,7 @@ static void TestCode(const uint16_t* data) {
HGraph* graph = CreateCFG(&allocator, data);
ASSERT_NE(graph, nullptr);
- GraphChecker graph_checker(&allocator, graph);
+ GraphChecker graph_checker(graph);
graph_checker.Run();
ASSERT_TRUE(graph_checker.IsValid());
}
@@ -64,7 +64,7 @@ static void TestCodeSSA(const uint16_t* data) {
graph->BuildDominatorTree();
graph->TransformToSsa();
- SSAChecker ssa_checker(&allocator, graph);
+ SSAChecker ssa_checker(graph);
ssa_checker.Run();
ASSERT_TRUE(ssa_checker.IsValid());
}
@@ -112,7 +112,7 @@ TEST(GraphChecker, InconsistentPredecessorsAndSuccessors) {
ArenaAllocator allocator(&pool);
HGraph* graph = CreateSimpleCFG(&allocator);
- GraphChecker graph_checker(&allocator, graph);
+ GraphChecker graph_checker(graph);
graph_checker.Run();
ASSERT_TRUE(graph_checker.IsValid());
@@ -130,7 +130,7 @@ TEST(GraphChecker, BlockEndingWithNonBranchInstruction) {
ArenaAllocator allocator(&pool);
HGraph* graph = CreateSimpleCFG(&allocator);
- GraphChecker graph_checker(&allocator, graph);
+ GraphChecker graph_checker(graph);
graph_checker.Run();
ASSERT_TRUE(graph_checker.IsValid());
diff --git a/compiler/optimizing/graph_test.cc b/compiler/optimizing/graph_test.cc
index 7968e88117..d4b9b71952 100644
--- a/compiler/optimizing/graph_test.cc
+++ b/compiler/optimizing/graph_test.cc
@@ -99,7 +99,7 @@ TEST(GraphTest, IfSuccessorSimpleJoinBlock1) {
ASSERT_NE(false_block, return_block);
// Ensure the new block branches to the join block.
- ASSERT_EQ(false_block->GetSuccessor(0), return_block);
+ ASSERT_EQ(false_block->GetSuccessors()[0], return_block);
}
// Test that the successors of an if block stay consistent after a SimplifyCFG.
@@ -134,7 +134,7 @@ TEST(GraphTest, IfSuccessorSimpleJoinBlock2) {
ASSERT_NE(true_block, return_block);
// Ensure the new block branches to the join block.
- ASSERT_EQ(true_block->GetSuccessor(0), return_block);
+ ASSERT_EQ(true_block->GetSuccessors()[0], return_block);
}
// Test that the successors of an if block stay consistent after a SimplifyCFG.
@@ -164,11 +164,11 @@ TEST(GraphTest, IfSuccessorMultipleBackEdges1) {
// Ensure there is only one back edge.
ASSERT_EQ(if_block->GetPredecessors().size(), 2u);
- ASSERT_EQ(if_block->GetPredecessor(0), entry_block);
- ASSERT_NE(if_block->GetPredecessor(1), if_block);
+ ASSERT_EQ(if_block->GetPredecessors()[0], entry_block);
+ ASSERT_NE(if_block->GetPredecessors()[1], if_block);
// Ensure the new block is the back edge.
- ASSERT_EQ(if_block->GetPredecessor(1),
+ ASSERT_EQ(if_block->GetPredecessors()[1],
if_block->GetLastInstruction()->AsIf()->IfTrueSuccessor());
}
@@ -199,11 +199,11 @@ TEST(GraphTest, IfSuccessorMultipleBackEdges2) {
// Ensure there is only one back edge.
ASSERT_EQ(if_block->GetPredecessors().size(), 2u);
- ASSERT_EQ(if_block->GetPredecessor(0), entry_block);
- ASSERT_NE(if_block->GetPredecessor(1), if_block);
+ ASSERT_EQ(if_block->GetPredecessors()[0], entry_block);
+ ASSERT_NE(if_block->GetPredecessors()[1], if_block);
// Ensure the new block is the back edge.
- ASSERT_EQ(if_block->GetPredecessor(1),
+ ASSERT_EQ(if_block->GetPredecessors()[1],
if_block->GetLastInstruction()->AsIf()->IfFalseSuccessor());
}
@@ -242,7 +242,7 @@ TEST(GraphTest, IfSuccessorMultiplePreHeaders1) {
// Ensure the new block is the successor of the true block.
ASSERT_EQ(if_instr->IfTrueSuccessor()->GetSuccessors().size(), 1u);
- ASSERT_EQ(if_instr->IfTrueSuccessor()->GetSuccessor(0),
+ ASSERT_EQ(if_instr->IfTrueSuccessor()->GetSuccessors()[0],
loop_block->GetLoopInformation()->GetPreHeader());
}
@@ -280,7 +280,7 @@ TEST(GraphTest, IfSuccessorMultiplePreHeaders2) {
// Ensure the new block is the successor of the false block.
ASSERT_EQ(if_instr->IfFalseSuccessor()->GetSuccessors().size(), 1u);
- ASSERT_EQ(if_instr->IfFalseSuccessor()->GetSuccessor(0),
+ ASSERT_EQ(if_instr->IfFalseSuccessor()->GetSuccessors()[0],
loop_block->GetLoopInformation()->GetPreHeader());
}
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index 7a83662696..4111671a9b 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -253,7 +253,7 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
AddIndent();
output_ << "successors";
for (size_t i = 0; i < block->NumberOfNormalSuccessors(); ++i) {
- HBasicBlock* successor = block->GetSuccessor(i);
+ HBasicBlock* successor = block->GetSuccessors()[i];
output_ << " \"B" << successor->GetBlockId() << "\" ";
}
output_<< std::endl;
@@ -263,7 +263,7 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
AddIndent();
output_ << "xhandlers";
for (size_t i = block->NumberOfNormalSuccessors(); i < block->GetSuccessors().size(); ++i) {
- HBasicBlock* handler = block->GetSuccessor(i);
+ HBasicBlock* handler = block->GetSuccessors()[i];
output_ << " \"B" << handler->GetBlockId() << "\" ";
}
if (block->IsExitBlock() &&
@@ -362,6 +362,8 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
void VisitLoadClass(HLoadClass* load_class) OVERRIDE {
StartAttributeStream("gen_clinit_check") << std::boolalpha
<< load_class->MustGenerateClinitCheck() << std::noboolalpha;
+ StartAttributeStream("needs_access_check") << std::boolalpha
+ << load_class->NeedsAccessCheck() << std::noboolalpha;
}
void VisitCheckCast(HCheckCast* check_cast) OVERRIDE {
@@ -501,8 +503,11 @@ class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
StartAttributeStream("can_be_null")
<< std::boolalpha << instruction->CanBeNull() << std::noboolalpha;
StartAttributeStream("exact") << std::boolalpha << info.IsExact() << std::noboolalpha;
+ } else if (instruction->IsLoadClass()) {
+ StartAttributeStream("klass") << "unresolved";
} else {
- DCHECK(!is_after_pass_) << "Type info should be valid after reference type propagation";
+ DCHECK(!is_after_pass_)
+ << "Expected a valid rti after reference type propagation";
}
}
if (disasm_info_ != nullptr) {
diff --git a/compiler/optimizing/gvn.cc b/compiler/optimizing/gvn.cc
index 7cf061773f..0a1758a936 100644
--- a/compiler/optimizing/gvn.cc
+++ b/compiler/optimizing/gvn.cc
@@ -351,7 +351,7 @@ void GlobalValueNumberer::VisitBasicBlock(HBasicBlock* block) {
HBasicBlock* dominator = block->GetDominator();
ValueSet* dominator_set = sets_[dominator->GetBlockId()];
if (dominator->GetSuccessors().size() == 1) {
- DCHECK_EQ(dominator->GetSuccessor(0), block);
+ DCHECK_EQ(dominator->GetSuccessors()[0], block);
set = dominator_set;
} else {
// We have to copy if the dominator has other successors, or `block` is not a successor
diff --git a/compiler/optimizing/gvn_test.cc b/compiler/optimizing/gvn_test.cc
index 32f45b5669..56f2718264 100644
--- a/compiler/optimizing/gvn_test.cc
+++ b/compiler/optimizing/gvn_test.cc
@@ -47,14 +47,16 @@ TEST(GVNTest, LocalFieldElimination) {
false,
kUnknownFieldIndex,
graph->GetDexFile(),
- dex_cache));
+ dex_cache,
+ 0));
block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
Primitive::kPrimNot,
MemberOffset(42),
false,
kUnknownFieldIndex,
graph->GetDexFile(),
- dex_cache));
+ dex_cache,
+ 0));
HInstruction* to_remove = block->GetLastInstruction();
block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
Primitive::kPrimNot,
@@ -62,7 +64,8 @@ TEST(GVNTest, LocalFieldElimination) {
false,
kUnknownFieldIndex,
graph->GetDexFile(),
- dex_cache));
+ dex_cache,
+ 0));
HInstruction* different_offset = block->GetLastInstruction();
// Kill the value.
block->AddInstruction(new (&allocator) HInstanceFieldSet(parameter,
@@ -72,14 +75,16 @@ TEST(GVNTest, LocalFieldElimination) {
false,
kUnknownFieldIndex,
graph->GetDexFile(),
- dex_cache));
+ dex_cache,
+ 0));
block->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
Primitive::kPrimNot,
MemberOffset(42),
false,
kUnknownFieldIndex,
graph->GetDexFile(),
- dex_cache));
+ dex_cache,
+ 0));
HInstruction* use_after_kill = block->GetLastInstruction();
block->AddInstruction(new (&allocator) HExit());
@@ -118,7 +123,8 @@ TEST(GVNTest, GlobalFieldElimination) {
false,
kUnknownFieldIndex,
graph->GetDexFile(),
- dex_cache));
+ dex_cache,
+ 0));
block->AddInstruction(new (&allocator) HIf(block->GetLastInstruction()));
HBasicBlock* then = new (&allocator) HBasicBlock(graph);
@@ -139,7 +145,8 @@ TEST(GVNTest, GlobalFieldElimination) {
false,
kUnknownFieldIndex,
graph->GetDexFile(),
- dex_cache));
+ dex_cache,
+ 0));
then->AddInstruction(new (&allocator) HGoto());
else_->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
Primitive::kPrimBoolean,
@@ -147,7 +154,8 @@ TEST(GVNTest, GlobalFieldElimination) {
false,
kUnknownFieldIndex,
graph->GetDexFile(),
- dex_cache));
+ dex_cache,
+ 0));
else_->AddInstruction(new (&allocator) HGoto());
join->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
Primitive::kPrimBoolean,
@@ -155,7 +163,8 @@ TEST(GVNTest, GlobalFieldElimination) {
false,
kUnknownFieldIndex,
graph->GetDexFile(),
- dex_cache));
+ dex_cache,
+ 0));
join->AddInstruction(new (&allocator) HExit());
graph->TryBuildingSsa();
@@ -191,7 +200,8 @@ TEST(GVNTest, LoopFieldElimination) {
false,
kUnknownFieldIndex,
graph->GetDexFile(),
- dex_cache));
+ dex_cache,
+ 0));
block->AddInstruction(new (&allocator) HGoto());
HBasicBlock* loop_header = new (&allocator) HBasicBlock(graph);
@@ -212,7 +222,8 @@ TEST(GVNTest, LoopFieldElimination) {
false,
kUnknownFieldIndex,
graph->GetDexFile(),
- dex_cache));
+ dex_cache,
+ 0));
HInstruction* field_get_in_loop_header = loop_header->GetLastInstruction();
loop_header->AddInstruction(new (&allocator) HIf(block->GetLastInstruction()));
@@ -225,7 +236,8 @@ TEST(GVNTest, LoopFieldElimination) {
false,
kUnknownFieldIndex,
graph->GetDexFile(),
- dex_cache));
+ dex_cache,
+ 0));
HInstruction* field_set = loop_body->GetLastInstruction();
loop_body->AddInstruction(new (&allocator) HInstanceFieldGet(parameter,
Primitive::kPrimBoolean,
@@ -233,7 +245,8 @@ TEST(GVNTest, LoopFieldElimination) {
false,
kUnknownFieldIndex,
graph->GetDexFile(),
- dex_cache));
+ dex_cache,
+ 0));
HInstruction* field_get_in_loop_body = loop_body->GetLastInstruction();
loop_body->AddInstruction(new (&allocator) HGoto());
@@ -243,7 +256,8 @@ TEST(GVNTest, LoopFieldElimination) {
false,
kUnknownFieldIndex,
graph->GetDexFile(),
- dex_cache));
+ dex_cache,
+ 0));
HInstruction* field_get_in_exit = exit->GetLastInstruction();
exit->AddInstruction(new (&allocator) HExit());
@@ -339,7 +353,8 @@ TEST(GVNTest, LoopSideEffects) {
false,
kUnknownFieldIndex,
graph->GetDexFile(),
- dex_cache));
+ dex_cache,
+ 0));
SideEffectsAnalysis side_effects(graph);
side_effects.Run();
@@ -362,7 +377,8 @@ TEST(GVNTest, LoopSideEffects) {
false,
kUnknownFieldIndex,
graph->GetDexFile(),
- dex_cache),
+ dex_cache,
+ 0),
outer_loop_body->GetLastInstruction());
SideEffectsAnalysis side_effects(graph);
@@ -386,7 +402,8 @@ TEST(GVNTest, LoopSideEffects) {
false,
kUnknownFieldIndex,
graph->GetDexFile(),
- dex_cache),
+ dex_cache,
+ 0),
inner_loop_body->GetLastInstruction());
SideEffectsAnalysis side_effects(graph);
diff --git a/compiler/optimizing/induction_var_analysis.cc b/compiler/optimizing/induction_var_analysis.cc
index e5123deed6..8968a44da8 100644
--- a/compiler/optimizing/induction_var_analysis.cc
+++ b/compiler/optimizing/induction_var_analysis.cc
@@ -47,7 +47,7 @@ static void RotateEntryPhiFirst(HLoopInformation* loop,
size_t phi_pos = -1;
const size_t size = scc->size();
for (size_t i = 0; i < size; i++) {
- HInstruction* other = scc->at(i);
+ HInstruction* other = (*scc)[i];
if (other->IsLoopHeaderPhi() && (phi == nullptr || phis.FoundBefore(other, phi))) {
phi = other;
phi_pos = i;
@@ -58,8 +58,7 @@ static void RotateEntryPhiFirst(HLoopInformation* loop,
if (phi != nullptr) {
new_scc->clear();
for (size_t i = 0; i < size; i++) {
- DCHECK_LT(phi_pos, size);
- new_scc->push_back(scc->at(phi_pos));
+ new_scc->push_back((*scc)[phi_pos]);
if (++phi_pos >= size) phi_pos = 0;
}
DCHECK_EQ(size, new_scc->size());
@@ -651,8 +650,7 @@ bool HInductionVarAnalysis::IsTaken(InductionInfo* lower_expr,
case kCondLE: return lower_value <= upper_value;
case kCondGT: return lower_value > upper_value;
case kCondGE: return lower_value >= upper_value;
- case kCondEQ:
- case kCondNE: LOG(FATAL) << "CONDITION UNREACHABLE";
+ default: LOG(FATAL) << "CONDITION UNREACHABLE";
}
}
return false; // not certain, may be untaken
@@ -681,8 +679,8 @@ bool HInductionVarAnalysis::IsFinite(InductionInfo* upper_expr,
(IsIntAndGet(upper_expr, &value) && value >= (min - stride_value - 1));
case kCondGE:
return (IsIntAndGet(upper_expr, &value) && value >= (min - stride_value));
- case kCondEQ:
- case kCondNE: LOG(FATAL) << "CONDITION UNREACHABLE";
+ default:
+ LOG(FATAL) << "CONDITION UNREACHABLE";
}
return false; // not certain, may be infinite
}
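Aside: a minimal standalone sketch of the rotation idiom used by RotateEntryPhiFirst above (illustrative names, not ART code). Copying starts at the chosen position and the index wraps modulo the size, so the cyclic order is preserved with the selected element first.

#include <cstddef>
#include <vector>

// Rotate `scc` into `out` so that the element at `first_pos` comes first,
// preserving the cyclic order of the remaining elements.
template <typename T>
void RotateToFront(const std::vector<T>& scc, size_t first_pos, std::vector<T>* out) {
  const size_t size = scc.size();
  out->clear();
  for (size_t i = 0; i < size; ++i) {
    out->push_back(scc[first_pos]);
    if (++first_pos >= size) first_pos = 0;  // wrap around
  }
}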
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 0b65c564f7..f3b5f08c7e 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -43,6 +43,11 @@ namespace art {
static constexpr size_t kMaximumNumberOfHInstructions = 12;
void HInliner::Run() {
+ const CompilerOptions& compiler_options = compiler_driver_->GetCompilerOptions();
+ if ((compiler_options.GetInlineDepthLimit() == 0)
+ || (compiler_options.GetInlineMaxCodeUnits() == 0)) {
+ return;
+ }
if (graph_->IsDebuggable()) {
// For simplicity, we currently never inline when the graph is debuggable. This avoids
// doing some logic in the runtime to discover if a method could have been inlined.
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 3287a0a119..d468540091 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -16,15 +16,16 @@
#include "instruction_simplifier.h"
+#include "intrinsics.h"
#include "mirror/class-inl.h"
#include "scoped_thread_state_change.h"
namespace art {
-class InstructionSimplifierVisitor : public HGraphVisitor {
+class InstructionSimplifierVisitor : public HGraphDelegateVisitor {
public:
InstructionSimplifierVisitor(HGraph* graph, OptimizingCompilerStats* stats)
- : HGraphVisitor(graph),
+ : HGraphDelegateVisitor(graph),
stats_(stats) {}
void Run();
@@ -71,9 +72,13 @@ class InstructionSimplifierVisitor : public HGraphVisitor {
void VisitXor(HXor* instruction) OVERRIDE;
void VisitInstanceOf(HInstanceOf* instruction) OVERRIDE;
void VisitFakeString(HFakeString* fake_string) OVERRIDE;
+ void VisitInvoke(HInvoke* invoke) OVERRIDE;
bool CanEnsureNotNullAt(HInstruction* instr, HInstruction* at) const;
+ void SimplifySystemArrayCopy(HInvoke* invoke);
+ void SimplifyStringEquals(HInvoke* invoke);
+
OptimizingCompilerStats* stats_;
bool simplification_occurred_ = false;
int simplifications_at_current_position_ = 0;
@@ -216,7 +221,11 @@ static bool TypeCheckHasKnownOutcome(HLoadClass* klass, HInstruction* object, bo
}
ReferenceTypeInfo class_rti = klass->GetLoadedClassRTI();
- DCHECK(class_rti.IsValid() && class_rti.IsExact());
+ if (!class_rti.IsValid()) {
+ // Happens when the loaded class is unresolved.
+ return false;
+ }
+ DCHECK(class_rti.IsExact());
if (class_rti.IsSupertypeOf(obj_rti)) {
*outcome = true;
return true;
@@ -236,6 +245,12 @@ static bool TypeCheckHasKnownOutcome(HLoadClass* klass, HInstruction* object, bo
void InstructionSimplifierVisitor::VisitCheckCast(HCheckCast* check_cast) {
HInstruction* object = check_cast->InputAt(0);
+ HLoadClass* load_class = check_cast->InputAt(1)->AsLoadClass();
+ if (load_class->NeedsAccessCheck()) {
+ // If we need to perform an access check we cannot remove the instruction.
+ return;
+ }
+
if (CanEnsureNotNullAt(object, check_cast)) {
check_cast->ClearMustDoNullCheck();
}
@@ -249,7 +264,6 @@ void InstructionSimplifierVisitor::VisitCheckCast(HCheckCast* check_cast) {
}
bool outcome;
- HLoadClass* load_class = check_cast->InputAt(1)->AsLoadClass();
if (TypeCheckHasKnownOutcome(load_class, object, &outcome)) {
if (outcome) {
check_cast->GetBlock()->RemoveInstruction(check_cast);
@@ -271,6 +285,12 @@ void InstructionSimplifierVisitor::VisitCheckCast(HCheckCast* check_cast) {
void InstructionSimplifierVisitor::VisitInstanceOf(HInstanceOf* instruction) {
HInstruction* object = instruction->InputAt(0);
+ HLoadClass* load_class = instruction->InputAt(1)->AsLoadClass();
+ if (load_class->NeedsAccessCheck()) {
+ // If we need to perform an access check we cannot remove the instruction.
+ return;
+ }
+
bool can_be_null = true;
if (CanEnsureNotNullAt(object, instruction)) {
can_be_null = false;
@@ -286,7 +306,6 @@ void InstructionSimplifierVisitor::VisitInstanceOf(HInstanceOf* instruction) {
}
bool outcome;
- HLoadClass* load_class = instruction->InputAt(1)->AsLoadClass();
if (TypeCheckHasKnownOutcome(load_class, object, &outcome)) {
if (outcome && can_be_null) {
// Type test will succeed, we just need a null test.
@@ -599,6 +618,8 @@ void InstructionSimplifierVisitor::VisitLessThanOrEqual(HLessThanOrEqual* condit
VisitCondition(condition);
}
+// TODO: unsigned comparisons too?
+
void InstructionSimplifierVisitor::VisitCondition(HCondition* condition) {
// Try to fold an HCompare into this HCondition.
@@ -1033,4 +1054,101 @@ void InstructionSimplifierVisitor::VisitFakeString(HFakeString* instruction) {
instruction->GetBlock()->RemoveInstruction(instruction);
}
+void InstructionSimplifierVisitor::SimplifyStringEquals(HInvoke* instruction) {
+ HInstruction* argument = instruction->InputAt(1);
+ HInstruction* receiver = instruction->InputAt(0);
+ if (receiver == argument) {
+ // Because String.equals is an instance call, the receiver is
+ // a null check if we don't know it's null. The argument however, will
+ // be the actual object. So we cannot end up in a situation where both
+ // are equal but could be null.
+ DCHECK(CanEnsureNotNullAt(argument, instruction));
+ instruction->ReplaceWith(GetGraph()->GetIntConstant(1));
+ instruction->GetBlock()->RemoveInstruction(instruction);
+ } else {
+ StringEqualsOptimizations optimizations(instruction);
+ if (CanEnsureNotNullAt(argument, instruction)) {
+ optimizations.SetArgumentNotNull();
+ }
+ ScopedObjectAccess soa(Thread::Current());
+ ReferenceTypeInfo argument_rti = argument->GetReferenceTypeInfo();
+ if (argument_rti.IsValid() && argument_rti.IsStringClass()) {
+ optimizations.SetArgumentIsString();
+ }
+ }
+}
+
+static bool IsArrayLengthOf(HInstruction* potential_length, HInstruction* potential_array) {
+ if (potential_length->IsArrayLength()) {
+ return potential_length->InputAt(0) == potential_array;
+ }
+
+ if (potential_array->IsNewArray()) {
+ return potential_array->InputAt(0) == potential_length;
+ }
+
+ return false;
+}
+
+void InstructionSimplifierVisitor::SimplifySystemArrayCopy(HInvoke* instruction) {
+ HInstruction* source = instruction->InputAt(0);
+ HInstruction* destination = instruction->InputAt(2);
+ HInstruction* count = instruction->InputAt(4);
+ SystemArrayCopyOptimizations optimizations(instruction);
+ if (CanEnsureNotNullAt(source, instruction)) {
+ optimizations.SetSourceIsNotNull();
+ }
+ if (CanEnsureNotNullAt(destination, instruction)) {
+ optimizations.SetDestinationIsNotNull();
+ }
+ if (destination == source) {
+ optimizations.SetDestinationIsSource();
+ }
+
+ if (IsArrayLengthOf(count, source)) {
+ optimizations.SetCountIsSourceLength();
+ }
+
+ if (IsArrayLengthOf(count, destination)) {
+ optimizations.SetCountIsDestinationLength();
+ }
+
+ {
+ ScopedObjectAccess soa(Thread::Current());
+ ReferenceTypeInfo destination_rti = destination->GetReferenceTypeInfo();
+ if (destination_rti.IsValid()) {
+ if (destination_rti.IsObjectArray()) {
+ if (destination_rti.IsExact()) {
+ optimizations.SetDoesNotNeedTypeCheck();
+ }
+ optimizations.SetDestinationIsTypedObjectArray();
+ }
+ if (destination_rti.IsPrimitiveArrayClass()) {
+ optimizations.SetDestinationIsPrimitiveArray();
+ } else if (destination_rti.IsNonPrimitiveArrayClass()) {
+ optimizations.SetDestinationIsNonPrimitiveArray();
+ }
+ }
+ ReferenceTypeInfo source_rti = source->GetReferenceTypeInfo();
+ if (source_rti.IsValid()) {
+ if (destination_rti.IsValid() && destination_rti.CanArrayHoldValuesOf(source_rti)) {
+ optimizations.SetDoesNotNeedTypeCheck();
+ }
+ if (source_rti.IsPrimitiveArrayClass()) {
+ optimizations.SetSourceIsPrimitiveArray();
+ } else if (source_rti.IsNonPrimitiveArrayClass()) {
+ optimizations.SetSourceIsNonPrimitiveArray();
+ }
+ }
+ }
+}
+
+void InstructionSimplifierVisitor::VisitInvoke(HInvoke* instruction) {
+ if (instruction->GetIntrinsic() == Intrinsics::kStringEquals) {
+ SimplifyStringEquals(instruction);
+ } else if (instruction->GetIntrinsic() == Intrinsics::kSystemArrayCopy) {
+ SimplifySystemArrayCopy(instruction);
+ }
+}
+
} // namespace art
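As a rough standalone analogue of the type reasoning in SimplifySystemArrayCopy above (toy types, not the ART reference-type-info API): the per-element store check can be skipped when every element of the source array is statically assignable to the destination's component type.

#include <cassert>

// Toy class model: each class knows its superclass (nullptr at the root).
struct ToyClass {
  const ToyClass* super;
};

// Is `sub` the same class as, or a subclass of, `base`?
bool IsSubclassOf(const ToyClass* sub, const ToyClass* base) {
  for (const ToyClass* k = sub; k != nullptr; k = k->super) {
    if (k == base) return true;
  }
  return false;
}

// Rough analogue of the SetDoesNotNeedTypeCheck decision: store checks can be
// skipped when source elements are assignable to the destination component type.
bool CopyNeedsTypeCheck(const ToyClass* src_component, const ToyClass* dest_component) {
  return !IsSubclassOf(src_component, dest_component);
}

int main() {
  ToyClass object{nullptr};
  ToyClass string{&object};
  assert(!CopyNeedsTypeCheck(&string, &object));  // String[] into Object[]: no check needed
  assert(CopyNeedsTypeCheck(&object, &string));   // Object[] into String[]: check needed
  return 0;
}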
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index 95646222ef..dbe75249be 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -210,6 +210,9 @@ static Intrinsics GetIntrinsic(InlineMethod method, InstructionSet instruction_s
case kIntrinsicSystemArrayCopyCharArray:
return Intrinsics::kSystemArrayCopyChar;
+ case kIntrinsicSystemArrayCopy:
+ return Intrinsics::kSystemArrayCopy;
+
// Thread.currentThread.
case kIntrinsicCurrentThread:
return Intrinsics::kThreadCurrentThread;
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index d1a17b6def..e459516e59 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -116,6 +116,80 @@ INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
DISALLOW_COPY_AND_ASSIGN(IntrinsicVisitor);
};
+#define GENERIC_OPTIMIZATION(name, bit) \
+public: \
+void Set##name() { SetBit(k##name); } \
+bool Get##name() const { return IsBitSet(k##name); } \
+private: \
+static constexpr int k##name = bit
+
+class IntrinsicOptimizations : public ValueObject {
+ public:
+ explicit IntrinsicOptimizations(HInvoke* invoke) : value_(invoke->GetIntrinsicOptimizations()) {}
+ explicit IntrinsicOptimizations(const HInvoke& invoke)
+ : value_(invoke.GetIntrinsicOptimizations()) {}
+
+ static constexpr int kNumberOfGenericOptimizations = 2;
+ GENERIC_OPTIMIZATION(DoesNotNeedDexCache, 0);
+ GENERIC_OPTIMIZATION(DoesNotNeedEnvironment, 1);
+
+ protected:
+ bool IsBitSet(uint32_t bit) const {
+ return (*value_ & (1 << bit)) != 0u;
+ }
+
+ void SetBit(uint32_t bit) {
+ *(const_cast<uint32_t*>(value_)) |= (1 << bit);
+ }
+
+ private:
+ const uint32_t* value_;
+
+ DISALLOW_COPY_AND_ASSIGN(IntrinsicOptimizations);
+};
+
+#undef GENERIC_OPTIMIZATION
+
+#define INTRINSIC_OPTIMIZATION(name, bit) \
+public: \
+void Set##name() { SetBit(k##name); } \
+bool Get##name() const { return IsBitSet(k##name); } \
+private: \
+static constexpr int k##name = bit + kNumberOfGenericOptimizations
+
+class StringEqualsOptimizations : public IntrinsicOptimizations {
+ public:
+ explicit StringEqualsOptimizations(HInvoke* invoke) : IntrinsicOptimizations(invoke) {}
+
+ INTRINSIC_OPTIMIZATION(ArgumentNotNull, 0);
+ INTRINSIC_OPTIMIZATION(ArgumentIsString, 1);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(StringEqualsOptimizations);
+};
+
+class SystemArrayCopyOptimizations : public IntrinsicOptimizations {
+ public:
+ explicit SystemArrayCopyOptimizations(HInvoke* invoke) : IntrinsicOptimizations(invoke) {}
+
+ INTRINSIC_OPTIMIZATION(SourceIsNotNull, 0);
+ INTRINSIC_OPTIMIZATION(DestinationIsNotNull, 1);
+ INTRINSIC_OPTIMIZATION(DestinationIsSource, 2);
+ INTRINSIC_OPTIMIZATION(CountIsSourceLength, 3);
+ INTRINSIC_OPTIMIZATION(CountIsDestinationLength, 4);
+ INTRINSIC_OPTIMIZATION(DoesNotNeedTypeCheck, 5);
+ INTRINSIC_OPTIMIZATION(DestinationIsTypedObjectArray, 6);
+ INTRINSIC_OPTIMIZATION(DestinationIsNonPrimitiveArray, 7);
+ INTRINSIC_OPTIMIZATION(DestinationIsPrimitiveArray, 8);
+ INTRINSIC_OPTIMIZATION(SourceIsNonPrimitiveArray, 9);
+ INTRINSIC_OPTIMIZATION(SourceIsPrimitiveArray, 10);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(SystemArrayCopyOptimizations);
+};
+
+#undef INTRINSIC_OPTIMIZATION
+
} // namespace art
#endif // ART_COMPILER_OPTIMIZING_INTRINSICS_H_
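Read standalone, the macro pattern above amounts to the following compile-only sketch (illustrative names; the real classes operate on the flag word stored on HInvoke): each expansion contributes one named bit, and intrinsic-specific bits are allocated after the generic ones.

#include <cassert>
#include <cstdint>

#define TOY_OPTIMIZATION(name, bit)                                  \
 public:                                                             \
  void Set##name() { flags_ |= (1u << (bit)); }                      \
  bool Get##name() const { return (flags_ & (1u << (bit))) != 0u; }  \
 private:                                                            \
  static constexpr int k##name##Bit = (bit)

class ToyOptimizations {
 public:
  static constexpr int kNumberOfGenericBits = 2;
  // Generic bits, shared by every intrinsic.
  TOY_OPTIMIZATION(DoesNotNeedDexCache, 0);
  TOY_OPTIMIZATION(DoesNotNeedEnvironment, 1);
  // Intrinsic-specific bits are allocated after the generic ones.
  TOY_OPTIMIZATION(ArgumentNotNull, kNumberOfGenericBits + 0);
  TOY_OPTIMIZATION(ArgumentIsString, kNumberOfGenericBits + 1);

 private:
  uint32_t flags_ = 0u;
};

#undef TOY_OPTIMIZATION

int main() {
  ToyOptimizations opts;
  opts.SetArgumentNotNull();
  assert(opts.GetArgumentNotNull());
  assert(!opts.GetArgumentIsString());
  return 0;
}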
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 938c78e9c1..58e479afc7 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -1307,6 +1307,308 @@ void IntrinsicCodeGeneratorARM::VisitStringNewStringFromString(HInvoke* invoke)
__ Bind(slow_path->GetExitLabel());
}
+void IntrinsicLocationsBuilderARM::VisitSystemArrayCopy(HInvoke* invoke) {
+ CodeGenerator::CreateSystemArrayCopyLocationSummary(invoke);
+ LocationSummary* locations = invoke->GetLocations();
+ if (locations == nullptr) {
+ return;
+ }
+
+ HIntConstant* src_pos = invoke->InputAt(1)->AsIntConstant();
+ HIntConstant* dest_pos = invoke->InputAt(3)->AsIntConstant();
+ HIntConstant* length = invoke->InputAt(4)->AsIntConstant();
+
+ if (src_pos != nullptr && !assembler_->ShifterOperandCanAlwaysHold(src_pos->GetValue())) {
+ locations->SetInAt(1, Location::RequiresRegister());
+ }
+ if (dest_pos != nullptr && !assembler_->ShifterOperandCanAlwaysHold(dest_pos->GetValue())) {
+ locations->SetInAt(3, Location::RequiresRegister());
+ }
+ if (length != nullptr && !assembler_->ShifterOperandCanAlwaysHold(length->GetValue())) {
+ locations->SetInAt(4, Location::RequiresRegister());
+ }
+}
+
+static void CheckPosition(ArmAssembler* assembler,
+ Location pos,
+ Register input,
+ Location length,
+ SlowPathCode* slow_path,
+ Register input_len,
+ Register temp,
+ bool length_is_input_length = false) {
+ // Where is the length in the Array?
+ const uint32_t length_offset = mirror::Array::LengthOffset().Uint32Value();
+
+ if (pos.IsConstant()) {
+ int32_t pos_const = pos.GetConstant()->AsIntConstant()->GetValue();
+ if (pos_const == 0) {
+ if (!length_is_input_length) {
+ // Check that length(input) >= length.
+ __ LoadFromOffset(kLoadWord, temp, input, length_offset);
+ if (length.IsConstant()) {
+ __ cmp(temp, ShifterOperand(length.GetConstant()->AsIntConstant()->GetValue()));
+ } else {
+ __ cmp(temp, ShifterOperand(length.AsRegister<Register>()));
+ }
+ __ b(slow_path->GetEntryLabel(), LT);
+ }
+ } else {
+ // Check that length(input) >= pos.
+ __ LoadFromOffset(kLoadWord, input_len, input, length_offset);
+ __ subs(temp, input_len, ShifterOperand(pos_const));
+ __ b(slow_path->GetEntryLabel(), LT);
+
+ // Check that (length(input) - pos) >= length.
+ if (length.IsConstant()) {
+ __ cmp(temp, ShifterOperand(length.GetConstant()->AsIntConstant()->GetValue()));
+ } else {
+ __ cmp(temp, ShifterOperand(length.AsRegister<Register>()));
+ }
+ __ b(slow_path->GetEntryLabel(), LT);
+ }
+ } else if (length_is_input_length) {
+ // The only way the copy can succeed is if pos is zero.
+ Register pos_reg = pos.AsRegister<Register>();
+ __ CompareAndBranchIfNonZero(pos_reg, slow_path->GetEntryLabel());
+ } else {
+ // Check that pos >= 0.
+ Register pos_reg = pos.AsRegister<Register>();
+ __ cmp(pos_reg, ShifterOperand(0));
+ __ b(slow_path->GetEntryLabel(), LT);
+
+ // Check that pos <= length(input).
+ __ LoadFromOffset(kLoadWord, temp, input, length_offset);
+ __ subs(temp, temp, ShifterOperand(pos_reg));
+ __ b(slow_path->GetEntryLabel(), LT);
+
+ // Check that (length(input) - pos) >= length.
+ if (length.IsConstant()) {
+ __ cmp(temp, ShifterOperand(length.GetConstant()->AsIntConstant()->GetValue()));
+ } else {
+ __ cmp(temp, ShifterOperand(length.AsRegister<Register>()));
+ }
+ __ b(slow_path->GetEntryLabel(), LT);
+ }
+}
+
+void IntrinsicCodeGeneratorARM::VisitSystemArrayCopy(HInvoke* invoke) {
+ ArmAssembler* assembler = GetAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
+ uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
+ uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
+
+ Register src = locations->InAt(0).AsRegister<Register>();
+ Location src_pos = locations->InAt(1);
+ Register dest = locations->InAt(2).AsRegister<Register>();
+ Location dest_pos = locations->InAt(3);
+ Location length = locations->InAt(4);
+ Register temp1 = locations->GetTemp(0).AsRegister<Register>();
+ Register temp2 = locations->GetTemp(1).AsRegister<Register>();
+ Register temp3 = locations->GetTemp(2).AsRegister<Register>();
+
+ SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathARM(invoke);
+ codegen_->AddSlowPath(slow_path);
+
+ Label ok;
+ SystemArrayCopyOptimizations optimizations(invoke);
+
+ if (!optimizations.GetDestinationIsSource()) {
+ if (!src_pos.IsConstant() || !dest_pos.IsConstant()) {
+ __ cmp(src, ShifterOperand(dest));
+ }
+ }
+
+ // If source and destination are the same, we go to slow path if we need to do
+ // backward copying.
+ if (src_pos.IsConstant()) {
+ int32_t src_pos_constant = src_pos.GetConstant()->AsIntConstant()->GetValue();
+ if (dest_pos.IsConstant()) {
+ // Checked when building locations.
+ DCHECK(!optimizations.GetDestinationIsSource()
+ || (src_pos_constant >= dest_pos.GetConstant()->AsIntConstant()->GetValue()));
+ } else {
+ if (!optimizations.GetDestinationIsSource()) {
+ __ b(&ok, NE);
+ }
+ __ cmp(dest_pos.AsRegister<Register>(), ShifterOperand(src_pos_constant));
+ __ b(slow_path->GetEntryLabel(), GT);
+ }
+ } else {
+ if (!optimizations.GetDestinationIsSource()) {
+ __ b(&ok, NE);
+ }
+ if (dest_pos.IsConstant()) {
+ int32_t dest_pos_constant = dest_pos.GetConstant()->AsIntConstant()->GetValue();
+ __ cmp(src_pos.AsRegister<Register>(), ShifterOperand(dest_pos_constant));
+ } else {
+ __ cmp(src_pos.AsRegister<Register>(), ShifterOperand(dest_pos.AsRegister<Register>()));
+ }
+ __ b(slow_path->GetEntryLabel(), LT);
+ }
+
+ __ Bind(&ok);
+
+ if (!optimizations.GetSourceIsNotNull()) {
+ // Bail out if the source is null.
+ __ CompareAndBranchIfZero(src, slow_path->GetEntryLabel());
+ }
+
+ if (!optimizations.GetDestinationIsNotNull() && !optimizations.GetDestinationIsSource()) {
+ // Bail out if the destination is null.
+ __ CompareAndBranchIfZero(dest, slow_path->GetEntryLabel());
+ }
+
+ // If the length is negative, bail out.
+ // We have already checked in the LocationsBuilder for the constant case.
+ if (!length.IsConstant() &&
+ !optimizations.GetCountIsSourceLength() &&
+ !optimizations.GetCountIsDestinationLength()) {
+ __ cmp(length.AsRegister<Register>(), ShifterOperand(0));
+ __ b(slow_path->GetEntryLabel(), LT);
+ }
+
+ // Validity checks: source.
+ CheckPosition(assembler,
+ src_pos,
+ src,
+ length,
+ slow_path,
+ temp1,
+ temp2,
+ optimizations.GetCountIsSourceLength());
+
+ // Validity checks: dest.
+ CheckPosition(assembler,
+ dest_pos,
+ dest,
+ length,
+ slow_path,
+ temp1,
+ temp2,
+ optimizations.GetCountIsDestinationLength());
+
+ if (!optimizations.GetDoesNotNeedTypeCheck()) {
+ // Check whether all elements of the source array are assignable to the component
+ // type of the destination array. We do two checks: the classes are the same,
+ // or the destination is Object[]. If neither check succeeds, we go to the
+ // slow path.
+ __ LoadFromOffset(kLoadWord, temp1, dest, class_offset);
+ __ LoadFromOffset(kLoadWord, temp2, src, class_offset);
+ bool did_unpoison = false;
+ if (!optimizations.GetDestinationIsNonPrimitiveArray() ||
+ !optimizations.GetSourceIsNonPrimitiveArray()) {
+ // One or two of the references need to be unpoisoned. Unpoison them
+ // both to make the identity check valid.
+ __ MaybeUnpoisonHeapReference(temp1);
+ __ MaybeUnpoisonHeapReference(temp2);
+ did_unpoison = true;
+ }
+
+ if (!optimizations.GetDestinationIsNonPrimitiveArray()) {
+ // Bail out if the destination is not a non primitive array.
+ __ LoadFromOffset(kLoadWord, temp3, temp1, component_offset);
+ __ CompareAndBranchIfZero(temp3, slow_path->GetEntryLabel());
+ __ MaybeUnpoisonHeapReference(temp3);
+ __ LoadFromOffset(kLoadUnsignedHalfword, temp3, temp3, primitive_offset);
+ static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
+ __ CompareAndBranchIfNonZero(temp3, slow_path->GetEntryLabel());
+ }
+
+ if (!optimizations.GetSourceIsNonPrimitiveArray()) {
+ // Bail out if the source is not a non primitive array.
+ __ LoadFromOffset(kLoadWord, temp3, temp2, component_offset);
+ __ CompareAndBranchIfZero(temp3, slow_path->GetEntryLabel());
+ __ MaybeUnpoisonHeapReference(temp3);
+ __ LoadFromOffset(kLoadUnsignedHalfword, temp3, temp3, primitive_offset);
+ static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
+ __ CompareAndBranchIfNonZero(temp3, slow_path->GetEntryLabel());
+ }
+
+ __ cmp(temp1, ShifterOperand(temp2));
+
+ if (optimizations.GetDestinationIsTypedObjectArray()) {
+ Label do_copy;
+ __ b(&do_copy, EQ);
+ if (!did_unpoison) {
+ __ MaybeUnpoisonHeapReference(temp1);
+ }
+ __ LoadFromOffset(kLoadWord, temp1, temp1, component_offset);
+ __ MaybeUnpoisonHeapReference(temp1);
+ __ LoadFromOffset(kLoadWord, temp1, temp1, super_offset);
+ // No need to unpoison the result, we're comparing against null.
+ __ CompareAndBranchIfNonZero(temp1, slow_path->GetEntryLabel());
+ __ Bind(&do_copy);
+ } else {
+ __ b(slow_path->GetEntryLabel(), NE);
+ }
+ } else if (!optimizations.GetSourceIsNonPrimitiveArray()) {
+ DCHECK(optimizations.GetDestinationIsNonPrimitiveArray());
+ // Bail out if the source is not a non primitive array.
+ __ LoadFromOffset(kLoadWord, temp1, src, class_offset);
+ __ MaybeUnpoisonHeapReference(temp1);
+ __ LoadFromOffset(kLoadWord, temp3, temp1, component_offset);
+ __ CompareAndBranchIfZero(temp3, slow_path->GetEntryLabel());
+ __ MaybeUnpoisonHeapReference(temp3);
+ __ LoadFromOffset(kLoadUnsignedHalfword, temp3, temp3, primitive_offset);
+ static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
+ __ CompareAndBranchIfNonZero(temp3, slow_path->GetEntryLabel());
+ }
+
+ // Compute base source address, base destination address, and end source address.
+
+ uint32_t element_size = sizeof(int32_t);
+ uint32_t offset = mirror::Array::DataOffset(element_size).Uint32Value();
+ if (src_pos.IsConstant()) {
+ int32_t constant = src_pos.GetConstant()->AsIntConstant()->GetValue();
+ __ AddConstant(temp1, src, element_size * constant + offset);
+ } else {
+ __ add(temp1, src, ShifterOperand(src_pos.AsRegister<Register>(), LSL, 2));
+ __ AddConstant(temp1, offset);
+ }
+
+ if (dest_pos.IsConstant()) {
+ int32_t constant = dest_pos.GetConstant()->AsIntConstant()->GetValue();
+ __ AddConstant(temp2, dest, element_size * constant + offset);
+ } else {
+ __ add(temp2, dest, ShifterOperand(dest_pos.AsRegister<Register>(), LSL, 2));
+ __ AddConstant(temp2, offset);
+ }
+
+ if (length.IsConstant()) {
+ int32_t constant = length.GetConstant()->AsIntConstant()->GetValue();
+ __ AddConstant(temp3, temp1, element_size * constant);
+ } else {
+ __ add(temp3, temp1, ShifterOperand(length.AsRegister<Register>(), LSL, 2));
+ }
+
+ // Iterate over the arrays and do a raw copy of the objects. We don't need to
+ // poison/unpoison, nor do any read barrier as the next uses of the destination
+ // array will do it.
+ Label loop, done;
+ __ cmp(temp1, ShifterOperand(temp3));
+ __ b(&done, EQ);
+ __ Bind(&loop);
+ __ ldr(IP, Address(temp1, element_size, Address::PostIndex));
+ __ str(IP, Address(temp2, element_size, Address::PostIndex));
+ __ cmp(temp1, ShifterOperand(temp3));
+ __ b(&loop, NE);
+ __ Bind(&done);
+
+ // We only need one card marking on the destination array.
+ codegen_->MarkGCCard(temp1,
+ temp2,
+ dest,
+ Register(kNoRegister),
+ false);
+
+ __ Bind(slow_path->GetExitLabel());
+}
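In scalar form, the overlap handling and the raw copy loop emitted above come down to the following sketch (illustrative names, not the generated code): the fast path copies forward, so it is only taken when source and destination differ or when dest_pos <= src_pos; otherwise a backward copy would be required and the slow path is used.

#include <cstdint>

// A forward, element-by-element copy is safe unless source and destination
// are the same array and the destination range starts after the source range.
bool ForwardCopyIsSafe(bool same_array, int32_t src_pos, int32_t dest_pos) {
  return !same_array || dest_pos <= src_pos;
}

// What the emitted loop does conceptually once every check has passed:
// a raw 32-bit reference copy, with a single card mark afterwards.
void ForwardCopy(const uint32_t* src_base, uint32_t* dest_base, const uint32_t* src_end) {
  while (src_base != src_end) {
    *dest_base++ = *src_base++;
  }
}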
+
// Unimplemented intrinsics.
#define UNIMPLEMENTED_INTRINSIC(Name) \
diff --git a/compiler/optimizing/intrinsics_arm.h b/compiler/optimizing/intrinsics_arm.h
index 2abb605e6e..127e9a4aa0 100644
--- a/compiler/optimizing/intrinsics_arm.h
+++ b/compiler/optimizing/intrinsics_arm.h
@@ -33,8 +33,10 @@ class CodeGeneratorARM;
class IntrinsicLocationsBuilderARM FINAL : public IntrinsicVisitor {
public:
- IntrinsicLocationsBuilderARM(ArenaAllocator* arena, const ArmInstructionSetFeatures& features)
- : arena_(arena), features_(features) {}
+ IntrinsicLocationsBuilderARM(ArenaAllocator* arena,
+ ArmAssembler* assembler,
+ const ArmInstructionSetFeatures& features)
+ : arena_(arena), assembler_(assembler), features_(features) {}
// Define visitor methods.
@@ -52,6 +54,7 @@ INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
private:
ArenaAllocator* arena_;
+ ArmAssembler* assembler_;
const ArmInstructionSetFeatures& features_;
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index b0cfd0d1bc..4da94ee9b3 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -1447,6 +1447,7 @@ void IntrinsicCodeGeneratorARM64::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED
}
UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar)
+UNIMPLEMENTED_INTRINSIC(SystemArrayCopy)
UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck)
diff --git a/compiler/optimizing/intrinsics_list.h b/compiler/optimizing/intrinsics_list.h
index bfe5e55c56..8f1d5e1c4d 100644
--- a/compiler/optimizing/intrinsics_list.h
+++ b/compiler/optimizing/intrinsics_list.h
@@ -58,6 +58,7 @@
V(MathRoundDouble, kStatic, kNeedsEnvironmentOrCache) \
V(MathRoundFloat, kStatic, kNeedsEnvironmentOrCache) \
V(SystemArrayCopyChar, kStatic, kNeedsEnvironmentOrCache) \
+ V(SystemArrayCopy, kStatic, kNeedsEnvironmentOrCache) \
V(ThreadCurrentThread, kStatic, kNeedsEnvironmentOrCache) \
V(MemoryPeekByte, kStatic, kNeedsEnvironmentOrCache) \
V(MemoryPeekIntNative, kStatic, kNeedsEnvironmentOrCache) \
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index b60905d682..b8598f3e48 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -43,6 +43,93 @@ ArenaAllocator* IntrinsicCodeGeneratorMIPS64::GetAllocator() {
return codegen_->GetGraph()->GetArena();
}
+#define __ codegen->GetAssembler()->
+
+static void MoveFromReturnRegister(Location trg,
+ Primitive::Type type,
+ CodeGeneratorMIPS64* codegen) {
+ if (!trg.IsValid()) {
+ DCHECK_EQ(type, Primitive::kPrimVoid);
+ return;
+ }
+
+ DCHECK_NE(type, Primitive::kPrimVoid);
+
+ if (Primitive::IsIntegralType(type) || type == Primitive::kPrimNot) {
+ GpuRegister trg_reg = trg.AsRegister<GpuRegister>();
+ if (trg_reg != V0) {
+ __ Move(V0, trg_reg);
+ }
+ } else {
+ FpuRegister trg_reg = trg.AsFpuRegister<FpuRegister>();
+ if (trg_reg != F0) {
+ if (type == Primitive::kPrimFloat) {
+ __ MovS(F0, trg_reg);
+ } else {
+ __ MovD(F0, trg_reg);
+ }
+ }
+ }
+}
+
+static void MoveArguments(HInvoke* invoke, CodeGeneratorMIPS64* codegen) {
+ InvokeDexCallingConventionVisitorMIPS64 calling_convention_visitor;
+ IntrinsicVisitor::MoveArguments(invoke, codegen, &calling_convention_visitor);
+}
+
+// Slow-path for fallback (calling the managed code to handle the
+// intrinsic) in an intrinsified call. This will copy the arguments
+// into the positions for a regular call.
+//
+// Note: The actual parameters are required to be in the locations
+// given by the invoke's location summary. If an intrinsic
+// modifies those locations before a slowpath call, they must be
+// restored!
+class IntrinsicSlowPathMIPS64 : public SlowPathCodeMIPS64 {
+ public:
+ explicit IntrinsicSlowPathMIPS64(HInvoke* invoke) : invoke_(invoke) { }
+
+ void EmitNativeCode(CodeGenerator* codegen_in) OVERRIDE {
+ CodeGeneratorMIPS64* codegen = down_cast<CodeGeneratorMIPS64*>(codegen_in);
+
+ __ Bind(GetEntryLabel());
+
+ SaveLiveRegisters(codegen, invoke_->GetLocations());
+
+ MoveArguments(invoke_, codegen);
+
+ if (invoke_->IsInvokeStaticOrDirect()) {
+ codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(),
+ Location::RegisterLocation(A0));
+ codegen->RecordPcInfo(invoke_, invoke_->GetDexPc(), this);
+ } else {
+ UNIMPLEMENTED(FATAL) << "Non-direct intrinsic slow-path not yet implemented";
+ UNREACHABLE();
+ }
+
+ // Copy the result back to the expected output.
+ Location out = invoke_->GetLocations()->Out();
+ if (out.IsValid()) {
+ DCHECK(out.IsRegister()); // TODO: Replace this when we support output in memory.
+ DCHECK(!invoke_->GetLocations()->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
+ MoveFromReturnRegister(out, invoke_->GetType(), codegen);
+ }
+
+ RestoreLiveRegisters(codegen, invoke_->GetLocations());
+ __ B(GetExitLabel());
+ }
+
+ const char* GetDescription() const OVERRIDE { return "IntrinsicSlowPathMIPS64"; }
+
+ private:
+ // The instruction where this slow path is happening.
+ HInvoke* const invoke_;
+
+ DISALLOW_COPY_AND_ASSIGN(IntrinsicSlowPathMIPS64);
+};
+
+#undef __
+
bool IntrinsicLocationsBuilderMIPS64::TryDispatch(HInvoke* invoke) {
Dispatch(invoke);
LocationSummary* res = invoke->GetLocations();
@@ -765,6 +852,270 @@ void IntrinsicCodeGeneratorMIPS64::VisitThreadCurrentThread(HInvoke* invoke) {
Thread::PeerOffset<kMips64PointerSize>().Int32Value());
}
+// char java.lang.String.charAt(int index)
+void IntrinsicLocationsBuilderMIPS64::VisitStringCharAt(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCallOnSlowPath,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::SameAsFirstInput());
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitStringCharAt(HInvoke* invoke) {
+ LocationSummary* locations = invoke->GetLocations();
+ Mips64Assembler* assembler = GetAssembler();
+
+ // Location of reference to data array
+ const int32_t value_offset = mirror::String::ValueOffset().Int32Value();
+ // Location of count
+ const int32_t count_offset = mirror::String::CountOffset().Int32Value();
+
+ GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
+ GpuRegister idx = locations->InAt(1).AsRegister<GpuRegister>();
+ GpuRegister out = locations->Out().AsRegister<GpuRegister>();
+
+ // TODO: Maybe we can support range check elimination. Overall,
+ // though, I think it's not worth the cost.
+ // TODO: For simplicity, the index parameter is requested in a
+ // register, so different from Quick we will not optimize the
+ // code for constants (which would save a register).
+
+ SlowPathCodeMIPS64* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS64(invoke);
+ codegen_->AddSlowPath(slow_path);
+
+ // Load the string size
+ __ Lw(TMP, obj, count_offset);
+ codegen_->MaybeRecordImplicitNullCheck(invoke);
+ // Go to the slow path if idx is too large or negative
+ __ Bgeuc(idx, TMP, slow_path->GetEntryLabel());
+
+ // out = obj[2*idx].
+ __ Sll(TMP, idx, 1); // idx * 2
+ __ Daddu(TMP, TMP, obj); // Address of char at location idx
+ __ Lhu(out, TMP, value_offset); // Load char at location idx
+
+ __ Bind(slow_path->GetExitLabel());
+}
+
+// int java.lang.String.compareTo(String anotherString)
+void IntrinsicLocationsBuilderMIPS64::VisitStringCompareTo(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt);
+ locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<GpuRegister>()));
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitStringCompareTo(HInvoke* invoke) {
+ Mips64Assembler* assembler = GetAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ // Note that the null check must have been done earlier.
+ DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));
+
+ GpuRegister argument = locations->InAt(1).AsRegister<GpuRegister>();
+ SlowPathCodeMIPS64* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS64(invoke);
+ codegen_->AddSlowPath(slow_path);
+ __ Beqzc(argument, slow_path->GetEntryLabel());
+
+ __ LoadFromOffset(kLoadDoubleword,
+ TMP,
+ TR,
+ QUICK_ENTRYPOINT_OFFSET(kMips64WordSize,
+ pStringCompareTo).Int32Value());
+ __ Jalr(TMP);
+ __ Nop();
+ __ Bind(slow_path->GetExitLabel());
+}
+
+static void GenerateStringIndexOf(HInvoke* invoke,
+ Mips64Assembler* assembler,
+ CodeGeneratorMIPS64* codegen,
+ ArenaAllocator* allocator,
+ bool start_at_zero) {
+ LocationSummary* locations = invoke->GetLocations();
+ GpuRegister tmp_reg = start_at_zero ? locations->GetTemp(0).AsRegister<GpuRegister>() : TMP;
+
+ // Note that the null check must have been done earlier.
+ DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));
+
+ // Check for code points > 0xFFFF. Either a slow-path check when we
+ // don't know statically, or directly dispatch if we have a constant.
+ SlowPathCodeMIPS64* slow_path = nullptr;
+ if (invoke->InputAt(1)->IsIntConstant()) {
+ if (!IsUint<16>(invoke->InputAt(1)->AsIntConstant()->GetValue())) {
+ // Always needs the slow-path. We could directly dispatch to it,
+ // but this case should be rare, so for simplicity just put the
+ // full slow-path down and branch unconditionally.
+ slow_path = new (allocator) IntrinsicSlowPathMIPS64(invoke);
+ codegen->AddSlowPath(slow_path);
+ __ B(slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+ return;
+ }
+ } else {
+ GpuRegister char_reg = locations->InAt(1).AsRegister<GpuRegister>();
+ __ LoadConst32(tmp_reg, std::numeric_limits<uint16_t>::max());
+ slow_path = new (allocator) IntrinsicSlowPathMIPS64(invoke);
+ codegen->AddSlowPath(slow_path);
+ __ Bltuc(tmp_reg, char_reg, slow_path->GetEntryLabel()); // UTF-16 required
+ }
+
+ if (start_at_zero) {
+ DCHECK_EQ(tmp_reg, A2);
+ // Start-index = 0.
+ __ Clear(tmp_reg);
+ } else {
+ __ Slt(TMP, A2, ZERO); // if fromIndex < 0
+ __ Seleqz(A2, A2, TMP); // fromIndex = 0
+ }
+
+ __ LoadFromOffset(kLoadDoubleword,
+ TMP,
+ TR,
+ QUICK_ENTRYPOINT_OFFSET(kMips64WordSize, pIndexOf).Int32Value());
+ __ Jalr(TMP);
+ __ Nop();
+
+ if (slow_path != nullptr) {
+ __ Bind(slow_path->GetExitLabel());
+ }
+}
+
+// int java.lang.String.indexOf(int ch)
+void IntrinsicLocationsBuilderMIPS64::VisitStringIndexOf(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ // We have a hand-crafted assembly stub that follows the runtime
+ // calling convention. So it's best to align the inputs accordingly.
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt);
+ locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<GpuRegister>()));
+
+ // Need a temp for slow-path codepoint compare, and need to send start-index=0.
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitStringIndexOf(HInvoke* invoke) {
+ GenerateStringIndexOf(invoke, GetAssembler(), codegen_, GetAllocator(), true);
+}
+
+// int java.lang.String.indexOf(int ch, int fromIndex)
+void IntrinsicLocationsBuilderMIPS64::VisitStringIndexOfAfter(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ // We have a hand-crafted assembly stub that follows the runtime
+ // calling convention. So it's best to align the inputs accordingly.
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+ Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt);
+ locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<GpuRegister>()));
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitStringIndexOfAfter(HInvoke* invoke) {
+ GenerateStringIndexOf(invoke, GetAssembler(), codegen_, GetAllocator(), false);
+}
+
+// java.lang.String.String(byte[] bytes)
+void IntrinsicLocationsBuilderMIPS64::VisitStringNewStringFromBytes(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+ locations->SetInAt(3, Location::RegisterLocation(calling_convention.GetRegisterAt(3)));
+ Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt);
+ locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<GpuRegister>()));
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitStringNewStringFromBytes(HInvoke* invoke) {
+ Mips64Assembler* assembler = GetAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ GpuRegister byte_array = locations->InAt(0).AsRegister<GpuRegister>();
+ SlowPathCodeMIPS64* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS64(invoke);
+ codegen_->AddSlowPath(slow_path);
+ __ Beqzc(byte_array, slow_path->GetEntryLabel());
+
+ __ LoadFromOffset(kLoadDoubleword,
+ TMP,
+ TR,
+ QUICK_ENTRYPOINT_OFFSET(kMips64WordSize, pAllocStringFromBytes).Int32Value());
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ __ Jalr(TMP);
+ __ Nop();
+ __ Bind(slow_path->GetExitLabel());
+}
+
+// java.lang.String.String(char[] value)
+void IntrinsicLocationsBuilderMIPS64::VisitStringNewStringFromChars(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+ Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt);
+ locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<GpuRegister>()));
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitStringNewStringFromChars(HInvoke* invoke) {
+ Mips64Assembler* assembler = GetAssembler();
+
+ __ LoadFromOffset(kLoadDoubleword,
+ TMP,
+ TR,
+ QUICK_ENTRYPOINT_OFFSET(kMips64WordSize, pAllocStringFromChars).Int32Value());
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ __ Jalr(TMP);
+ __ Nop();
+}
+
+// java.lang.String.String(String original)
+void IntrinsicLocationsBuilderMIPS64::VisitStringNewStringFromString(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+ Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt);
+ locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<GpuRegister>()));
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitStringNewStringFromString(HInvoke* invoke) {
+ Mips64Assembler* assembler = GetAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ GpuRegister string_to_copy = locations->InAt(0).AsRegister<GpuRegister>();
+ SlowPathCodeMIPS64* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS64(invoke);
+ codegen_->AddSlowPath(slow_path);
+ __ Beqzc(string_to_copy, slow_path->GetEntryLabel());
+
+ __ LoadFromOffset(kLoadDoubleword,
+ TMP,
+ TR,
+ QUICK_ENTRYPOINT_OFFSET(kMips64WordSize, pAllocStringFromString).Int32Value());
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ __ Jalr(TMP);
+ __ Nop();
+ __ Bind(slow_path->GetExitLabel());
+}
+
// Unimplemented intrinsics.
#define UNIMPLEMENTED_INTRINSIC(Name) \
@@ -794,14 +1145,7 @@ UNIMPLEMENTED_INTRINSIC(UnsafePutLongVolatile)
UNIMPLEMENTED_INTRINSIC(UnsafeCASInt)
UNIMPLEMENTED_INTRINSIC(UnsafeCASLong)
UNIMPLEMENTED_INTRINSIC(UnsafeCASObject)
-UNIMPLEMENTED_INTRINSIC(StringCharAt)
-UNIMPLEMENTED_INTRINSIC(StringCompareTo)
UNIMPLEMENTED_INTRINSIC(StringEquals)
-UNIMPLEMENTED_INTRINSIC(StringIndexOf)
-UNIMPLEMENTED_INTRINSIC(StringIndexOfAfter)
-UNIMPLEMENTED_INTRINSIC(StringNewStringFromBytes)
-UNIMPLEMENTED_INTRINSIC(StringNewStringFromChars)
-UNIMPLEMENTED_INTRINSIC(StringNewStringFromString)
UNIMPLEMENTED_INTRINSIC(LongRotateLeft)
UNIMPLEMENTED_INTRINSIC(LongRotateRight)
UNIMPLEMENTED_INTRINSIC(LongNumberOfTrailingZeros)
@@ -812,6 +1156,7 @@ UNIMPLEMENTED_INTRINSIC(IntegerNumberOfTrailingZeros)
UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck)
UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar)
+UNIMPLEMENTED_INTRINSIC(SystemArrayCopy)
#undef UNIMPLEMENTED_INTRINSIC
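What the charAt sequence added above computes, in scalar form (a sketch; the struct only stands in for the String fields read at count_offset and value_offset):

#include <cstdint>

// Toy stand-in for the managed String fields the intrinsic reads.
struct ToyString {
  int32_t count;          // string length in UTF-16 code units
  const uint16_t* value;  // character data
};

// Fast path of charAt: returns false to request the slow path. The unsigned
// comparison rejects negative indices as well as indices >= count, mirroring
// the Bgeuc above.
bool TryCharAt(const ToyString* s, int32_t idx, uint16_t* out) {
  if (static_cast<uint32_t>(idx) >= static_cast<uint32_t>(s->count)) {
    return false;
  }
  *out = s->value[idx];  // Lhu at value_offset + 2 * idx
  return true;
}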
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 318d3a6ee8..e83aebb5be 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -1054,17 +1054,22 @@ void IntrinsicCodeGeneratorX86::VisitStringEquals(HInvoke* invoke) {
// Note that the null check must have been done earlier.
DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));
- // Check if input is null, return false if it is.
- __ testl(arg, arg);
- __ j(kEqual, &return_false);
+ StringEqualsOptimizations optimizations(invoke);
+ if (!optimizations.GetArgumentNotNull()) {
+ // Check if input is null, return false if it is.
+ __ testl(arg, arg);
+ __ j(kEqual, &return_false);
+ }
// Instanceof check for the argument by comparing class fields.
// All string objects must have the same type since String cannot be subclassed.
// Receiver must be a string object, so its class field is equal to all strings' class fields.
// If the argument is a string object, its class field must be equal to receiver's class field.
- __ movl(ecx, Address(str, class_offset));
- __ cmpl(ecx, Address(arg, class_offset));
- __ j(kNotEqual, &return_false);
+ if (!optimizations.GetArgumentIsString()) {
+ __ movl(ecx, Address(str, class_offset));
+ __ cmpl(ecx, Address(arg, class_offset));
+ __ j(kNotEqual, &return_false);
+ }
// Reference equality check, return true if same reference.
__ cmpl(str, arg);
@@ -2250,6 +2255,7 @@ UNIMPLEMENTED_INTRINSIC(MathRoundDouble)
UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
UNIMPLEMENTED_INTRINSIC(LongRotateRight)
UNIMPLEMENTED_INTRINSIC(LongRotateLeft)
+UNIMPLEMENTED_INTRINSIC(SystemArrayCopy)
#undef UNIMPLEMENTED_INTRINSIC
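In scalar terms, the String.equals fast path that the new flags prune looks roughly like this (a sketch with illustrative names; the real code compares the backing char data word by word rather than via memcmp):

#include <cstddef>
#include <cstdint>
#include <cstring>

// Toy layout standing in for the fields the intrinsic reads.
struct ToyString {
  const void* klass;      // class pointer, compared only when the argument's type is unknown
  int32_t count;          // number of UTF-16 code units
  const uint16_t* value;  // character data
};

bool ToyStringEquals(const ToyString* receiver, const ToyString* arg,
                     bool arg_known_non_null, bool arg_known_string) {
  if (!arg_known_non_null && arg == nullptr) return false;               // null check
  if (!arg_known_string && arg->klass != receiver->klass) return false;  // instanceof check
  if (receiver == arg) return true;                                      // same reference
  if (receiver->count != arg->count) return false;                       // length check
  return std::memcmp(receiver->value, arg->value,
                     static_cast<std::size_t>(receiver->count) * sizeof(uint16_t)) == 0;
}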
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 1a13b699c8..e0d88a91d3 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -620,7 +620,6 @@ void IntrinsicCodeGeneratorX86_64::VisitMathRoundFloat(HInvoke* invoke) {
codegen_->Load64BitValue(out, kPrimIntMax);
// if inPlusPointFive >= maxInt goto done
- __ movl(out, Immediate(kPrimIntMax));
__ comiss(inPlusPointFive, codegen_->LiteralFloatAddress(static_cast<float>(kPrimIntMax)));
__ j(kAboveEqual, &done);
@@ -668,7 +667,6 @@ void IntrinsicCodeGeneratorX86_64::VisitMathRoundDouble(HInvoke* invoke) {
codegen_->Load64BitValue(out, kPrimLongMax);
// if inPlusPointFive >= maxLong goto done
- __ movq(out, Immediate(kPrimLongMax));
__ comisd(inPlusPointFive, codegen_->LiteralDoubleAddress(static_cast<double>(kPrimLongMax)));
__ j(kAboveEqual, &done);
@@ -754,7 +752,7 @@ void IntrinsicLocationsBuilderX86_64::VisitSystemArrayCopyChar(HInvoke* invoke)
LocationSummary* locations = new (arena_) LocationSummary(invoke,
LocationSummary::kCallOnSlowPath,
kIntrinsified);
- // arraycopy(Object src, int srcPos, Object dest, int destPos, int length).
+ // arraycopy(Object src, int src_pos, Object dest, int dest_pos, int length).
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
locations->SetInAt(2, Location::RequiresRegister());
@@ -770,19 +768,27 @@ void IntrinsicLocationsBuilderX86_64::VisitSystemArrayCopyChar(HInvoke* invoke)
static void CheckPosition(X86_64Assembler* assembler,
Location pos,
CpuRegister input,
- CpuRegister length,
+ Location length,
SlowPathCode* slow_path,
CpuRegister input_len,
- CpuRegister temp) {
- // Where is the length in the String?
+ CpuRegister temp,
+ bool length_is_input_length = false) {
+ // Where is the length in the Array?
const uint32_t length_offset = mirror::Array::LengthOffset().Uint32Value();
if (pos.IsConstant()) {
int32_t pos_const = pos.GetConstant()->AsIntConstant()->GetValue();
if (pos_const == 0) {
- // Check that length(input) >= length.
- __ cmpl(Address(input, length_offset), length);
- __ j(kLess, slow_path->GetEntryLabel());
+ if (!length_is_input_length) {
+ // Check that length(input) >= length.
+ if (length.IsConstant()) {
+ __ cmpl(Address(input, length_offset),
+ Immediate(length.GetConstant()->AsIntConstant()->GetValue()));
+ } else {
+ __ cmpl(Address(input, length_offset), length.AsRegister<CpuRegister>());
+ }
+ __ j(kLess, slow_path->GetEntryLabel());
+ }
} else {
// Check that length(input) >= pos.
__ movl(input_len, Address(input, length_offset));
@@ -791,9 +797,18 @@ static void CheckPosition(X86_64Assembler* assembler,
// Check that (length(input) - pos) >= length.
__ leal(temp, Address(input_len, -pos_const));
- __ cmpl(temp, length);
+ if (length.IsConstant()) {
+ __ cmpl(temp, Immediate(length.GetConstant()->AsIntConstant()->GetValue()));
+ } else {
+ __ cmpl(temp, length.AsRegister<CpuRegister>());
+ }
__ j(kLess, slow_path->GetEntryLabel());
}
+ } else if (length_is_input_length) {
+ // The only way the copy can succeed is if pos is zero.
+ CpuRegister pos_reg = pos.AsRegister<CpuRegister>();
+ __ testl(pos_reg, pos_reg);
+ __ j(kNotEqual, slow_path->GetEntryLabel());
} else {
// Check that pos >= 0.
CpuRegister pos_reg = pos.AsRegister<CpuRegister>();
@@ -807,7 +822,11 @@ static void CheckPosition(X86_64Assembler* assembler,
// Check that (length(input) - pos) >= length.
__ movl(temp, Address(input, length_offset));
__ subl(temp, pos_reg);
- __ cmpl(temp, length);
+ if (length.IsConstant()) {
+ __ cmpl(temp, Immediate(length.GetConstant()->AsIntConstant()->GetValue()));
+ } else {
+ __ cmpl(temp, length.AsRegister<CpuRegister>());
+ }
__ j(kLess, slow_path->GetEntryLabel());
}
}
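
For reference, the checks CheckPosition emits boil down to the following predicate, shown here as a minimal C++ sketch (PositionIsValid and its parameters are illustrative stand-ins, not ART types; input_length plays the role of the array's length field read at length_offset):

#include <cstdint>

// Illustrative sketch of the conditions the generated code verifies before the
// fast copy; returning false corresponds to jumping to the slow path.
static bool PositionIsValid(int32_t input_length, int32_t pos, int32_t length,
                            bool length_is_input_length) {
  if (length_is_input_length) {
    // The copy length equals the input's length: only pos == 0 can succeed.
    return pos == 0;
  }
  if (pos < 0) {
    return false;  // Negative positions always take the slow path.
  }
  if (input_length < pos) {
    return false;  // Position is past the end of the array.
  }
  return (input_length - pos) >= length;  // Enough elements left to copy.
}
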
@@ -817,9 +836,9 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopyChar(HInvoke* invoke) {
LocationSummary* locations = invoke->GetLocations();
CpuRegister src = locations->InAt(0).AsRegister<CpuRegister>();
- Location srcPos = locations->InAt(1);
+ Location src_pos = locations->InAt(1);
CpuRegister dest = locations->InAt(2).AsRegister<CpuRegister>();
- Location destPos = locations->InAt(3);
+ Location dest_pos = locations->InAt(3);
Location length = locations->InAt(4);
// Temporaries that we need for MOVSW.
@@ -852,6 +871,12 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopyChar(HInvoke* invoke) {
__ j(kLess, slow_path->GetEntryLabel());
}
+ // Validity checks: source.
+ CheckPosition(assembler, src_pos, src, length, slow_path, src_base, dest_base);
+
+ // Validity checks: dest.
+ CheckPosition(assembler, dest_pos, dest, length, slow_path, src_base, dest_base);
+
// We need the count in RCX.
if (length.IsConstant()) {
__ movl(count, Immediate(length.GetConstant()->AsIntConstant()->GetValue()));
@@ -859,12 +884,6 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopyChar(HInvoke* invoke) {
__ movl(count, length.AsRegister<CpuRegister>());
}
- // Validity checks: source.
- CheckPosition(assembler, srcPos, src, count, slow_path, src_base, dest_base);
-
- // Validity checks: dest.
- CheckPosition(assembler, destPos, dest, count, slow_path, src_base, dest_base);
-
// Okay, everything checks out. Finally time to do the copy.
// Check assumption that sizeof(Char) is 2 (used in scaling below).
const size_t char_size = Primitive::ComponentSize(Primitive::kPrimChar);
@@ -872,18 +891,18 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopyChar(HInvoke* invoke) {
const uint32_t data_offset = mirror::Array::DataOffset(char_size).Uint32Value();
- if (srcPos.IsConstant()) {
- int32_t srcPos_const = srcPos.GetConstant()->AsIntConstant()->GetValue();
- __ leal(src_base, Address(src, char_size * srcPos_const + data_offset));
+ if (src_pos.IsConstant()) {
+ int32_t src_pos_const = src_pos.GetConstant()->AsIntConstant()->GetValue();
+ __ leal(src_base, Address(src, char_size * src_pos_const + data_offset));
} else {
- __ leal(src_base, Address(src, srcPos.AsRegister<CpuRegister>(),
+ __ leal(src_base, Address(src, src_pos.AsRegister<CpuRegister>(),
ScaleFactor::TIMES_2, data_offset));
}
- if (destPos.IsConstant()) {
- int32_t destPos_const = destPos.GetConstant()->AsIntConstant()->GetValue();
- __ leal(dest_base, Address(dest, char_size * destPos_const + data_offset));
+ if (dest_pos.IsConstant()) {
+ int32_t dest_pos_const = dest_pos.GetConstant()->AsIntConstant()->GetValue();
+ __ leal(dest_base, Address(dest, char_size * dest_pos_const + data_offset));
} else {
- __ leal(dest_base, Address(dest, destPos.AsRegister<CpuRegister>(),
+ __ leal(dest_base, Address(dest, dest_pos.AsRegister<CpuRegister>(),
ScaleFactor::TIMES_2, data_offset));
}
@@ -893,6 +912,231 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopyChar(HInvoke* invoke) {
__ Bind(slow_path->GetExitLabel());
}
+
+void IntrinsicLocationsBuilderX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
+ CodeGenerator::CreateSystemArrayCopyLocationSummary(invoke);
+}
+
+void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
+ X86_64Assembler* assembler = GetAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
+ uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
+ uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
+
+ CpuRegister src = locations->InAt(0).AsRegister<CpuRegister>();
+ Location src_pos = locations->InAt(1);
+ CpuRegister dest = locations->InAt(2).AsRegister<CpuRegister>();
+ Location dest_pos = locations->InAt(3);
+ Location length = locations->InAt(4);
+ CpuRegister temp1 = locations->GetTemp(0).AsRegister<CpuRegister>();
+ CpuRegister temp2 = locations->GetTemp(1).AsRegister<CpuRegister>();
+ CpuRegister temp3 = locations->GetTemp(2).AsRegister<CpuRegister>();
+
+ SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathX86_64(invoke);
+ codegen_->AddSlowPath(slow_path);
+
+ NearLabel ok;
+ SystemArrayCopyOptimizations optimizations(invoke);
+
+ if (!optimizations.GetDestinationIsSource()) {
+ if (!src_pos.IsConstant() || !dest_pos.IsConstant()) {
+ __ cmpl(src, dest);
+ }
+ }
+
+ // If source and destination are the same array, we go to the slow path when
+ // dest_pos > src_pos: the fast path below copies forward, which would corrupt
+ // overlapping regions in that case.
+ if (src_pos.IsConstant()) {
+ int32_t src_pos_constant = src_pos.GetConstant()->AsIntConstant()->GetValue();
+ if (dest_pos.IsConstant()) {
+ // Checked when building locations.
+ DCHECK(!optimizations.GetDestinationIsSource()
+ || (src_pos_constant >= dest_pos.GetConstant()->AsIntConstant()->GetValue()));
+ } else {
+ if (!optimizations.GetDestinationIsSource()) {
+ __ j(kNotEqual, &ok);
+ }
+ __ cmpl(dest_pos.AsRegister<CpuRegister>(), Immediate(src_pos_constant));
+ __ j(kGreater, slow_path->GetEntryLabel());
+ }
+ } else {
+ if (!optimizations.GetDestinationIsSource()) {
+ __ j(kNotEqual, &ok);
+ }
+ if (dest_pos.IsConstant()) {
+ int32_t dest_pos_constant = dest_pos.GetConstant()->AsIntConstant()->GetValue();
+ __ cmpl(src_pos.AsRegister<CpuRegister>(), Immediate(dest_pos_constant));
+ __ j(kLess, slow_path->GetEntryLabel());
+ } else {
+ __ cmpl(src_pos.AsRegister<CpuRegister>(), dest_pos.AsRegister<CpuRegister>());
+ __ j(kLess, slow_path->GetEntryLabel());
+ }
+ }
+
+ __ Bind(&ok);
+
+ if (!optimizations.GetSourceIsNotNull()) {
+ // Bail out if the source is null.
+ __ testl(src, src);
+ __ j(kEqual, slow_path->GetEntryLabel());
+ }
+
+ if (!optimizations.GetDestinationIsNotNull() && !optimizations.GetDestinationIsSource()) {
+ // Bail out if the destination is null.
+ __ testl(dest, dest);
+ __ j(kEqual, slow_path->GetEntryLabel());
+ }
+
+ // If the length is negative, bail out.
+ // We have already checked in the LocationsBuilder for the constant case.
+ if (!length.IsConstant() &&
+ !optimizations.GetCountIsSourceLength() &&
+ !optimizations.GetCountIsDestinationLength()) {
+ __ testl(length.AsRegister<CpuRegister>(), length.AsRegister<CpuRegister>());
+ __ j(kLess, slow_path->GetEntryLabel());
+ }
+
+ // Validity checks: source.
+ CheckPosition(assembler,
+ src_pos,
+ src,
+ length,
+ slow_path,
+ temp1,
+ temp2,
+ optimizations.GetCountIsSourceLength());
+
+ // Validity checks: dest.
+ CheckPosition(assembler,
+ dest_pos,
+ dest,
+ length,
+ slow_path,
+ temp1,
+ temp2,
+ optimizations.GetCountIsDestinationLength());
+
+ if (!optimizations.GetDoesNotNeedTypeCheck()) {
+ // Check whether all elements of the source array are assignable to the component
+ // type of the destination array. We do two checks: whether the classes are the
+ // same, and whether the destination is Object[]. If neither check succeeds, we
+ // go to the slow path.
+ __ movl(temp1, Address(dest, class_offset));
+ __ movl(temp2, Address(src, class_offset));
+ bool did_unpoison = false;
+ if (!optimizations.GetDestinationIsNonPrimitiveArray() ||
+ !optimizations.GetSourceIsNonPrimitiveArray()) {
+ // One or two of the references need to be unpoisoned. Unpoison them
+ // both to make the identity check valid.
+ __ MaybeUnpoisonHeapReference(temp1);
+ __ MaybeUnpoisonHeapReference(temp2);
+ did_unpoison = true;
+ }
+
+ if (!optimizations.GetDestinationIsNonPrimitiveArray()) {
+ // Bail out if the destination is not a non-primitive array.
+ __ movl(CpuRegister(TMP), Address(temp1, component_offset));
+ __ testl(CpuRegister(TMP), CpuRegister(TMP));
+ __ j(kEqual, slow_path->GetEntryLabel());
+ __ MaybeUnpoisonHeapReference(CpuRegister(TMP));
+ __ cmpw(Address(CpuRegister(TMP), primitive_offset), Immediate(Primitive::kPrimNot));
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ }
+
+ if (!optimizations.GetSourceIsNonPrimitiveArray()) {
+ // Bail out if the source is not a non-primitive array.
+ __ movl(CpuRegister(TMP), Address(temp2, component_offset));
+ __ testl(CpuRegister(TMP), CpuRegister(TMP));
+ __ j(kEqual, slow_path->GetEntryLabel());
+ __ MaybeUnpoisonHeapReference(CpuRegister(TMP));
+ __ cmpw(Address(CpuRegister(TMP), primitive_offset), Immediate(Primitive::kPrimNot));
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ }
+
+ __ cmpl(temp1, temp2);
+
+ if (optimizations.GetDestinationIsTypedObjectArray()) {
+ NearLabel do_copy;
+ __ j(kEqual, &do_copy);
+ if (!did_unpoison) {
+ __ MaybeUnpoisonHeapReference(temp1);
+ }
+ __ movl(temp1, Address(temp1, component_offset));
+ __ MaybeUnpoisonHeapReference(temp1);
+ __ movl(temp1, Address(temp1, super_offset));
+ // No need to unpoison the result, we're comparing against null.
+ __ testl(temp1, temp1);
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ __ Bind(&do_copy);
+ } else {
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ }
+ } else if (!optimizations.GetSourceIsNonPrimitiveArray()) {
+ DCHECK(optimizations.GetDestinationIsNonPrimitiveArray());
+ // Bail out if the source is not a non-primitive array.
+ __ movl(temp1, Address(src, class_offset));
+ __ MaybeUnpoisonHeapReference(temp1);
+ __ movl(CpuRegister(TMP), Address(temp1, component_offset));
+ __ testl(CpuRegister(TMP), CpuRegister(TMP));
+ __ j(kEqual, slow_path->GetEntryLabel());
+ __ MaybeUnpoisonHeapReference(CpuRegister(TMP));
+ __ cmpw(Address(CpuRegister(TMP), primitive_offset), Immediate(Primitive::kPrimNot));
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ }
+
+ // Compute base source address, base destination address, and end source address.
+
+ uint32_t element_size = sizeof(int32_t);
+ uint32_t offset = mirror::Array::DataOffset(element_size).Uint32Value();
+ if (src_pos.IsConstant()) {
+ int32_t constant = src_pos.GetConstant()->AsIntConstant()->GetValue();
+ __ leal(temp1, Address(src, element_size * constant + offset));
+ } else {
+ __ leal(temp1, Address(src, src_pos.AsRegister<CpuRegister>(), ScaleFactor::TIMES_4, offset));
+ }
+
+ if (dest_pos.IsConstant()) {
+ int32_t constant = dest_pos.GetConstant()->AsIntConstant()->GetValue();
+ __ leal(temp2, Address(dest, element_size * constant + offset));
+ } else {
+ __ leal(temp2, Address(dest, dest_pos.AsRegister<CpuRegister>(), ScaleFactor::TIMES_4, offset));
+ }
+
+ if (length.IsConstant()) {
+ int32_t constant = length.GetConstant()->AsIntConstant()->GetValue();
+ __ leal(temp3, Address(temp1, element_size * constant));
+ } else {
+ __ leal(temp3, Address(temp1, length.AsRegister<CpuRegister>(), ScaleFactor::TIMES_4, 0));
+ }
+
+ // Iterate over the arrays and do a raw copy of the objects. We don't need to
+ // poison/unpoison nor to emit read barriers, as the next uses of the destination
+ // array will do it.
+ NearLabel loop, done;
+ __ cmpl(temp1, temp3);
+ __ j(kEqual, &done);
+ __ Bind(&loop);
+ __ movl(CpuRegister(TMP), Address(temp1, 0));
+ __ movl(Address(temp2, 0), CpuRegister(TMP));
+ __ addl(temp1, Immediate(element_size));
+ __ addl(temp2, Immediate(element_size));
+ __ cmpl(temp1, temp3);
+ __ j(kNotEqual, &loop);
+ __ Bind(&done);
+
+ // We only need one card marking on the destination array.
+ codegen_->MarkGCCard(temp1,
+ temp2,
+ dest,
+ CpuRegister(kNoRegister),
+ false);
+
+ __ Bind(slow_path->GetExitLabel());
+}
+
void IntrinsicLocationsBuilderX86_64::VisitStringCompareTo(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
LocationSummary::kCall,
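
Stripped of poisoning, card marking and slow-path concerns, the fast path emitted by VisitSystemArrayCopy above behaves roughly like this C++ sketch (RawReferenceCopy and its pointer parameters are illustrative only; real heap references are 32-bit compressed slots read through TMP):

#include <cstdint>

// Rough equivalent of the emitted fast path: a forward, element-by-element copy
// of 32-bit reference slots from the source array to the destination array.
static void RawReferenceCopy(uint32_t* src_data, int32_t src_pos,
                             uint32_t* dest_data, int32_t dest_pos,
                             int32_t length) {
  uint32_t* src_cur = src_data + src_pos;     // temp1: base source address.
  uint32_t* dest_cur = dest_data + dest_pos;  // temp2: base destination address.
  uint32_t* src_end = src_cur + length;       // temp3: end source address.
  while (src_cur != src_end) {                // Skipped entirely when length == 0.
    *dest_cur++ = *src_cur++;                 // movl through TMP in the generated code.
  }
}

This is also why the earlier dest_pos > src_pos test sends overlapping same-array copies to the slow path: a forward copy like this would read slots it has already overwritten.
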
diff --git a/compiler/optimizing/licm_test.cc b/compiler/optimizing/licm_test.cc
index ec4a9ec916..558892d01c 100644
--- a/compiler/optimizing/licm_test.cc
+++ b/compiler/optimizing/licm_test.cc
@@ -106,11 +106,11 @@ TEST_F(LICMTest, FieldHoisting) {
NullHandle<mirror::DexCache> dex_cache;
HInstruction* get_field = new (&allocator_) HInstanceFieldGet(
parameter_, Primitive::kPrimLong, MemberOffset(10),
- false, kUnknownFieldIndex, graph_->GetDexFile(), dex_cache);
+ false, kUnknownFieldIndex, graph_->GetDexFile(), dex_cache, 0);
loop_body_->InsertInstructionBefore(get_field, loop_body_->GetLastInstruction());
HInstruction* set_field = new (&allocator_) HInstanceFieldSet(
parameter_, constant_, Primitive::kPrimInt, MemberOffset(20),
- false, kUnknownFieldIndex, graph_->GetDexFile(), dex_cache);
+ false, kUnknownFieldIndex, graph_->GetDexFile(), dex_cache, 0);
loop_body_->InsertInstructionBefore(set_field, loop_body_->GetLastInstruction());
EXPECT_EQ(get_field->GetBlock(), loop_body_);
@@ -127,11 +127,11 @@ TEST_F(LICMTest, NoFieldHoisting) {
NullHandle<mirror::DexCache> dex_cache;
HInstruction* get_field = new (&allocator_) HInstanceFieldGet(
parameter_, Primitive::kPrimLong, MemberOffset(10),
- false, kUnknownFieldIndex, graph_->GetDexFile(), dex_cache);
+ false, kUnknownFieldIndex, graph_->GetDexFile(), dex_cache, 0);
loop_body_->InsertInstructionBefore(get_field, loop_body_->GetLastInstruction());
HInstruction* set_field = new (&allocator_) HInstanceFieldSet(
parameter_, get_field, Primitive::kPrimLong, MemberOffset(10),
- false, kUnknownFieldIndex, graph_->GetDexFile(), dex_cache);
+ false, kUnknownFieldIndex, graph_->GetDexFile(), dex_cache, 0);
loop_body_->InsertInstructionBefore(set_field, loop_body_->GetLastInstruction());
EXPECT_EQ(get_field->GetBlock(), loop_body_);
@@ -146,7 +146,7 @@ TEST_F(LICMTest, ArrayHoisting) {
// Populate the loop with instructions: set/get array with different types.
HInstruction* get_array = new (&allocator_) HArrayGet(
- parameter_, constant_, Primitive::kPrimLong);
+ parameter_, constant_, Primitive::kPrimLong, 0);
loop_body_->InsertInstructionBefore(get_array, loop_body_->GetLastInstruction());
HInstruction* set_array = new (&allocator_) HArraySet(
parameter_, constant_, constant_, Primitive::kPrimInt, 0);
@@ -164,7 +164,7 @@ TEST_F(LICMTest, NoArrayHoisting) {
// Populate the loop with instructions: set/get array with same types.
HInstruction* get_array = new (&allocator_) HArrayGet(
- parameter_, constant_, Primitive::kPrimLong);
+ parameter_, constant_, Primitive::kPrimLong, 0);
loop_body_->InsertInstructionBefore(get_array, loop_body_->GetLastInstruction());
HInstruction* set_array = new (&allocator_) HArraySet(
parameter_, get_array, constant_, Primitive::kPrimLong, 0);
diff --git a/compiler/optimizing/live_ranges_test.cc b/compiler/optimizing/live_ranges_test.cc
index b9ab290996..7f67560692 100644
--- a/compiler/optimizing/live_ranges_test.cc
+++ b/compiler/optimizing/live_ranges_test.cc
@@ -77,7 +77,7 @@ TEST(LiveRangesTest, CFG1) {
ASSERT_EQ(2u, range->GetStart());
// Last use is the return instruction.
ASSERT_EQ(8u, range->GetEnd());
- HBasicBlock* block = graph->GetBlock(1);
+ HBasicBlock* block = graph->GetBlocks()[1];
ASSERT_TRUE(block->GetLastInstruction()->IsReturn());
ASSERT_EQ(8u, block->GetLastInstruction()->GetLifetimePosition());
ASSERT_TRUE(range->GetNext() == nullptr);
@@ -125,7 +125,7 @@ TEST(LiveRangesTest, CFG2) {
ASSERT_EQ(2u, range->GetStart());
// Last use is the return instruction.
ASSERT_EQ(22u, range->GetEnd());
- HBasicBlock* block = graph->GetBlock(3);
+ HBasicBlock* block = graph->GetBlocks()[3];
ASSERT_TRUE(block->GetLastInstruction()->IsReturn());
ASSERT_EQ(22u, block->GetLastInstruction()->GetLifetimePosition());
ASSERT_TRUE(range->GetNext() == nullptr);
diff --git a/compiler/optimizing/locations.h b/compiler/optimizing/locations.h
index de4fb7e201..d014379bca 100644
--- a/compiler/optimizing/locations.h
+++ b/compiler/optimizing/locations.h
@@ -481,12 +481,10 @@ class LocationSummary : public ArenaObject<kArenaAllocLocationSummary> {
bool intrinsified = false);
void SetInAt(uint32_t at, Location location) {
- DCHECK_LT(at, GetInputCount());
inputs_[at] = location;
}
Location InAt(uint32_t at) const {
- DCHECK_LT(at, GetInputCount());
return inputs_[at];
}
@@ -514,12 +512,10 @@ class LocationSummary : public ArenaObject<kArenaAllocLocationSummary> {
}
Location GetTemp(uint32_t at) const {
- DCHECK_LT(at, GetTempCount());
return temps_[at];
}
void SetTempAt(uint32_t at, Location location) {
- DCHECK_LT(at, GetTempCount());
DCHECK(temps_[at].IsUnallocated() || temps_[at].IsInvalid());
temps_[at] = location;
}
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 989970fb49..ed401b67c5 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -21,6 +21,7 @@
#include "base/bit_vector-inl.h"
#include "base/bit_utils.h"
#include "base/stl_util.h"
+#include "intrinsics.h"
#include "mirror/class-inl.h"
#include "scoped_thread_state_change.h"
@@ -54,7 +55,6 @@ void HGraph::FindBackEdges(ArenaBitVector* visited) {
visiting.ClearBit(current_id);
worklist.pop_back();
} else {
- DCHECK_LT(successors_visited[current_id], current->GetSuccessors().size());
HBasicBlock* successor = current->GetSuccessors()[successors_visited[current_id]++];
uint32_t successor_id = successor->GetBlockId();
if (visiting.IsBitSet(successor_id)) {
@@ -88,7 +88,7 @@ static void RemoveAsUser(HInstruction* instruction) {
void HGraph::RemoveInstructionsAsUsersFromDeadBlocks(const ArenaBitVector& visited) const {
for (size_t i = 0; i < blocks_.size(); ++i) {
if (!visited.IsBitSet(i)) {
- HBasicBlock* block = GetBlock(i);
+ HBasicBlock* block = blocks_[i];
DCHECK(block->GetPhis().IsEmpty()) << "Phis are not inserted at this stage";
for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
RemoveAsUser(it.Current());
@@ -100,7 +100,7 @@ void HGraph::RemoveInstructionsAsUsersFromDeadBlocks(const ArenaBitVector& visit
void HGraph::RemoveDeadBlocks(const ArenaBitVector& visited) {
for (size_t i = 0; i < blocks_.size(); ++i) {
if (!visited.IsBitSet(i)) {
- HBasicBlock* block = GetBlock(i);
+ HBasicBlock* block = blocks_[i];
// We only need to update the successor, which might be live.
for (HBasicBlock* successor : block->GetSuccessors()) {
successor->RemovePredecessor(block);
@@ -174,7 +174,6 @@ void HGraph::ComputeDominanceInformation() {
if (successors_visited[current_id] == current->GetSuccessors().size()) {
worklist.pop_back();
} else {
- DCHECK_LT(successors_visited[current_id], current->GetSuccessors().size());
HBasicBlock* successor = current->GetSuccessors()[successors_visited[current_id]++];
if (successor->GetDominator() == nullptr) {
@@ -185,7 +184,6 @@ void HGraph::ComputeDominanceInformation() {
// Once all the forward edges have been visited, we know the immediate
// dominator of the block. We can then start visiting its successors.
- DCHECK_LT(successor->GetBlockId(), visits.size());
if (++visits[successor->GetBlockId()] ==
successor->GetPredecessors().size() - successor->NumberOfBackEdges()) {
successor->GetDominator()->AddDominatedBlock(successor);
@@ -257,7 +255,7 @@ void HGraph::SimplifyLoop(HBasicBlock* header) {
pre_header->AddInstruction(new (arena_) HGoto(header->GetDexPc()));
for (size_t pred = 0; pred < header->GetPredecessors().size(); ++pred) {
- HBasicBlock* predecessor = header->GetPredecessor(pred);
+ HBasicBlock* predecessor = header->GetPredecessors()[pred];
if (!info->IsBackEdge(*predecessor)) {
predecessor->ReplaceSuccessor(header, pre_header);
pred--;
@@ -267,10 +265,10 @@ void HGraph::SimplifyLoop(HBasicBlock* header) {
}
// Make sure the first predecessor of a loop header is the incoming block.
- if (info->IsBackEdge(*header->GetPredecessor(0))) {
- HBasicBlock* to_swap = header->GetPredecessor(0);
+ if (info->IsBackEdge(*header->GetPredecessors()[0])) {
+ HBasicBlock* to_swap = header->GetPredecessors()[0];
for (size_t pred = 1, e = header->GetPredecessors().size(); pred < e; ++pred) {
- HBasicBlock* predecessor = header->GetPredecessor(pred);
+ HBasicBlock* predecessor = header->GetPredecessors()[pred];
if (!info->IsBackEdge(*predecessor)) {
header->predecessors_[pred] = to_swap;
header->predecessors_[0] = predecessor;
@@ -293,7 +291,7 @@ void HGraph::SimplifyLoop(HBasicBlock* header) {
}
static bool CheckIfPredecessorAtIsExceptional(const HBasicBlock& block, size_t pred_idx) {
- HBasicBlock* predecessor = block.GetPredecessor(pred_idx);
+ HBasicBlock* predecessor = block.GetPredecessors()[pred_idx];
if (!predecessor->EndsWithTryBoundary()) {
// Only edges from HTryBoundary can be exceptional.
return false;
@@ -343,7 +341,7 @@ void HGraph::SimplifyCatchBlocks() {
HBasicBlock* normal_block = catch_block->SplitBefore(catch_block->GetFirstInstruction());
for (size_t j = 0; j < catch_block->GetPredecessors().size(); ++j) {
if (!CheckIfPredecessorAtIsExceptional(*catch_block, j)) {
- catch_block->GetPredecessor(j)->ReplaceSuccessor(catch_block, normal_block);
+ catch_block->GetPredecessors()[j]->ReplaceSuccessor(catch_block, normal_block);
--j;
}
}
@@ -365,7 +363,7 @@ void HGraph::ComputeTryBlockInformation() {
// Infer try membership from the first predecessor. Having simplified loops,
// the first predecessor can never be a back edge and therefore it must have
// been visited already and had its try membership set.
- HBasicBlock* first_predecessor = block->GetPredecessor(0);
+ HBasicBlock* first_predecessor = block->GetPredecessors()[0];
DCHECK(!block->IsLoopHeader() || !block->GetLoopInformation()->IsBackEdge(*first_predecessor));
const HTryBoundary* try_entry = first_predecessor->ComputeTryEntryOfSuccessors();
if (try_entry != nullptr) {
@@ -385,7 +383,7 @@ void HGraph::SimplifyCFG() {
if (block == nullptr) continue;
if (block->NumberOfNormalSuccessors() > 1) {
for (size_t j = 0; j < block->GetSuccessors().size(); ++j) {
- HBasicBlock* successor = block->GetSuccessor(j);
+ HBasicBlock* successor = block->GetSuccessors()[j];
DCHECK(!successor->IsCatchBlock());
if (successor->GetPredecessors().size() > 1) {
SplitCriticalEdge(block, successor);
@@ -534,7 +532,7 @@ bool HLoopInformation::Populate() {
void HLoopInformation::Update() {
HGraph* graph = header_->GetGraph();
for (uint32_t id : blocks_.Indexes()) {
- HBasicBlock* block = graph->GetBlock(id);
+ HBasicBlock* block = graph->GetBlocks()[id];
// Reset loop information of non-header blocks inside the loop, except
// members of inner nested loops because those should already have been
// updated by their own LoopInformation.
@@ -608,8 +606,23 @@ static void UpdateInputsUsers(HInstruction* instruction) {
void HBasicBlock::ReplaceAndRemoveInstructionWith(HInstruction* initial,
HInstruction* replacement) {
DCHECK(initial->GetBlock() == this);
- InsertInstructionBefore(replacement, initial);
- initial->ReplaceWith(replacement);
+ if (initial->IsControlFlow()) {
+ // We can only replace a control flow instruction with another control flow instruction.
+ DCHECK(replacement->IsControlFlow());
+ DCHECK_EQ(replacement->GetId(), -1);
+ DCHECK_EQ(replacement->GetType(), Primitive::kPrimVoid);
+ DCHECK_EQ(initial->GetBlock(), this);
+ DCHECK_EQ(initial->GetType(), Primitive::kPrimVoid);
+ DCHECK(initial->GetUses().IsEmpty());
+ DCHECK(initial->GetEnvUses().IsEmpty());
+ replacement->SetBlock(this);
+ replacement->SetId(GetGraph()->GetNextInstructionId());
+ instructions_.InsertInstructionBefore(replacement, initial);
+ UpdateInputsUsers(replacement);
+ } else {
+ InsertInstructionBefore(replacement, initial);
+ initial->ReplaceWith(replacement);
+ }
RemoveInstruction(initial);
}
@@ -743,7 +756,6 @@ void HEnvironment::CopyFromWithLoopPhiAdjustment(HEnvironment* env,
}
void HEnvironment::RemoveAsUserOfInput(size_t index) const {
- DCHECK_LT(index, Size());
const HUserRecord<HEnvironment*>& user_record = vregs_[index];
user_record.GetInstruction()->RemoveEnvironmentUser(user_record.GetUseNode());
}
@@ -1435,7 +1447,7 @@ void HBasicBlock::MergeWith(HBasicBlock* other) {
// Update links to the successors of `other`.
successors_.clear();
while (!other->successors_.empty()) {
- HBasicBlock* successor = other->GetSuccessor(0);
+ HBasicBlock* successor = other->GetSuccessors()[0];
successor->ReplacePredecessor(other, this);
}
@@ -1472,7 +1484,7 @@ void HBasicBlock::MergeWithInlined(HBasicBlock* other) {
// Update links to the successors of `other`.
successors_.clear();
while (!other->successors_.empty()) {
- HBasicBlock* successor = other->GetSuccessor(0);
+ HBasicBlock* successor = other->GetSuccessors()[0];
successor->ReplacePredecessor(other, this);
}
@@ -1488,11 +1500,11 @@ void HBasicBlock::MergeWithInlined(HBasicBlock* other) {
void HBasicBlock::ReplaceWith(HBasicBlock* other) {
while (!GetPredecessors().empty()) {
- HBasicBlock* predecessor = GetPredecessor(0);
+ HBasicBlock* predecessor = GetPredecessors()[0];
predecessor->ReplaceSuccessor(this, other);
}
while (!GetSuccessors().empty()) {
- HBasicBlock* successor = GetSuccessor(0);
+ HBasicBlock* successor = GetSuccessors()[0];
successor->ReplacePredecessor(this, other);
}
for (HBasicBlock* dominated : GetDominatedBlocks()) {
@@ -1567,9 +1579,9 @@ HInstruction* HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
if (GetBlocks().size() == 3) {
// Simple case of an entry block, a body block, and an exit block.
// Put the body block's instruction into `invoke`'s block.
- HBasicBlock* body = GetBlock(1);
- DCHECK(GetBlock(0)->IsEntryBlock());
- DCHECK(GetBlock(2)->IsExitBlock());
+ HBasicBlock* body = GetBlocks()[1];
+ DCHECK(GetBlocks()[0]->IsEntryBlock());
+ DCHECK(GetBlocks()[2]->IsExitBlock());
DCHECK(!body->IsExitBlock());
HInstruction* last = body->GetLastInstruction();
@@ -1594,16 +1606,16 @@ HInstruction* HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
HBasicBlock* at = invoke->GetBlock();
HBasicBlock* to = at->SplitAfter(invoke);
- HBasicBlock* first = entry_block_->GetSuccessor(0);
+ HBasicBlock* first = entry_block_->GetSuccessors()[0];
DCHECK(!first->IsInLoop());
at->MergeWithInlined(first);
exit_block_->ReplaceWith(to);
// Update all predecessors of the exit block (now the `to` block)
// to not `HReturn` but `HGoto` instead.
- bool returns_void = to->GetPredecessor(0)->GetLastInstruction()->IsReturnVoid();
+ bool returns_void = to->GetPredecessors()[0]->GetLastInstruction()->IsReturnVoid();
if (to->GetPredecessors().size() == 1) {
- HBasicBlock* predecessor = to->GetPredecessor(0);
+ HBasicBlock* predecessor = to->GetPredecessors()[0];
HInstruction* last = predecessor->GetLastInstruction();
if (!returns_void) {
return_value = last->InputAt(0);
@@ -1873,6 +1885,35 @@ bool HInstruction::HasAnyEnvironmentUseBefore(HInstruction* other) {
return false;
}
+void HInvoke::SetIntrinsic(Intrinsics intrinsic,
+ IntrinsicNeedsEnvironmentOrCache needs_env_or_cache) {
+ intrinsic_ = intrinsic;
+ IntrinsicOptimizations opt(this);
+ if (needs_env_or_cache == kNoEnvironmentOrCache) {
+ opt.SetDoesNotNeedDexCache();
+ opt.SetDoesNotNeedEnvironment();
+ }
+}
+
+bool HInvoke::NeedsEnvironment() const {
+ if (!IsIntrinsic()) {
+ return true;
+ }
+ IntrinsicOptimizations opt(*this);
+ return !opt.GetDoesNotNeedEnvironment();
+}
+
+bool HInvokeStaticOrDirect::NeedsDexCache() const {
+ if (IsRecursive() || IsStringInit()) {
+ return false;
+ }
+ if (!IsIntrinsic()) {
+ return true;
+ }
+ IntrinsicOptimizations opt(*this);
+ return !opt.GetDoesNotNeedDexCache();
+}
+
void HInstruction::RemoveEnvironmentUsers() {
for (HUseIterator<HEnvironment*> use_it(GetEnvUses()); !use_it.Done(); use_it.Advance()) {
HUseListNode<HEnvironment*>* user_node = use_it.Current();
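
The new SetIntrinsic/NeedsEnvironment/NeedsDexCache implementations above consult a per-invoke flag word through IntrinsicOptimizations, which is defined in intrinsics.h and not shown in this diff; the pattern is roughly the following sketch, with made-up bit positions used purely for illustration:

#include <cstdint>

// Hypothetical sketch of the flag-word pattern behind IntrinsicOptimizations.
// Bit indices here are invented for illustration; the real layout lives in intrinsics.h.
class IntrinsicOptimizationsSketch {
 public:
  explicit IntrinsicOptimizationsSketch(uint32_t* value) : value_(value) {}

  bool GetDoesNotNeedDexCache() const { return IsBitSet(0); }
  void SetDoesNotNeedDexCache() { SetBit(0); }
  bool GetDoesNotNeedEnvironment() const { return IsBitSet(1); }
  void SetDoesNotNeedEnvironment() { SetBit(1); }

 private:
  bool IsBitSet(uint32_t bit) const { return (*value_ & (1u << bit)) != 0u; }
  void SetBit(uint32_t bit) { *value_ |= (1u << bit); }

  uint32_t* const value_;  // Backed by HInvoke::intrinsic_optimizations_.
};
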
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 849f876f36..939e62c6dd 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -81,12 +81,19 @@ static constexpr InvokeType kInvalidInvokeType = static_cast<InvokeType>(-1);
static constexpr uint32_t kNoDexPc = -1;
enum IfCondition {
- kCondEQ,
- kCondNE,
- kCondLT,
- kCondLE,
- kCondGT,
- kCondGE,
+ // All types.
+ kCondEQ, // ==
+ kCondNE, // !=
+ // Signed integers and floating-point numbers.
+ kCondLT, // <
+ kCondLE, // <=
+ kCondGT, // >
+ kCondGE, // >=
+ // Unsigned integers.
+ kCondB, // <
+ kCondBE, // <=
+ kCondA, // >
+ kCondAE, // >=
};
class HInstructionList : public ValueObject {
@@ -177,11 +184,6 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
ArenaAllocator* GetArena() const { return arena_; }
const ArenaVector<HBasicBlock*>& GetBlocks() const { return blocks_; }
- HBasicBlock* GetBlock(size_t id) const {
- DCHECK_LT(id, blocks_.size());
- return blocks_[id];
- }
-
bool IsInSsaForm() const { return in_ssa_form_; }
HBasicBlock* GetEntryBlock() const { return entry_block_; }
@@ -648,20 +650,10 @@ class HBasicBlock : public ArenaObject<kArenaAllocBasicBlock> {
return predecessors_;
}
- HBasicBlock* GetPredecessor(size_t pred_idx) const {
- DCHECK_LT(pred_idx, predecessors_.size());
- return predecessors_[pred_idx];
- }
-
const ArenaVector<HBasicBlock*>& GetSuccessors() const {
return successors_;
}
- HBasicBlock* GetSuccessor(size_t succ_idx) const {
- DCHECK_LT(succ_idx, successors_.size());
- return successors_[succ_idx];
- }
-
bool HasSuccessor(const HBasicBlock* block, size_t start_from = 0u) {
return ContainsElement(successors_, block, start_from);
}
@@ -797,18 +789,18 @@ class HBasicBlock : public ArenaObject<kArenaAllocBasicBlock> {
HBasicBlock* GetSinglePredecessor() const {
DCHECK_EQ(GetPredecessors().size(), 1u);
- return GetPredecessor(0);
+ return GetPredecessors()[0];
}
HBasicBlock* GetSingleSuccessor() const {
DCHECK_EQ(GetSuccessors().size(), 1u);
- return GetSuccessor(0);
+ return GetSuccessors()[0];
}
// Returns whether the first occurrence of `predecessor` in the list of
// predecessors is at index `idx`.
bool IsFirstIndexOfPredecessor(HBasicBlock* predecessor, size_t idx) const {
- DCHECK_EQ(GetPredecessor(idx), predecessor);
+ DCHECK_EQ(GetPredecessors()[idx], predecessor);
return GetPredecessorIndexOf(predecessor) == idx;
}
@@ -886,7 +878,7 @@ class HBasicBlock : public ArenaObject<kArenaAllocBasicBlock> {
bool IsLoopPreHeaderFirstPredecessor() const {
DCHECK(IsLoopHeader());
- return GetPredecessor(0) == GetLoopInformation()->GetPreHeader();
+ return GetPredecessors()[0] == GetLoopInformation()->GetPreHeader();
}
HLoopInformation* GetLoopInformation() const {
@@ -1003,11 +995,15 @@ class HLoopInformationOutwardIterator : public ValueObject {
};
#define FOR_EACH_CONCRETE_INSTRUCTION_COMMON(M) \
+ M(Above, Condition) \
+ M(AboveOrEqual, Condition) \
M(Add, BinaryOperation) \
M(And, BinaryOperation) \
M(ArrayGet, Instruction) \
M(ArrayLength, Instruction) \
M(ArraySet, Instruction) \
+ M(Below, Condition) \
+ M(BelowOrEqual, Condition) \
M(BooleanNot, UnaryOperation) \
M(BoundsCheck, Instruction) \
M(BoundType, Instruction) \
@@ -1089,7 +1085,8 @@ class HLoopInformationOutwardIterator : public ValueObject {
#define FOR_EACH_CONCRETE_INSTRUCTION_X86(M) \
M(X86ComputeBaseMethodAddress, Instruction) \
- M(X86LoadFromConstantTable, Instruction)
+ M(X86LoadFromConstantTable, Instruction) \
+ M(X86PackedSwitch, Instruction)
#define FOR_EACH_CONCRETE_INSTRUCTION_X86_64(M)
@@ -1559,12 +1556,10 @@ class HEnvironment : public ArenaObject<kArenaAllocEnvironment> {
void CopyFromWithLoopPhiAdjustment(HEnvironment* env, HBasicBlock* loop_header);
void SetRawEnvAt(size_t index, HInstruction* instruction) {
- DCHECK_LT(index, Size());
vregs_[index] = HUserRecord<HEnvironment*>(instruction);
}
HInstruction* GetInstructionAt(size_t index) const {
- DCHECK_LT(index, Size());
return vregs_[index].GetInstruction();
}
@@ -1575,12 +1570,10 @@ class HEnvironment : public ArenaObject<kArenaAllocEnvironment> {
HEnvironment* GetParent() const { return parent_; }
void SetLocationAt(size_t index, Location location) {
- DCHECK_LT(index, Size());
locations_[index] = location;
}
Location GetLocationAt(size_t index) const {
- DCHECK_LT(index, Size());
return locations_[index];
}
@@ -1610,7 +1603,6 @@ class HEnvironment : public ArenaObject<kArenaAllocEnvironment> {
void RecordEnvUse(HUseListNode<HEnvironment*>* env_use) {
DCHECK(env_use->GetUser() == this);
size_t index = env_use->GetIndex();
- DCHECK_LT(index, Size());
vregs_[index] = HUserRecord<HEnvironment*>(vregs_[index], env_use);
}
@@ -1656,6 +1648,11 @@ class ReferenceTypeInfo : ValueObject {
return GetTypeHandle()->IsObjectClass();
}
+ bool IsStringClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(IsValid());
+ return GetTypeHandle()->IsStringClass();
+ }
+
bool IsObjectArray() const SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(IsValid());
return IsArrayClass() && GetTypeHandle()->GetComponentType()->IsObjectClass();
@@ -1667,15 +1664,36 @@ class ReferenceTypeInfo : ValueObject {
}
bool IsArrayClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(IsValid());
return GetTypeHandle()->IsArrayClass();
}
+ bool IsPrimitiveArrayClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(IsValid());
+ return GetTypeHandle()->IsPrimitiveArray();
+ }
+
+ bool IsNonPrimitiveArrayClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(IsValid());
+ return GetTypeHandle()->IsArrayClass() && !GetTypeHandle()->IsPrimitiveArray();
+ }
+
bool CanArrayHold(ReferenceTypeInfo rti) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(IsValid());
if (!IsExact()) return false;
if (!IsArrayClass()) return false;
return GetTypeHandle()->GetComponentType()->IsAssignableFrom(rti.GetTypeHandle().Get());
}
+ bool CanArrayHoldValuesOf(ReferenceTypeInfo rti) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(IsValid());
+ if (!IsExact()) return false;
+ if (!IsArrayClass()) return false;
+ if (!rti.IsArrayClass()) return false;
+ return GetTypeHandle()->GetComponentType()->IsAssignableFrom(
+ rti.GetTypeHandle()->GetComponentType());
+ }
+
Handle<mirror::Class> GetTypeHandle() const { return type_handle_; }
bool IsSupertypeOf(ReferenceTypeInfo rti) const SHARED_REQUIRES(Locks::mutator_lock_) {
@@ -1715,7 +1733,7 @@ std::ostream& operator<<(std::ostream& os, const ReferenceTypeInfo& rhs);
class HInstruction : public ArenaObject<kArenaAllocInstruction> {
public:
- HInstruction(SideEffects side_effects, uint32_t dex_pc = kNoDexPc)
+ HInstruction(SideEffects side_effects, uint32_t dex_pc)
: previous_(nullptr),
next_(nullptr),
block_(nullptr),
@@ -1781,8 +1799,7 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
return true;
}
- virtual bool CanDoImplicitNullCheckOn(HInstruction* obj) const {
- UNUSED(obj);
+ virtual bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const {
return false;
}
@@ -1899,16 +1916,14 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
virtual bool CanBeMoved() const { return false; }
// Returns whether the two instructions are of the same kind.
- virtual bool InstructionTypeEquals(HInstruction* other) const {
- UNUSED(other);
+ virtual bool InstructionTypeEquals(HInstruction* other ATTRIBUTE_UNUSED) const {
return false;
}
// Returns whether any data encoded in the two instructions is equal.
// This method does not look at the inputs. Both instructions must be
// of the same type, otherwise the method has undefined behavior.
- virtual bool InstructionDataEquals(HInstruction* other) const {
- UNUSED(other);
+ virtual bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const {
return false;
}
@@ -2072,7 +2087,7 @@ class HBackwardInstructionIterator : public ValueObject {
template<size_t N>
class HTemplateInstruction: public HInstruction {
public:
- HTemplateInstruction<N>(SideEffects side_effects, uint32_t dex_pc = kNoDexPc)
+ HTemplateInstruction<N>(SideEffects side_effects, uint32_t dex_pc)
: HInstruction(side_effects, dex_pc), inputs_() {}
virtual ~HTemplateInstruction() {}
@@ -2099,7 +2114,7 @@ class HTemplateInstruction: public HInstruction {
template<>
class HTemplateInstruction<0>: public HInstruction {
public:
- explicit HTemplateInstruction<0>(SideEffects side_effects, uint32_t dex_pc = kNoDexPc)
+ explicit HTemplateInstruction<0>(SideEffects side_effects, uint32_t dex_pc)
: HInstruction(side_effects, dex_pc) {}
virtual ~HTemplateInstruction() {}
@@ -2125,7 +2140,7 @@ class HTemplateInstruction<0>: public HInstruction {
template<intptr_t N>
class HExpression : public HTemplateInstruction<N> {
public:
- HExpression<N>(Primitive::Type type, SideEffects side_effects, uint32_t dex_pc = kNoDexPc)
+ HExpression<N>(Primitive::Type type, SideEffects side_effects, uint32_t dex_pc)
: HTemplateInstruction<N>(side_effects, dex_pc), type_(type) {}
virtual ~HExpression() {}
@@ -2315,11 +2330,11 @@ class HIf : public HTemplateInstruction<1> {
bool IsControlFlow() const OVERRIDE { return true; }
HBasicBlock* IfTrueSuccessor() const {
- return GetBlock()->GetSuccessor(0);
+ return GetBlock()->GetSuccessors()[0];
}
HBasicBlock* IfFalseSuccessor() const {
- return GetBlock()->GetSuccessor(1);
+ return GetBlock()->GetSuccessors()[1];
}
DECLARE_INSTRUCTION(If);
@@ -2347,7 +2362,7 @@ class HTryBoundary : public HTemplateInstruction<0> {
bool IsControlFlow() const OVERRIDE { return true; }
// Returns the block's non-exceptional successor (index zero).
- HBasicBlock* GetNormalFlowSuccessor() const { return GetBlock()->GetSuccessor(0); }
+ HBasicBlock* GetNormalFlowSuccessor() const { return GetBlock()->GetSuccessors()[0]; }
// Returns whether `handler` is among its exception handlers (non-zero index
// successors).
@@ -2384,7 +2399,7 @@ class HExceptionHandlerIterator : public ValueObject {
: block_(*try_boundary.GetBlock()), index_(block_.NumberOfNormalSuccessors()) {}
bool Done() const { return index_ == block_.GetSuccessors().size(); }
- HBasicBlock* Current() const { return block_.GetSuccessor(index_); }
+ HBasicBlock* Current() const { return block_.GetSuccessors()[index_]; }
size_t CurrentSuccessorIndex() const { return index_; }
void Advance() { ++index_; }
@@ -2449,7 +2464,7 @@ class HPackedSwitch : public HTemplateInstruction<1> {
HBasicBlock* GetDefaultBlock() const {
// Last entry is the default block.
- return GetBlock()->GetSuccessor(num_entries_);
+ return GetBlock()->GetSuccessors()[num_entries_];
}
DECLARE_INSTRUCTION(PackedSwitch);
@@ -2471,8 +2486,7 @@ class HUnaryOperation : public HExpression<1> {
Primitive::Type GetResultType() const { return GetType(); }
bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
- UNUSED(other);
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
}
@@ -2542,8 +2556,7 @@ class HBinaryOperation : public HExpression<2> {
}
bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
- UNUSED(other);
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
}
@@ -2647,8 +2660,6 @@ class HEqual : public HCondition {
bool IsCommutative() const OVERRIDE { return true; }
- template <typename T> bool Compute(T x, T y) const { return x == y; }
-
HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
return GetBlock()->GetGraph()->GetIntConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
@@ -2669,6 +2680,8 @@ class HEqual : public HCondition {
}
private:
+ template <typename T> bool Compute(T x, T y) const { return x == y; }
+
DISALLOW_COPY_AND_ASSIGN(HEqual);
};
@@ -2679,8 +2692,6 @@ class HNotEqual : public HCondition {
bool IsCommutative() const OVERRIDE { return true; }
- template <typename T> bool Compute(T x, T y) const { return x != y; }
-
HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
return GetBlock()->GetGraph()->GetIntConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
@@ -2701,6 +2712,8 @@ class HNotEqual : public HCondition {
}
private:
+ template <typename T> bool Compute(T x, T y) const { return x != y; }
+
DISALLOW_COPY_AND_ASSIGN(HNotEqual);
};
@@ -2709,8 +2722,6 @@ class HLessThan : public HCondition {
HLessThan(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
: HCondition(first, second, dex_pc) {}
- template <typename T> bool Compute(T x, T y) const { return x < y; }
-
HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
return GetBlock()->GetGraph()->GetIntConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
@@ -2731,6 +2742,8 @@ class HLessThan : public HCondition {
}
private:
+ template <typename T> bool Compute(T x, T y) const { return x < y; }
+
DISALLOW_COPY_AND_ASSIGN(HLessThan);
};
@@ -2739,8 +2752,6 @@ class HLessThanOrEqual : public HCondition {
HLessThanOrEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
: HCondition(first, second, dex_pc) {}
- template <typename T> bool Compute(T x, T y) const { return x <= y; }
-
HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
return GetBlock()->GetGraph()->GetIntConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
@@ -2761,6 +2772,8 @@ class HLessThanOrEqual : public HCondition {
}
private:
+ template <typename T> bool Compute(T x, T y) const { return x <= y; }
+
DISALLOW_COPY_AND_ASSIGN(HLessThanOrEqual);
};
@@ -2769,8 +2782,6 @@ class HGreaterThan : public HCondition {
HGreaterThan(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
: HCondition(first, second, dex_pc) {}
- template <typename T> bool Compute(T x, T y) const { return x > y; }
-
HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
return GetBlock()->GetGraph()->GetIntConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
@@ -2791,6 +2802,8 @@ class HGreaterThan : public HCondition {
}
private:
+ template <typename T> bool Compute(T x, T y) const { return x > y; }
+
DISALLOW_COPY_AND_ASSIGN(HGreaterThan);
};
@@ -2799,8 +2812,6 @@ class HGreaterThanOrEqual : public HCondition {
HGreaterThanOrEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
: HCondition(first, second, dex_pc) {}
- template <typename T> bool Compute(T x, T y) const { return x >= y; }
-
HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
return GetBlock()->GetGraph()->GetIntConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
@@ -2821,9 +2832,138 @@ class HGreaterThanOrEqual : public HCondition {
}
private:
+ template <typename T> bool Compute(T x, T y) const { return x >= y; }
+
DISALLOW_COPY_AND_ASSIGN(HGreaterThanOrEqual);
};
+class HBelow : public HCondition {
+ public:
+ HBelow(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
+ : HCondition(first, second, dex_pc) {}
+
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(
+ Compute(static_cast<uint32_t>(x->GetValue()),
+ static_cast<uint32_t>(y->GetValue())), GetDexPc());
+ }
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(
+ Compute(static_cast<uint64_t>(x->GetValue()),
+ static_cast<uint64_t>(y->GetValue())), GetDexPc());
+ }
+
+ DECLARE_INSTRUCTION(Below);
+
+ IfCondition GetCondition() const OVERRIDE {
+ return kCondB;
+ }
+
+ IfCondition GetOppositeCondition() const OVERRIDE {
+ return kCondAE;
+ }
+
+ private:
+ template <typename T> bool Compute(T x, T y) const { return x < y; }
+
+ DISALLOW_COPY_AND_ASSIGN(HBelow);
+};
+
+class HBelowOrEqual : public HCondition {
+ public:
+ HBelowOrEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
+ : HCondition(first, second, dex_pc) {}
+
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(
+ Compute(static_cast<uint32_t>(x->GetValue()),
+ static_cast<uint32_t>(y->GetValue())), GetDexPc());
+ }
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(
+ Compute(static_cast<uint64_t>(x->GetValue()),
+ static_cast<uint64_t>(y->GetValue())), GetDexPc());
+ }
+
+ DECLARE_INSTRUCTION(BelowOrEqual);
+
+ IfCondition GetCondition() const OVERRIDE {
+ return kCondBE;
+ }
+
+ IfCondition GetOppositeCondition() const OVERRIDE {
+ return kCondA;
+ }
+
+ private:
+ template <typename T> bool Compute(T x, T y) const { return x <= y; }
+
+ DISALLOW_COPY_AND_ASSIGN(HBelowOrEqual);
+};
+
+class HAbove : public HCondition {
+ public:
+ HAbove(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
+ : HCondition(first, second, dex_pc) {}
+
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(
+ Compute(static_cast<uint32_t>(x->GetValue()),
+ static_cast<uint32_t>(y->GetValue())), GetDexPc());
+ }
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(
+ Compute(static_cast<uint64_t>(x->GetValue()),
+ static_cast<uint64_t>(y->GetValue())), GetDexPc());
+ }
+
+ DECLARE_INSTRUCTION(Above);
+
+ IfCondition GetCondition() const OVERRIDE {
+ return kCondA;
+ }
+
+ IfCondition GetOppositeCondition() const OVERRIDE {
+ return kCondBE;
+ }
+
+ private:
+ template <typename T> bool Compute(T x, T y) const { return x > y; }
+
+ DISALLOW_COPY_AND_ASSIGN(HAbove);
+};
+
+class HAboveOrEqual : public HCondition {
+ public:
+ HAboveOrEqual(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
+ : HCondition(first, second, dex_pc) {}
+
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(
+ Compute(static_cast<uint32_t>(x->GetValue()),
+ static_cast<uint32_t>(y->GetValue())), GetDexPc());
+ }
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetIntConstant(
+ Compute(static_cast<uint64_t>(x->GetValue()),
+ static_cast<uint64_t>(y->GetValue())), GetDexPc());
+ }
+
+ DECLARE_INSTRUCTION(AboveOrEqual);
+
+ IfCondition GetCondition() const OVERRIDE {
+ return kCondAE;
+ }
+
+ IfCondition GetOppositeCondition() const OVERRIDE {
+ return kCondB;
+ }
+
+ private:
+ template <typename T> bool Compute(T x, T y) const { return x >= y; }
+
+ DISALLOW_COPY_AND_ASSIGN(HAboveOrEqual);
+};
// Instruction to check how two inputs compare to each other.
// Result is 0 if input0 == input1, 1 if input0 > input1, or -1 if input0 < input1.
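
The casts in the Evaluate overloads of HBelow, HBelowOrEqual, HAbove and HAboveOrEqual added above are what separates them from their signed counterparts; a minimal self-contained sketch of the folding semantics (plain C++, not the HConstant machinery):

#include <cassert>
#include <cstdint>

// Minimal sketch: what HBelow/HAbove compute when folding integer constants.
// Signed -1 reinterpreted as uint32_t is 0xFFFFFFFF, so it is *above* 1.
static bool FoldBelow(int32_t x, int32_t y) {
  return static_cast<uint32_t>(x) < static_cast<uint32_t>(y);
}
static bool FoldAbove(int32_t x, int32_t y) {
  return static_cast<uint32_t>(x) > static_cast<uint32_t>(y);
}

int main() {
  assert(!FoldBelow(-1, 1));  // 0xFFFFFFFF < 1 is false in unsigned arithmetic.
  assert(FoldAbove(-1, 1));   // ...and 0xFFFFFFFF > 1 is true.
  assert(FoldBelow(0, 1));    // Small non-negative values behave as usual.
  return 0;
}
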
@@ -3034,11 +3174,7 @@ class HInvoke : public HInstruction {
public:
size_t InputCount() const OVERRIDE { return inputs_.size(); }
- // Runtime needs to walk the stack, so Dex -> Dex calls need to
- // know their environment.
- bool NeedsEnvironment() const OVERRIDE {
- return needs_environment_or_cache_ == kNeedsEnvironmentOrCache;
- }
+ bool NeedsEnvironment() const OVERRIDE;
void SetArgumentAt(size_t index, HInstruction* argument) {
SetRawInputAt(index, argument);
@@ -3062,10 +3198,7 @@ class HInvoke : public HInstruction {
return intrinsic_;
}
- void SetIntrinsic(Intrinsics intrinsic, IntrinsicNeedsEnvironmentOrCache needs_env_or_cache) {
- intrinsic_ = intrinsic;
- needs_environment_or_cache_ = needs_env_or_cache;
- }
+ void SetIntrinsic(Intrinsics intrinsic, IntrinsicNeedsEnvironmentOrCache needs_env_or_cache);
bool IsFromInlinedInvoke() const {
return GetEnvironment()->GetParent() != nullptr;
@@ -3073,6 +3206,16 @@ class HInvoke : public HInstruction {
bool CanThrow() const OVERRIDE { return true; }
+ uint32_t* GetIntrinsicOptimizations() {
+ return &intrinsic_optimizations_;
+ }
+
+ const uint32_t* GetIntrinsicOptimizations() const {
+ return &intrinsic_optimizations_;
+ }
+
+ bool IsIntrinsic() const { return intrinsic_ != Intrinsics::kNone; }
+
DECLARE_INSTRUCTION(Invoke);
protected:
@@ -3092,16 +3235,14 @@ class HInvoke : public HInstruction {
dex_method_index_(dex_method_index),
original_invoke_type_(original_invoke_type),
intrinsic_(Intrinsics::kNone),
- needs_environment_or_cache_(kNeedsEnvironmentOrCache) {
+ intrinsic_optimizations_(0) {
}
const HUserRecord<HInstruction*> InputRecordAt(size_t index) const OVERRIDE {
- DCHECK_LT(index, InputCount());
return inputs_[index];
}
void SetRawInputRecordAt(size_t index, const HUserRecord<HInstruction*>& input) OVERRIDE {
- DCHECK_LT(index, InputCount());
inputs_[index] = input;
}
@@ -3111,7 +3252,9 @@ class HInvoke : public HInstruction {
const uint32_t dex_method_index_;
const InvokeType original_invoke_type_;
Intrinsics intrinsic_;
- IntrinsicNeedsEnvironmentOrCache needs_environment_or_cache_;
+
+ // A magic word holding optimizations for intrinsics. See intrinsics.h.
+ uint32_t intrinsic_optimizations_;
private:
DISALLOW_COPY_AND_ASSIGN(HInvoke);
@@ -3244,8 +3387,7 @@ class HInvokeStaticOrDirect : public HInvoke {
target_method_(target_method),
dispatch_info_(dispatch_info) {}
- bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
- UNUSED(obj);
+ bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const OVERRIDE {
// We access the method via the dex cache so we can't do an implicit null check.
// TODO: for intrinsics we can generate implicit null checks.
return false;
@@ -3259,10 +3401,7 @@ class HInvokeStaticOrDirect : public HInvoke {
MethodLoadKind GetMethodLoadKind() const { return dispatch_info_.method_load_kind; }
CodePtrLocation GetCodePtrLocation() const { return dispatch_info_.code_ptr_location; }
bool IsRecursive() const { return GetMethodLoadKind() == MethodLoadKind::kRecursive; }
- bool NeedsDexCache() const OVERRIDE {
- if (intrinsic_ != Intrinsics::kNone) { return needs_environment_or_cache_; }
- return !IsRecursive() && !IsStringInit();
- }
+ bool NeedsDexCache() const OVERRIDE;
bool IsStringInit() const { return GetMethodLoadKind() == MethodLoadKind::kStringInit; }
uint32_t GetCurrentMethodInputIndex() const { return GetNumberOfArguments(); }
bool HasMethodAddress() const { return GetMethodLoadKind() == MethodLoadKind::kDirectAddress; }
@@ -3686,8 +3825,7 @@ class HDivZeroCheck : public HExpression<1> {
bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
- UNUSED(other);
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
}
@@ -3957,8 +4095,7 @@ class HNot : public HUnaryOperation {
: HUnaryOperation(result_type, input, dex_pc) {}
bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
- UNUSED(other);
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
}
@@ -3983,8 +4120,7 @@ class HBooleanNot : public HUnaryOperation {
: HUnaryOperation(Primitive::Type::kPrimBoolean, input, dex_pc) {}
bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
- UNUSED(other);
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
}
@@ -4125,12 +4261,10 @@ class HPhi : public HInstruction {
protected:
const HUserRecord<HInstruction*> InputRecordAt(size_t index) const OVERRIDE {
- DCHECK_LE(index, InputCount());
return inputs_[index];
}
void SetRawInputRecordAt(size_t index, const HUserRecord<HInstruction*>& input) OVERRIDE {
- DCHECK_LE(index, InputCount());
inputs_[index] = input;
}
@@ -4152,8 +4286,7 @@ class HNullCheck : public HExpression<1> {
}
bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
- UNUSED(other);
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
}
@@ -4210,7 +4343,7 @@ class HInstanceFieldGet : public HExpression<1> {
uint32_t field_idx,
const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache,
- uint32_t dex_pc = kNoDexPc)
+ uint32_t dex_pc)
: HExpression(
field_type,
SideEffects::FieldReadOfType(field_type, is_volatile), dex_pc),
@@ -4256,7 +4389,7 @@ class HInstanceFieldSet : public HTemplateInstruction<2> {
uint32_t field_idx,
const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache,
- uint32_t dex_pc = kNoDexPc)
+ uint32_t dex_pc)
: HTemplateInstruction(
SideEffects::FieldWriteOfType(field_type, is_volatile), dex_pc),
field_info_(field_offset, field_type, is_volatile, field_idx, dex_file, dex_cache),
@@ -4291,19 +4424,17 @@ class HArrayGet : public HExpression<2> {
HArrayGet(HInstruction* array,
HInstruction* index,
Primitive::Type type,
- uint32_t dex_pc = kNoDexPc)
+ uint32_t dex_pc)
: HExpression(type, SideEffects::ArrayReadOfType(type), dex_pc) {
SetRawInputAt(0, array);
SetRawInputAt(1, index);
}
bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
- UNUSED(other);
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
}
- bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
- UNUSED(obj);
+ bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const OVERRIDE {
// TODO: We can be smarter here.
// Currently, the array access is always preceded by an ArrayLength or a NullCheck
// which generates the implicit null check. There are cases when these can be removed
@@ -4351,8 +4482,7 @@ class HArraySet : public HTemplateInstruction<3> {
// Can throw ArrayStoreException.
bool CanThrow() const OVERRIDE { return needs_type_check_; }
- bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
- UNUSED(obj);
+ bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const OVERRIDE {
// TODO: Same as for ArrayGet.
return false;
}
@@ -4407,7 +4537,7 @@ class HArraySet : public HTemplateInstruction<3> {
class HArrayLength : public HExpression<1> {
public:
- explicit HArrayLength(HInstruction* array, uint32_t dex_pc = kNoDexPc)
+ HArrayLength(HInstruction* array, uint32_t dex_pc)
: HExpression(Primitive::kPrimInt, SideEffects::None(), dex_pc) {
// Note that arrays do not change length, so the instruction does not
// depend on any write.
@@ -4415,8 +4545,7 @@ class HArrayLength : public HExpression<1> {
}
bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
- UNUSED(other);
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
}
bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
@@ -4439,8 +4568,7 @@ class HBoundsCheck : public HExpression<2> {
}
bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
- UNUSED(other);
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
}
@@ -4513,20 +4641,29 @@ class HLoadClass : public HExpression<1> {
uint16_t type_index,
const DexFile& dex_file,
bool is_referrers_class,
- uint32_t dex_pc)
+ uint32_t dex_pc,
+ bool needs_access_check)
: HExpression(Primitive::kPrimNot, SideEffectsForArchRuntimeCalls(), dex_pc),
type_index_(type_index),
dex_file_(dex_file),
is_referrers_class_(is_referrers_class),
generate_clinit_check_(false),
+ needs_access_check_(needs_access_check),
loaded_class_rti_(ReferenceTypeInfo::CreateInvalid()) {
+ // The referrer's class never needs an access check. We never inline unverified
+ // methods, so we cannot end up in this situation.
+ DCHECK(!is_referrers_class_ || !needs_access_check_);
SetRawInputAt(0, current_method);
}
bool CanBeMoved() const OVERRIDE { return true; }
bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
- return other->AsLoadClass()->type_index_ == type_index_;
+ // Note that we don't need to test for generate_clinit_check_.
+ // Whether or not we need to generate the clinit check is processed in
+ // prepare_for_register_allocator based on existing HInvokes and HClinitChecks.
+ return other->AsLoadClass()->type_index_ == type_index_ &&
+ other->AsLoadClass()->needs_access_check_ == needs_access_check_;
}
size_t ComputeHashCode() const OVERRIDE { return type_index_; }
@@ -4544,13 +4681,16 @@ class HLoadClass : public HExpression<1> {
bool MustGenerateClinitCheck() const {
return generate_clinit_check_;
}
-
void SetMustGenerateClinitCheck(bool generate_clinit_check) {
generate_clinit_check_ = generate_clinit_check;
}
bool CanCallRuntime() const {
- return MustGenerateClinitCheck() || !is_referrers_class_;
+ return MustGenerateClinitCheck() || !is_referrers_class_ || needs_access_check_;
+ }
+
+ bool NeedsAccessCheck() const {
+ return needs_access_check_;
}
bool CanThrow() const OVERRIDE {
@@ -4586,6 +4726,7 @@ class HLoadClass : public HExpression<1> {
// Whether this instruction must generate the initialization check.
// Used for code generation.
bool generate_clinit_check_;
+ bool needs_access_check_;
ReferenceTypeInfo loaded_class_rti_;
@@ -4641,8 +4782,7 @@ class HClinitCheck : public HExpression<1> {
}
bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
- UNUSED(other);
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
}
@@ -4669,7 +4809,7 @@ class HStaticFieldGet : public HExpression<1> {
uint32_t field_idx,
const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache,
- uint32_t dex_pc = kNoDexPc)
+ uint32_t dex_pc)
: HExpression(
field_type,
SideEffects::FieldReadOfType(field_type, is_volatile), dex_pc),
@@ -4712,7 +4852,7 @@ class HStaticFieldSet : public HTemplateInstruction<2> {
uint32_t field_idx,
const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache,
- uint32_t dex_pc = kNoDexPc)
+ uint32_t dex_pc)
: HTemplateInstruction(
SideEffects::FieldWriteOfType(field_type, is_volatile), dex_pc),
field_info_(field_offset, field_type, is_volatile, field_idx, dex_file, dex_cache),
@@ -4897,6 +5037,7 @@ class HThrow : public HTemplateInstruction<1> {
* or `HCheckCast`.
*/
enum class TypeCheckKind {
+ kUnresolvedCheck, // Check against an unresolved type.
kExactCheck, // Can do a single class compare.
kClassHierarchyCheck, // Can just walk the super class chain.
kAbstractClassCheck, // Can just walk the super class chain, starting one up.
@@ -5233,7 +5374,6 @@ class HParallelMove : public HTemplateInstruction<0> {
}
MoveOperands* MoveOperandsAt(size_t index) {
- DCHECK_LT(index, moves_.size());
return &moves_[index];
}
@@ -5260,7 +5400,7 @@ class HGraphVisitor : public ValueObject {
explicit HGraphVisitor(HGraph* graph) : graph_(graph) {}
virtual ~HGraphVisitor() {}
- virtual void VisitInstruction(HInstruction* instruction) { UNUSED(instruction); }
+ virtual void VisitInstruction(HInstruction* instruction ATTRIBUTE_UNUSED) {}
virtual void VisitBasicBlock(HBasicBlock* block);
// Visit the graph following basic block insertion order.
@@ -5307,7 +5447,7 @@ class HInsertionOrderIterator : public ValueObject {
explicit HInsertionOrderIterator(const HGraph& graph) : graph_(graph), index_(0) {}
bool Done() const { return index_ == graph_.GetBlocks().size(); }
- HBasicBlock* Current() const { return graph_.GetBlock(index_); }
+ HBasicBlock* Current() const { return graph_.GetBlocks()[index_]; }
void Advance() { ++index_; }
private:
@@ -5433,7 +5573,6 @@ class HBlocksInLoopReversePostOrderIterator : public ValueObject {
: blocks_in_loop_(info.GetBlocks()),
blocks_(info.GetHeader()->GetGraph()->GetReversePostOrder()),
index_(0) {
- DCHECK(!blocks_.empty());
if (!blocks_in_loop_.IsBitSet(blocks_[index_]->GetBlockId())) {
Advance();
}
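A recurring change in the hunks above swaps the statement-style UNUSED(x) macro in function bodies for an ATTRIBUTE_UNUSED annotation on the parameter itself. A minimal sketch of the difference, assuming GCC/Clang-style attributes behind the macros (the definitions below are illustrative stand-ins, not ART's base/macros.h):

// Illustrative stand-ins for the real macros.
#define ATTRIBUTE_UNUSED __attribute__((unused))
#define UNUSED(x) ((void)(x))

struct HInstructionSketch {};

struct OldStyle {
  // Old pattern: accept the parameter, then discard it with a statement in the body.
  bool CanDoImplicitNullCheckOn(HInstructionSketch* obj) const {
    UNUSED(obj);
    return false;
  }
};

struct NewStyle {
  // New pattern: mark the parameter unused at the declaration; the body carries no noise.
  bool CanDoImplicitNullCheckOn(HInstructionSketch* obj ATTRIBUTE_UNUSED) const {
    return false;
  }
};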
diff --git a/compiler/optimizing/nodes_x86.h b/compiler/optimizing/nodes_x86.h
index ddc5730215..556217bf74 100644
--- a/compiler/optimizing/nodes_x86.h
+++ b/compiler/optimizing/nodes_x86.h
@@ -23,7 +23,8 @@ namespace art {
class HX86ComputeBaseMethodAddress : public HExpression<0> {
public:
// Treat the value as an int32_t, but it is really a 32 bit native pointer.
- HX86ComputeBaseMethodAddress() : HExpression(Primitive::kPrimInt, SideEffects::None()) {}
+ HX86ComputeBaseMethodAddress()
+ : HExpression(Primitive::kPrimInt, SideEffects::None(), kNoDexPc) {}
DECLARE_INSTRUCTION(X86ComputeBaseMethodAddress);
@@ -37,7 +38,7 @@ class HX86LoadFromConstantTable : public HExpression<2> {
HX86LoadFromConstantTable(HX86ComputeBaseMethodAddress* method_base,
HConstant* constant,
bool needs_materialization = true)
- : HExpression(constant->GetType(), SideEffects::None()),
+ : HExpression(constant->GetType(), SideEffects::None(), kNoDexPc),
needs_materialization_(needs_materialization) {
SetRawInputAt(0, method_base);
SetRawInputAt(1, constant);
@@ -61,6 +62,45 @@ class HX86LoadFromConstantTable : public HExpression<2> {
DISALLOW_COPY_AND_ASSIGN(HX86LoadFromConstantTable);
};
+// X86 version of HPackedSwitch that holds a pointer to the base method address.
+class HX86PackedSwitch : public HTemplateInstruction<2> {
+ public:
+ HX86PackedSwitch(int32_t start_value,
+ int32_t num_entries,
+ HInstruction* input,
+ HX86ComputeBaseMethodAddress* method_base,
+ uint32_t dex_pc)
+ : HTemplateInstruction(SideEffects::None(), dex_pc),
+ start_value_(start_value),
+ num_entries_(num_entries) {
+ SetRawInputAt(0, input);
+ SetRawInputAt(1, method_base);
+ }
+
+ bool IsControlFlow() const OVERRIDE { return true; }
+
+ int32_t GetStartValue() const { return start_value_; }
+
+ int32_t GetNumEntries() const { return num_entries_; }
+
+ HX86ComputeBaseMethodAddress* GetBaseMethodAddress() const {
+ return InputAt(1)->AsX86ComputeBaseMethodAddress();
+ }
+
+ HBasicBlock* GetDefaultBlock() const {
+ // Last entry is the default block.
+ return GetBlock()->GetSuccessors()[num_entries_];
+ }
+
+ DECLARE_INSTRUCTION(X86PackedSwitch);
+
+ private:
+ const int32_t start_value_;
+ const int32_t num_entries_;
+
+ DISALLOW_COPY_AND_ASSIGN(HX86PackedSwitch);
+};
+
} // namespace art
#endif // ART_COMPILER_OPTIMIZING_NODES_X86_H_
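As a rough illustration of how the new HX86PackedSwitch is meant to be consumed, an x86-specific fixup pass could rewrite a generic HPackedSwitch once a base-method-address node is available. The helper below is a sketch under that assumption and is not standalone: it relies on ART's node classes, only the accessors visible in the diff are taken as given, and everything else is illustrative.

// Sketch only, not ART's actual pass. Replaces a generic packed switch with the
// x86 variant so codegen can emit a PC-relative jump table through `base`.
static void ReplaceWithX86PackedSwitch(HGraph* graph,
                                       HPackedSwitch* generic_switch,
                                       HX86ComputeBaseMethodAddress* base) {
  HX86PackedSwitch* x86_switch = new (graph->GetArena()) HX86PackedSwitch(
      generic_switch->GetStartValue(),
      generic_switch->GetNumEntries(),
      generic_switch->InputAt(0),   // the value being switched on
      base,                         // becomes input 1, read back via GetBaseMethodAddress()
      generic_switch->GetDexPc());
  generic_switch->GetBlock()->ReplaceAndRemoveInstructionWith(generic_switch, x86_switch);
}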
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 3e982dca23..17a4743290 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -31,6 +31,7 @@
#include "base/arena_allocator.h"
#include "base/arena_containers.h"
#include "base/dumpable.h"
+#include "base/macros.h"
#include "base/timing_logger.h"
#include "boolean_simplifier.h"
#include "bounds_check_elimination.h"
@@ -168,13 +169,13 @@ class PassObserver : public ValueObject {
if (kIsDebugBuild) {
if (!graph_in_bad_state_) {
if (graph_->IsInSsaForm()) {
- SSAChecker checker(graph_->GetArena(), graph_);
+ SSAChecker checker(graph_);
checker.Run();
if (!checker.IsValid()) {
LOG(FATAL) << "Error after " << pass_name << ": " << Dumpable<SSAChecker>(checker);
}
} else {
- GraphChecker checker(graph_->GetArena(), graph_);
+ GraphChecker checker(graph_);
checker.Run();
if (!checker.IsValid()) {
LOG(FATAL) << "Error after " << pass_name << ": " << Dumpable<GraphChecker>(checker);
@@ -310,9 +311,6 @@ class OptimizingCompiler FINAL : public Compiler {
std::unique_ptr<std::ostream> visualizer_output_;
- // Delegate to Quick in case the optimizing compiler cannot compile a method.
- std::unique_ptr<Compiler> delegate_;
-
DISALLOW_COPY_AND_ASSIGN(OptimizingCompiler);
};
@@ -321,11 +319,9 @@ static const int kMaximumCompilationTimeBeforeWarning = 100; /* ms */
OptimizingCompiler::OptimizingCompiler(CompilerDriver* driver)
: Compiler(driver, kMaximumCompilationTimeBeforeWarning),
run_optimizations_(
- driver->GetCompilerOptions().GetCompilerFilter() != CompilerOptions::kTime),
- delegate_(Create(driver, Compiler::Kind::kQuick)) {}
+ driver->GetCompilerOptions().GetCompilerFilter() != CompilerOptions::kTime) {}
void OptimizingCompiler::Init() {
- delegate_->Init();
// Enable C1visualizer output. Must be done in Init() because the compiler
// driver is not fully initialized when passed to the compiler's constructor.
CompilerDriver* driver = GetCompilerDriver();
@@ -344,7 +340,6 @@ void OptimizingCompiler::Init() {
}
void OptimizingCompiler::UnInit() const {
- delegate_->UnInit();
}
OptimizingCompiler::~OptimizingCompiler() {
@@ -353,8 +348,7 @@ OptimizingCompiler::~OptimizingCompiler() {
}
}
-void OptimizingCompiler::InitCompilationUnit(CompilationUnit& cu) const {
- delegate_->InitCompilationUnit(cu);
+void OptimizingCompiler::InitCompilationUnit(CompilationUnit& cu ATTRIBUTE_UNUSED) const {
}
bool OptimizingCompiler::CanCompileMethod(uint32_t method_idx ATTRIBUTE_UNUSED,
@@ -364,7 +358,8 @@ bool OptimizingCompiler::CanCompileMethod(uint32_t method_idx ATTRIBUTE_UNUSED,
}
static bool IsInstructionSetSupported(InstructionSet instruction_set) {
- return instruction_set == kArm64
+ return (instruction_set == kArm && !kArm32QuickCodeUseSoftFloat)
+ || instruction_set == kArm64
|| (instruction_set == kThumb2 && !kArm32QuickCodeUseSoftFloat)
|| instruction_set == kMips64
|| instruction_set == kX86
@@ -541,6 +536,7 @@ static ArrayRef<const uint8_t> AlignVectorSize(ArenaVector<uint8_t>& vector) {
return ArrayRef<const uint8_t>(vector);
}
+NO_INLINE // Avoid increasing caller's frame size by large stack-allocated objects.
static void AllocateRegisters(HGraph* graph,
CodeGenerator* codegen,
PassObserver* pass_observer) {
@@ -670,11 +666,11 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite
jobject class_loader,
const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache) const {
- UNUSED(invoke_type);
std::string method_name = PrettyMethod(method_idx, dex_file);
MaybeRecordStat(MethodCompilationStat::kAttemptCompilation);
CompilerDriver* compiler_driver = GetCompilerDriver();
InstructionSet instruction_set = compiler_driver->GetInstructionSet();
+
// Always use the thumb2 assembler: some runtime functionality (like implicit stack
// overflow checks) assume thumb2.
if (instruction_set == kArm) {
@@ -716,9 +712,6 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite
&arena, dex_file, method_idx, requires_barrier, compiler_driver->GetInstructionSet(),
kInvalidInvokeType, compiler_driver->GetCompilerOptions().GetDebuggable());
- // For testing purposes, we put a special marker on method names that should be compiled
- // with this compiler. This makes sure we're not regressing.
- bool shouldCompile = method_name.find("$opt$") != std::string::npos;
bool shouldOptimize = method_name.find("$opt$reg$") != std::string::npos && run_optimizations_;
std::unique_ptr<CodeGenerator> codegen(
@@ -727,7 +720,6 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite
*compiler_driver->GetInstructionSetFeatures(),
compiler_driver->GetCompilerOptions()));
if (codegen.get() == nullptr) {
- CHECK(!shouldCompile) << "Could not find code generator for optimizing compiler";
MaybeRecordStat(MethodCompilationStat::kNotCompiledNoCodegen);
return nullptr;
}
@@ -768,8 +760,6 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite
{
PassScope scope(HGraphBuilder::kBuilderPassName, &pass_observer);
if (!builder.BuildGraph(*code_item)) {
- DCHECK(!(IsCompilingWithCoreImage() && shouldCompile))
- << "Could not build graph in optimizing compiler";
pass_observer.SetGraphInBadState();
return nullptr;
}
@@ -862,15 +852,16 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
}
}
- if (method != nullptr) {
- return method;
+ if (kIsDebugBuild &&
+ IsCompilingWithCoreImage() &&
+ IsInstructionSetSupported(compiler_driver->GetInstructionSet())) {
+ // For testing purposes, we put a special marker on method names that should be compiled
+ // with this compiler. This makes sure we're not regressing.
+ std::string method_name = PrettyMethod(method_idx, dex_file);
+ bool shouldCompile = method_name.find("$opt$") != std::string::npos;
+ DCHECK((method != nullptr) || !shouldCompile) << "Didn't compile " << method_name;
}
- method = delegate_->Compile(code_item, access_flags, invoke_type, class_def_idx, method_idx,
- jclass_loader, dex_file, dex_cache);
- if (method != nullptr) {
- MaybeRecordStat(MethodCompilationStat::kCompiledQuick);
- }
return method;
}
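The NO_INLINE added to AllocateRegisters above keeps that helper's large stack-allocated objects out of the caller's frame. A small standalone sketch of the effect, assuming NO_INLINE expands to a noinline attribute (the buffer size and helper are made up for illustration):

#include <cstring>

#define NO_INLINE __attribute__((noinline))  // illustrative stand-in for base/macros.h

// Because the helper is never inlined, the 64 KiB scratch buffer occupies stack space
// only while the call is active, instead of being merged into the caller's frame.
NO_INLINE static std::size_t ProcessWithLargeScratch(const char* input) {
  char scratch[64 * 1024];
  std::strncpy(scratch, input, sizeof(scratch) - 1);
  scratch[sizeof(scratch) - 1] = '\0';
  return std::strlen(scratch);
}

int main() {
  return ProcessWithLargeScratch("hello") == 5 ? 0 : 1;
}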
diff --git a/compiler/optimizing/optimizing_compiler_stats.h b/compiler/optimizing/optimizing_compiler_stats.h
index df45c8e890..6375cf1a56 100644
--- a/compiler/optimizing/optimizing_compiler_stats.h
+++ b/compiler/optimizing/optimizing_compiler_stats.h
@@ -29,7 +29,6 @@ enum MethodCompilationStat {
kAttemptCompilation = 0,
kCompiledBaseline,
kCompiledOptimized,
- kCompiledQuick,
kInlinedInvoke,
kInstructionSimplifications,
kInstructionSimplificationsArch,
@@ -74,14 +73,11 @@ class OptimizingCompilerStats {
compile_stats_[kCompiledBaseline] * 100 / compile_stats_[kAttemptCompilation];
size_t optimized_percent =
compile_stats_[kCompiledOptimized] * 100 / compile_stats_[kAttemptCompilation];
- size_t quick_percent =
- compile_stats_[kCompiledQuick] * 100 / compile_stats_[kAttemptCompilation];
std::ostringstream oss;
oss << "Attempted compilation of " << compile_stats_[kAttemptCompilation] << " methods: ";
oss << unoptimized_percent << "% (" << compile_stats_[kCompiledBaseline] << ") unoptimized, ";
oss << optimized_percent << "% (" << compile_stats_[kCompiledOptimized] << ") optimized, ";
- oss << quick_percent << "% (" << compile_stats_[kCompiledQuick] << ") quick.";
LOG(INFO) << oss.str();
@@ -100,7 +96,6 @@ class OptimizingCompilerStats {
case kAttemptCompilation : return "kAttemptCompilation";
case kCompiledBaseline : return "kCompiledBaseline";
case kCompiledOptimized : return "kCompiledOptimized";
- case kCompiledQuick : return "kCompiledQuick";
case kInlinedInvoke : return "kInlinedInvoke";
case kInstructionSimplifications: return "kInstructionSimplifications";
case kInstructionSimplificationsArch: return "kInstructionSimplificationsArch";
diff --git a/compiler/optimizing/parallel_move_resolver.cc b/compiler/optimizing/parallel_move_resolver.cc
index fce776920d..30bcf19c64 100644
--- a/compiler/optimizing/parallel_move_resolver.cc
+++ b/compiler/optimizing/parallel_move_resolver.cc
@@ -125,7 +125,6 @@ MoveOperands* ParallelMoveResolverWithSwap::PerformMove(size_t index) {
// which means that a call to PerformMove could change any source operand
// in the move graph.
- DCHECK_LT(index, moves_.size());
MoveOperands* move = moves_[index];
DCHECK(!move->IsPending());
if (move->IsRedundant()) {
@@ -406,7 +405,6 @@ void ParallelMoveResolverNoSwap::PerformMove(size_t index) {
// we will update source operand in the move graph to reduce dependencies in
// the graph.
- DCHECK_LT(index, moves_.size());
MoveOperands* move = moves_[index];
DCHECK(!move->IsPending());
DCHECK(!move->IsEliminated());
diff --git a/compiler/optimizing/parallel_move_test.cc b/compiler/optimizing/parallel_move_test.cc
index da91cb811d..46e6f3e5d0 100644
--- a/compiler/optimizing/parallel_move_test.cc
+++ b/compiler/optimizing/parallel_move_test.cc
@@ -56,7 +56,6 @@ class TestParallelMoveResolverWithSwap : public ParallelMoveResolverWithSwap {
: ParallelMoveResolverWithSwap(allocator) {}
void EmitMove(size_t index) OVERRIDE {
- DCHECK_LT(index, moves_.size());
MoveOperands* move = moves_[index];
if (!message_.str().empty()) {
message_ << " ";
@@ -69,7 +68,6 @@ class TestParallelMoveResolverWithSwap : public ParallelMoveResolverWithSwap {
}
void EmitSwap(size_t index) OVERRIDE {
- DCHECK_LT(index, moves_.size());
MoveOperands* move = moves_[index];
if (!message_.str().empty()) {
message_ << " ";
@@ -129,7 +127,6 @@ class TestParallelMoveResolverNoSwap : public ParallelMoveResolverNoSwap {
void FreeScratchLocation(Location loc ATTRIBUTE_UNUSED) OVERRIDE {}
void EmitMove(size_t index) OVERRIDE {
- DCHECK_LT(index, moves_.size());
MoveOperands* move = moves_[index];
if (!message_.str().empty()) {
message_ << " ";
diff --git a/compiler/optimizing/pretty_printer.h b/compiler/optimizing/pretty_printer.h
index 34850a564c..429e6e3d3f 100644
--- a/compiler/optimizing/pretty_printer.h
+++ b/compiler/optimizing/pretty_printer.h
@@ -131,7 +131,7 @@ class StringPrettyPrinter : public HPrettyPrinter {
PrintString(" ");
PrintInt(gota->GetId());
PrintString(": Goto ");
- PrintInt(current_block_->GetSuccessor(0)->GetBlockId());
+ PrintInt(current_block_->GetSuccessors()[0]->GetBlockId());
PrintNewLine();
}
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index d22f2540ad..a1feaf77bd 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -121,8 +121,9 @@ void ReferenceTypePropagation::Run() {
if (instr->IsBoundType()) {
DCHECK(instr->AsBoundType()->GetUpperBound().IsValid());
} else if (instr->IsLoadClass()) {
- DCHECK(instr->AsLoadClass()->GetReferenceTypeInfo().IsExact());
- DCHECK(instr->AsLoadClass()->GetLoadedClassRTI().IsValid());
+ HLoadClass* cls = instr->AsLoadClass();
+ DCHECK(cls->GetReferenceTypeInfo().IsExact());
+ DCHECK(!cls->GetLoadedClassRTI().IsValid() || cls->GetLoadedClassRTI().IsExact());
} else if (instr->IsNullCheck()) {
DCHECK(instr->GetReferenceTypeInfo().IsEqual(instr->InputAt(0)->GetReferenceTypeInfo()))
<< "NullCheck " << instr->GetReferenceTypeInfo()
@@ -168,6 +169,7 @@ static HBoundType* CreateBoundType(ArenaAllocator* arena,
SHARED_REQUIRES(Locks::mutator_lock_) {
ReferenceTypeInfo obj_rti = obj->GetReferenceTypeInfo();
ReferenceTypeInfo class_rti = load_class->GetLoadedClassRTI();
+ DCHECK(class_rti.IsValid());
HBoundType* bound_type = new (arena) HBoundType(obj, class_rti, upper_can_be_null);
// Narrow the type as much as possible.
if (class_rti.GetTypeHandle()->CannotBeAssignedFromOtherTypes()) {
@@ -316,6 +318,15 @@ void ReferenceTypePropagation::BoundTypeForIfInstanceOf(HBasicBlock* block) {
return;
}
+ HLoadClass* load_class = instanceOf->InputAt(1)->AsLoadClass();
+ ReferenceTypeInfo class_rti = load_class->GetLoadedClassRTI();
+ {
+ ScopedObjectAccess soa(Thread::Current());
+ if (!class_rti.IsValid()) {
+ // We have loaded an unresolved class. Don't bother bounding the type.
+ return;
+ }
+ }
// We only need to bound the type if we have uses in the relevant block.
// So start with null and create the HBoundType lazily, only if it's needed.
HBoundType* bound_type = nullptr;
@@ -336,8 +347,6 @@ void ReferenceTypePropagation::BoundTypeForIfInstanceOf(HBasicBlock* block) {
if (instanceOfTrueBlock->Dominates(user->GetBlock())) {
if (bound_type == nullptr) {
ScopedObjectAccess soa(Thread::Current());
- HLoadClass* load_class = instanceOf->InputAt(1)->AsLoadClass();
- ReferenceTypeInfo class_rti = load_class->GetLoadedClassRTI();
HInstruction* insert_point = instanceOfTrueBlock->GetFirstInstruction();
if (ShouldCreateBoundType(insert_point, obj, class_rti, nullptr, instanceOfTrueBlock)) {
bound_type = CreateBoundType(
@@ -475,10 +484,10 @@ void RTPVisitor::VisitLoadClass(HLoadClass* instr) {
// Get type from dex cache assuming it was populated by the verifier.
mirror::Class* resolved_class = dex_cache->GetResolvedType(instr->GetTypeIndex());
// TODO: investigating why we are still getting unresolved classes: b/22821472.
- ReferenceTypeInfo::TypeHandle handle = (resolved_class != nullptr)
- ? handles_->NewHandle(resolved_class)
- : object_class_handle_;
- instr->SetLoadedClassRTI(ReferenceTypeInfo::Create(handle, /* is_exact */ true));
+ if (resolved_class != nullptr) {
+ instr->SetLoadedClassRTI(ReferenceTypeInfo::Create(
+ handles_->NewHandle(resolved_class), /* is_exact */ true));
+ }
instr->SetReferenceTypeInfo(ReferenceTypeInfo::Create(class_class_handle_, /* is_exact */ true));
}
@@ -517,6 +526,15 @@ void RTPVisitor::VisitFakeString(HFakeString* instr) {
}
void RTPVisitor::VisitCheckCast(HCheckCast* check_cast) {
+ HLoadClass* load_class = check_cast->InputAt(1)->AsLoadClass();
+ ReferenceTypeInfo class_rti = load_class->GetLoadedClassRTI();
+ {
+ ScopedObjectAccess soa(Thread::Current());
+ if (!class_rti.IsValid()) {
+ // We have loaded an unresolved class. Don't bother bounding the type.
+ return;
+ }
+ }
HInstruction* obj = check_cast->InputAt(0);
HBoundType* bound_type = nullptr;
for (HUseIterator<HInstruction*> it(obj->GetUses()); !it.Done(); it.Advance()) {
@@ -524,8 +542,6 @@ void RTPVisitor::VisitCheckCast(HCheckCast* check_cast) {
if (check_cast->StrictlyDominates(user)) {
if (bound_type == nullptr) {
ScopedObjectAccess soa(Thread::Current());
- HLoadClass* load_class = check_cast->InputAt(1)->AsLoadClass();
- ReferenceTypeInfo class_rti = load_class->GetLoadedClassRTI();
if (ShouldCreateBoundType(check_cast->GetNext(), obj, class_rti, check_cast, nullptr)) {
bound_type = CreateBoundType(
GetGraph()->GetArena(),
@@ -740,7 +756,9 @@ void ReferenceTypePropagation::ProcessWorklist() {
while (!worklist_.empty()) {
HInstruction* instruction = worklist_.back();
worklist_.pop_back();
- if (UpdateNullability(instruction) || UpdateReferenceTypeInfo(instruction)) {
+ bool updated_nullability = UpdateNullability(instruction);
+ bool updated_reference_type = UpdateReferenceTypeInfo(instruction);
+ if (updated_nullability || updated_reference_type) {
AddDependentInstructionsToWorklist(instruction);
}
}
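The final hunk above replaces UpdateNullability(instruction) || UpdateReferenceTypeInfo(instruction) with two named booleans precisely because || short-circuits: once the first update reports a change, the second update would otherwise be skipped entirely. A self-contained sketch of that pitfall (placeholder functions, not ART's):

#include <iostream>

static bool UpdateNullabilitySketch() { std::cout << "nullability "; return true; }
static bool UpdateReferenceTypeSketch() { std::cout << "reference-type "; return true; }

int main() {
  // Buggy form: the second update never runs once the first returns true.
  if (UpdateNullabilitySketch() || UpdateReferenceTypeSketch()) {
    std::cout << "<- short-circuited\n";
  }

  // Fixed form: both updates run unconditionally, then their results are combined.
  bool updated_nullability = UpdateNullabilitySketch();
  bool updated_reference_type = UpdateReferenceTypeSketch();
  if (updated_nullability || updated_reference_type) {
    std::cout << "<- both ran\n";
  }
  return 0;
}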
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index 9cdb89b7b3..6fc77721e7 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -617,42 +617,40 @@ void RegisterAllocator::LinearScan() {
// (2) Remove currently active intervals that are dead at this position.
// Move active intervals that have a lifetime hole at this position
// to inactive.
- // Note: Copy elements we keep to the beginning, just like
- // v.erase(std::remove(v.begin(), v.end(), value), v.end());
- auto active_kept_end = active_.begin();
- for (auto it = active_.begin(), end = active_.end(); it != end; ++it) {
- LiveInterval* interval = *it;
- if (interval->IsDeadAt(position)) {
- handled_.push_back(interval);
- } else if (!interval->Covers(position)) {
- inactive_.push_back(interval);
- } else {
- *active_kept_end++ = interval; // Keep this interval.
- }
- }
- // We have copied what we want to keep to [active_.begin(), active_kept_end),
- // the rest of the data in active_ is junk - drop it.
+ auto active_kept_end = std::remove_if(
+ active_.begin(),
+ active_.end(),
+ [this, position](LiveInterval* interval) {
+ if (interval->IsDeadAt(position)) {
+ handled_.push_back(interval);
+ return true;
+ } else if (!interval->Covers(position)) {
+ inactive_.push_back(interval);
+ return true;
+ } else {
+ return false; // Keep this interval.
+ }
+ });
active_.erase(active_kept_end, active_.end());
// (3) Remove currently inactive intervals that are dead at this position.
// Move inactive intervals that cover this position to active.
- // Note: Copy elements we keep to the beginning, just like
- // v.erase(std::remove(v.begin(), v.begin() + num, value), v.begin() + num);
- auto inactive_kept_end = inactive_.begin();
auto inactive_to_handle_end = inactive_.begin() + inactive_intervals_to_handle;
- for (auto it = inactive_.begin(); it != inactive_to_handle_end; ++it) {
- LiveInterval* interval = *it;
- DCHECK(interval->GetStart() < position || interval->IsFixed());
- if (interval->IsDeadAt(position)) {
- handled_.push_back(interval);
- } else if (interval->Covers(position)) {
- active_.push_back(interval);
- } else {
- *inactive_kept_end++ = interval; // Keep this interval.
- }
- }
- // We have copied what we want to keep to [inactive_.begin(), inactive_kept_end),
- // the rest of the data in the processed interval is junk - drop it.
+ auto inactive_kept_end = std::remove_if(
+ inactive_.begin(),
+ inactive_to_handle_end,
+ [this, position](LiveInterval* interval) {
+ DCHECK(interval->GetStart() < position || interval->IsFixed());
+ if (interval->IsDeadAt(position)) {
+ handled_.push_back(interval);
+ return true;
+ } else if (interval->Covers(position)) {
+ active_.push_back(interval);
+ return true;
+ } else {
+ return false; // Keep this interval.
+ }
+ });
inactive_.erase(inactive_kept_end, inactive_to_handle_end);
if (current->IsSlowPathSafepoint()) {
@@ -1894,7 +1892,7 @@ void RegisterAllocator::Resolve() {
for (HInstructionIterator inst_it(current->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
HInstruction* phi = inst_it.Current();
for (size_t i = 0, e = current->GetPredecessors().size(); i < e; ++i) {
- HBasicBlock* predecessor = current->GetPredecessor(i);
+ HBasicBlock* predecessor = current->GetPredecessors()[i];
DCHECK_EQ(predecessor->NumberOfNormalSuccessors(), 1u);
HInstruction* input = phi->InputAt(i);
Location source = input->GetLiveInterval()->GetLocationAt(
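The LinearScan rewrite above folds the hand-written copy-and-keep loops into the standard erase/std::remove_if idiom, with the predicate doing double duty: it files each dropped interval into handled_ or inactive_/active_ and returns whether to drop it. A reduced sketch of that pattern on a plain std::vector (ART uses ArenaVector, which behaves the same way here):

#include <algorithm>
#include <cassert>
#include <vector>

int main() {
  std::vector<int> active = {1, 2, 3, 4, 5, 6};
  std::vector<int> handled;

  // Drop even values from `active`, routing them into `handled` as a side effect,
  // just like the interval predicate in LinearScan.
  auto kept_end = std::remove_if(active.begin(), active.end(),
                                 [&handled](int value) {
                                   if (value % 2 == 0) {
                                     handled.push_back(value);
                                     return true;   // drop from `active`
                                   }
                                   return false;    // keep in `active`
                                 });
  active.erase(kept_end, active.end());

  assert((active == std::vector<int>{1, 3, 5}));
  assert((handled == std::vector<int>{2, 4, 6}));
  return 0;
}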
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index 2bb5a8bb08..1511606950 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -312,7 +312,7 @@ TEST(RegisterAllocatorTest, Loop3) {
register_allocator.AllocateRegisters();
ASSERT_TRUE(register_allocator.Validate(false));
- HBasicBlock* loop_header = graph->GetBlock(2);
+ HBasicBlock* loop_header = graph->GetBlocks()[2];
HPhi* phi = loop_header->GetFirstPhi()->AsPhi();
LiveInterval* phi_interval = phi->GetLiveInterval();
@@ -321,7 +321,7 @@ TEST(RegisterAllocatorTest, Loop3) {
ASSERT_TRUE(loop_update->HasRegister());
ASSERT_NE(phi_interval->GetRegister(), loop_update->GetRegister());
- HBasicBlock* return_block = graph->GetBlock(3);
+ HBasicBlock* return_block = graph->GetBlocks()[3];
HReturn* ret = return_block->GetLastInstruction()->AsReturn();
ASSERT_EQ(phi_interval->GetRegister(), ret->InputAt(0)->GetLiveInterval()->GetRegister());
}
@@ -343,8 +343,8 @@ TEST(RegisterAllocatorTest, FirstRegisterUse) {
SsaLivenessAnalysis liveness(graph, &codegen);
liveness.Analyze();
- HXor* first_xor = graph->GetBlock(1)->GetFirstInstruction()->AsXor();
- HXor* last_xor = graph->GetBlock(1)->GetLastInstruction()->GetPrevious()->AsXor();
+ HXor* first_xor = graph->GetBlocks()[1]->GetFirstInstruction()->AsXor();
+ HXor* last_xor = graph->GetBlocks()[1]->GetLastInstruction()->GetPrevious()->AsXor();
ASSERT_EQ(last_xor->InputAt(0), first_xor);
LiveInterval* interval = first_xor->GetLiveInterval();
ASSERT_EQ(interval->GetEnd(), last_xor->GetLifetimePosition());
@@ -488,7 +488,8 @@ static HGraph* BuildIfElseWithPhi(ArenaAllocator* allocator,
false,
kUnknownFieldIndex,
graph->GetDexFile(),
- dex_cache);
+ dex_cache,
+ 0);
block->AddInstruction(test);
block->AddInstruction(new (allocator) HIf(test));
HBasicBlock* then = new (allocator) HBasicBlock(graph);
@@ -513,14 +514,16 @@ static HGraph* BuildIfElseWithPhi(ArenaAllocator* allocator,
false,
kUnknownFieldIndex,
graph->GetDexFile(),
- dex_cache);
+ dex_cache,
+ 0);
*input2 = new (allocator) HInstanceFieldGet(parameter,
Primitive::kPrimInt,
MemberOffset(42),
false,
kUnknownFieldIndex,
graph->GetDexFile(),
- dex_cache);
+ dex_cache,
+ 0);
then->AddInstruction(*input1);
else_->AddInstruction(*input2);
join->AddInstruction(new (allocator) HExit());
@@ -634,7 +637,8 @@ static HGraph* BuildFieldReturn(ArenaAllocator* allocator,
false,
kUnknownFieldIndex,
graph->GetDexFile(),
- dex_cache);
+ dex_cache,
+ 0);
block->AddInstruction(*field);
*ret = new (allocator) HReturn(*field);
block->AddInstruction(*ret);
diff --git a/compiler/optimizing/side_effects_analysis.cc b/compiler/optimizing/side_effects_analysis.cc
index 338a3aaad0..1dc69867b4 100644
--- a/compiler/optimizing/side_effects_analysis.cc
+++ b/compiler/optimizing/side_effects_analysis.cc
@@ -76,18 +76,15 @@ void SideEffectsAnalysis::Run() {
SideEffects SideEffectsAnalysis::GetLoopEffects(HBasicBlock* block) const {
DCHECK(block->IsLoopHeader());
- DCHECK_LT(block->GetBlockId(), loop_effects_.size());
return loop_effects_[block->GetBlockId()];
}
SideEffects SideEffectsAnalysis::GetBlockEffects(HBasicBlock* block) const {
- DCHECK_LT(block->GetBlockId(), block_effects_.size());
return block_effects_[block->GetBlockId()];
}
void SideEffectsAnalysis::UpdateLoopEffects(HLoopInformation* info, SideEffects effects) {
uint32_t id = info->GetHeader()->GetBlockId();
- DCHECK_LT(id, loop_effects_.size());
loop_effects_[id] = loop_effects_[id].Union(effects);
}
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index 40c75af6ef..4565590bc3 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -389,7 +389,6 @@ void SsaBuilder::BuildSsa() {
}
ArenaVector<HInstruction*>* SsaBuilder::GetLocalsFor(HBasicBlock* block) {
- DCHECK_LT(block->GetBlockId(), locals_for_.size());
ArenaVector<HInstruction*>* locals = &locals_for_[block->GetBlockId()];
const size_t vregs = GetGraph()->GetNumberOfVRegs();
if (locals->empty() && vregs != 0u) {
@@ -417,7 +416,6 @@ ArenaVector<HInstruction*>* SsaBuilder::GetLocalsFor(HBasicBlock* block) {
HInstruction* SsaBuilder::ValueOfLocal(HBasicBlock* block, size_t local) {
ArenaVector<HInstruction*>* locals = GetLocalsFor(block);
- DCHECK_LT(local, locals->size());
return (*locals)[local];
}
@@ -467,7 +465,7 @@ void SsaBuilder::VisitBasicBlock(HBasicBlock* block) {
for (size_t local = 0; local < current_locals_->size(); ++local) {
bool one_predecessor_has_no_value = false;
bool is_different = false;
- HInstruction* value = ValueOfLocal(block->GetPredecessor(0), local);
+ HInstruction* value = ValueOfLocal(block->GetPredecessors()[0], local);
for (HBasicBlock* predecessor : block->GetPredecessors()) {
HInstruction* current = ValueOfLocal(predecessor, local);
@@ -489,7 +487,7 @@ void SsaBuilder::VisitBasicBlock(HBasicBlock* block) {
HPhi* phi = new (GetGraph()->GetArena()) HPhi(
GetGraph()->GetArena(), local, block->GetPredecessors().size(), Primitive::kPrimVoid);
for (size_t i = 0; i < block->GetPredecessors().size(); i++) {
- HInstruction* pred_value = ValueOfLocal(block->GetPredecessor(i), local);
+ HInstruction* pred_value = ValueOfLocal(block->GetPredecessors()[i], local);
phi->SetRawInputAt(i, pred_value);
}
block->AddPhi(phi);
@@ -626,7 +624,6 @@ HInstruction* SsaBuilder::GetReferenceTypeEquivalent(HInstruction* value) {
}
void SsaBuilder::VisitLoadLocal(HLoadLocal* load) {
- DCHECK_LT(load->GetLocal()->GetRegNumber(), current_locals_->size());
HInstruction* value = (*current_locals_)[load->GetLocal()->GetRegNumber()];
// If the operation requests a specific type, we make sure its input is of that type.
if (load->GetType() != value->GetType()) {
@@ -641,7 +638,6 @@ void SsaBuilder::VisitLoadLocal(HLoadLocal* load) {
}
void SsaBuilder::VisitStoreLocal(HStoreLocal* store) {
- DCHECK_LT(store->GetLocal()->GetRegNumber(), current_locals_->size());
(*current_locals_)[store->GetLocal()->GetRegNumber()] = store->InputAt(1);
store->GetBlock()->RemoveInstruction(store);
}
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index b869d57be8..b9d8731cc2 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -159,7 +159,6 @@ void SsaLivenessAnalysis::NumberInstructions() {
void SsaLivenessAnalysis::ComputeLiveness() {
for (HLinearOrderIterator it(*graph_); !it.Done(); it.Advance()) {
HBasicBlock* block = it.Current();
- DCHECK_LT(block->GetBlockId(), block_infos_.size());
block_infos_[block->GetBlockId()] =
new (graph_->GetArena()) BlockInfo(graph_->GetArena(), *block, number_of_ssa_values_);
}
@@ -388,14 +387,14 @@ int LiveInterval::FindFirstRegisterHint(size_t* free_until,
}
// If the instruction dies at the phi assignment, we can try having the
// same register.
- if (end == user->GetBlock()->GetPredecessor(input_index)->GetLifetimeEnd()) {
+ if (end == user->GetBlock()->GetPredecessors()[input_index]->GetLifetimeEnd()) {
for (size_t i = 0, e = user->InputCount(); i < e; ++i) {
if (i == input_index) {
continue;
}
HInstruction* input = user->InputAt(i);
Location location = input->GetLiveInterval()->GetLocationAt(
- user->GetBlock()->GetPredecessor(i)->GetLifetimeEnd() - 1);
+ user->GetBlock()->GetPredecessors()[i]->GetLifetimeEnd() - 1);
if (location.IsRegisterKind()) {
int reg = RegisterOrLowRegister(location);
if (free_until[reg] >= use_position) {
@@ -432,7 +431,6 @@ int LiveInterval::FindHintAtDefinition() const {
const ArenaVector<HBasicBlock*>& predecessors = defined_by_->GetBlock()->GetPredecessors();
for (size_t i = 0, e = defined_by_->InputCount(); i < e; ++i) {
HInstruction* input = defined_by_->InputAt(i);
- DCHECK_LT(i, predecessors.size());
size_t end = predecessors[i]->GetLifetimeEnd();
LiveInterval* input_interval = input->GetLiveInterval()->GetSiblingAt(end - 1);
if (input_interval->GetEnd() == end) {
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index e4b0999d4f..572a7b6a53 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -1117,27 +1117,22 @@ class SsaLivenessAnalysis : public ValueObject {
void Analyze();
BitVector* GetLiveInSet(const HBasicBlock& block) const {
- DCHECK_LT(block.GetBlockId(), block_infos_.size());
return &block_infos_[block.GetBlockId()]->live_in_;
}
BitVector* GetLiveOutSet(const HBasicBlock& block) const {
- DCHECK_LT(block.GetBlockId(), block_infos_.size());
return &block_infos_[block.GetBlockId()]->live_out_;
}
BitVector* GetKillSet(const HBasicBlock& block) const {
- DCHECK_LT(block.GetBlockId(), block_infos_.size());
return &block_infos_[block.GetBlockId()]->kill_;
}
HInstruction* GetInstructionFromSsaIndex(size_t index) const {
- DCHECK_LT(index, instructions_from_ssa_index_.size());
return instructions_from_ssa_index_[index];
}
HInstruction* GetInstructionFromPosition(size_t index) const {
- DCHECK_LT(index, instructions_from_lifetime_position_.size());
return instructions_from_lifetime_position_[index];
}
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index f27cecc8fa..c60a4eacaa 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -24,6 +24,7 @@ void StackMapStream::BeginStackMapEntry(uint32_t dex_pc,
uint32_t num_dex_registers,
uint8_t inlining_depth) {
DCHECK_EQ(0u, current_entry_.dex_pc) << "EndStackMapEntry not called after BeginStackMapEntry";
+ DCHECK_NE(dex_pc, static_cast<uint32_t>(-1)) << "invalid dex_pc";
current_entry_.dex_pc = dex_pc;
current_entry_.native_pc_offset = native_pc_offset;
current_entry_.register_mask = register_mask;
@@ -209,7 +210,6 @@ size_t StackMapStream::ComputeDexRegisterMapsSize() const {
// Entries with the same dex map will have the same offset.
}
for (size_t j = 0; j < entry.inlining_depth; ++j) {
- DCHECK_LT(inline_info_index, inline_infos_.size());
InlineInfoEntry inline_entry = inline_infos_[inline_info_index++];
size += ComputeDexRegisterMapSize(inline_entry.num_dex_registers,
inline_entry.live_dex_registers_mask);
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index 4783e283b3..fc27a2b446 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -63,6 +63,7 @@ class StackMapStream : public ValueObject {
: allocator_(allocator),
stack_maps_(allocator->Adapter(kArenaAllocStackMapStream)),
location_catalog_entries_(allocator->Adapter(kArenaAllocStackMapStream)),
+ location_catalog_entries_indices_(allocator->Adapter(kArenaAllocStackMapStream)),
dex_register_locations_(allocator->Adapter(kArenaAllocStackMapStream)),
inline_infos_(allocator->Adapter(kArenaAllocStackMapStream)),
stack_mask_max_(-1),
@@ -136,12 +137,10 @@ class StackMapStream : public ValueObject {
}
const StackMapEntry& GetStackMap(size_t i) const {
- DCHECK_LT(i, stack_maps_.size());
return stack_maps_[i];
}
void SetStackMapNativePcOffset(size_t i, uint32_t native_pc_offset) {
- DCHECK_LT(i, stack_maps_.size());
stack_maps_[i].native_pc_offset = native_pc_offset;
}
@@ -175,8 +174,10 @@ class StackMapStream : public ValueObject {
ArenaVector<DexRegisterLocation> location_catalog_entries_;
// Map from Dex register location catalog entries to their indices in the
// location catalog.
- typedef HashMap<DexRegisterLocation, size_t, LocationCatalogEntriesIndicesEmptyFn,
- DexRegisterLocationHashFn> LocationCatalogEntriesIndices;
+ using LocationCatalogEntriesIndices = ArenaHashMap<DexRegisterLocation,
+ size_t,
+ LocationCatalogEntriesIndicesEmptyFn,
+ DexRegisterLocationHashFn>;
LocationCatalogEntriesIndices location_catalog_entries_indices_;
// A set of concatenated maps of Dex register locations indices to `location_catalog_entries_`.
diff --git a/compiler/optimizing/suspend_check_test.cc b/compiler/optimizing/suspend_check_test.cc
index e745d94b89..b6c704c1b1 100644
--- a/compiler/optimizing/suspend_check_test.cc
+++ b/compiler/optimizing/suspend_check_test.cc
@@ -36,7 +36,7 @@ static void TestCode(const uint16_t* data) {
bool graph_built = builder.BuildGraph(*item);
ASSERT_TRUE(graph_built);
- HBasicBlock* first_block = graph->GetEntryBlock()->GetSuccessor(0);
+ HBasicBlock* first_block = graph->GetEntryBlock()->GetSuccessors()[0];
HInstruction* first_instruction = first_block->GetFirstInstruction();
// Account for some tests having a store local as first instruction.
ASSERT_TRUE(first_instruction->IsSuspendCheck()
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
index 967b191d32..d59bc6be40 100644
--- a/compiler/utils/arm/assembler_arm.h
+++ b/compiler/utils/arm/assembler_arm.h
@@ -470,6 +470,13 @@ class ArmAssembler : public Assembler {
orr(rd, rn, so, cond, kCcSet);
}
+ virtual void orn(Register rd, Register rn, const ShifterOperand& so,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) = 0;
+
+ virtual void orns(Register rd, Register rn, const ShifterOperand& so, Condition cond = AL) {
+ orn(rd, rn, so, cond, kCcSet);
+ }
+
virtual void mov(Register rd, const ShifterOperand& so,
Condition cond = AL, SetCc set_cc = kCcDontCare) = 0;
@@ -832,6 +839,8 @@ class ArmAssembler : public Assembler {
uint32_t immediate,
ShifterOperand* shifter_op) = 0;
+ virtual bool ShifterOperandCanAlwaysHold(uint32_t immediate) = 0;
+
static bool IsInstructionForExceptionHandling(uintptr_t pc);
virtual void CompareAndBranchIfZero(Register r, Label* label) = 0;
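A typical consumer of the new orn and ShifterOperandCanAlwaysHold pair is constant materialization in the ARM code generator: if an OR immediate does not encode as a modified immediate but its bitwise complement does, the operation can be emitted as OR-NOT instead. The helper below is a hedged sketch of that decision, not ART's exact code and not standalone (it assumes the ArmAssembler interface shown above); the scratch-register fallback is omitted.

// Illustrative only. OR `value` into `in`, writing the result to `out`.
static void GenerateOrrConstSketch(ArmAssembler* assembler,
                                   Register out,
                                   Register in,
                                   uint32_t value) {
  if (assembler->ShifterOperandCanAlwaysHold(value)) {
    // The constant itself fits a modified immediate: plain ORR.
    assembler->orr(out, in, ShifterOperand(value));
  } else if (assembler->ShifterOperandCanAlwaysHold(~value)) {
    // Only the complement fits: use ORN (OR NOT), which Thumb2 supports.
    assembler->orn(out, in, ShifterOperand(~value));
  } else {
    // Neither form encodes; a real implementation would first load `value`
    // into a scratch register (omitted here).
  }
}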
diff --git a/compiler/utils/arm/assembler_arm32.cc b/compiler/utils/arm/assembler_arm32.cc
index f7772aea3d..6e7c828b4a 100644
--- a/compiler/utils/arm/assembler_arm32.cc
+++ b/compiler/utils/arm/assembler_arm32.cc
@@ -48,6 +48,11 @@ bool Arm32Assembler::ShifterOperandCanHoldArm32(uint32_t immediate, ShifterOpera
return false;
}
+bool Arm32Assembler::ShifterOperandCanAlwaysHold(uint32_t immediate) {
+ ShifterOperand shifter_op;
+ return ShifterOperandCanHoldArm32(immediate, &shifter_op);
+}
+
bool Arm32Assembler::ShifterOperandCanHold(Register rd ATTRIBUTE_UNUSED,
Register rn ATTRIBUTE_UNUSED,
Opcode opcode ATTRIBUTE_UNUSED,
@@ -130,6 +135,15 @@ void Arm32Assembler::orr(Register rd, Register rn, const ShifterOperand& so,
}
+void Arm32Assembler::orn(Register rd ATTRIBUTE_UNUSED,
+ Register rn ATTRIBUTE_UNUSED,
+ const ShifterOperand& so ATTRIBUTE_UNUSED,
+ Condition cond ATTRIBUTE_UNUSED,
+ SetCc set_cc ATTRIBUTE_UNUSED) {
+ LOG(FATAL) << "orn is not supported on ARM32";
+}
+
+
void Arm32Assembler::mov(Register rd, const ShifterOperand& so,
Condition cond, SetCc set_cc) {
EmitType01(cond, so.type(), MOV, set_cc, R0, rd, so);
diff --git a/compiler/utils/arm/assembler_arm32.h b/compiler/utils/arm/assembler_arm32.h
index 3407369654..4646538716 100644
--- a/compiler/utils/arm/assembler_arm32.h
+++ b/compiler/utils/arm/assembler_arm32.h
@@ -74,6 +74,9 @@ class Arm32Assembler FINAL : public ArmAssembler {
virtual void orr(Register rd, Register rn, const ShifterOperand& so,
Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
+ virtual void orn(Register rd, Register rn, const ShifterOperand& so,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
+
virtual void mov(Register rd, const ShifterOperand& so,
Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
@@ -294,6 +297,7 @@ class Arm32Assembler FINAL : public ArmAssembler {
uint32_t immediate,
ShifterOperand* shifter_op) OVERRIDE;
+ bool ShifterOperandCanAlwaysHold(uint32_t immediate) OVERRIDE;
static bool IsInstructionForExceptionHandling(uintptr_t pc);
diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc
index 0f6c4f5a34..cc87856e82 100644
--- a/compiler/utils/arm/assembler_thumb2.cc
+++ b/compiler/utils/arm/assembler_thumb2.cc
@@ -390,6 +390,10 @@ void Thumb2Assembler::FinalizeCode() {
EmitLiterals();
}
+bool Thumb2Assembler::ShifterOperandCanAlwaysHold(uint32_t immediate) {
+ return ArmAssembler::ModifiedImmediate(immediate) != kInvalidModifiedImmediate;
+}
+
bool Thumb2Assembler::ShifterOperandCanHold(Register rd ATTRIBUTE_UNUSED,
Register rn ATTRIBUTE_UNUSED,
Opcode opcode,
@@ -410,6 +414,7 @@ bool Thumb2Assembler::ShifterOperandCanHold(Register rd ATTRIBUTE_UNUSED,
case MOV:
// TODO: Support less than or equal to 12bits.
return ArmAssembler::ModifiedImmediate(immediate) != kInvalidModifiedImmediate;
+
case MVN:
default:
return ArmAssembler::ModifiedImmediate(immediate) != kInvalidModifiedImmediate;
@@ -492,6 +497,12 @@ void Thumb2Assembler::orr(Register rd, Register rn, const ShifterOperand& so,
}
+void Thumb2Assembler::orn(Register rd, Register rn, const ShifterOperand& so,
+ Condition cond, SetCc set_cc) {
+ EmitDataProcessing(cond, ORN, set_cc, rn, rd, so);
+}
+
+
void Thumb2Assembler::mov(Register rd, const ShifterOperand& so,
Condition cond, SetCc set_cc) {
EmitDataProcessing(cond, MOV, set_cc, R0, rd, so);
@@ -1105,6 +1116,7 @@ bool Thumb2Assembler::Is32BitDataProcessing(Condition cond,
rn_is_valid = false; // There is no Rn for these instructions.
break;
case TEQ:
+ case ORN:
return true;
case ADD:
case SUB:
@@ -1222,6 +1234,7 @@ void Thumb2Assembler::Emit32BitDataProcessing(Condition cond ATTRIBUTE_UNUSED,
case MOV: thumb_opcode = 2U /* 0b0010 */; rn = PC; break;
case BIC: thumb_opcode = 1U /* 0b0001 */; break;
case MVN: thumb_opcode = 3U /* 0b0011 */; rn = PC; break;
+ case ORN: thumb_opcode = 3U /* 0b0011 */; break;
default:
break;
}
diff --git a/compiler/utils/arm/assembler_thumb2.h b/compiler/utils/arm/assembler_thumb2.h
index a1a8927f44..055b1379ad 100644
--- a/compiler/utils/arm/assembler_thumb2.h
+++ b/compiler/utils/arm/assembler_thumb2.h
@@ -98,6 +98,9 @@ class Thumb2Assembler FINAL : public ArmAssembler {
virtual void orr(Register rd, Register rn, const ShifterOperand& so,
Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
+ virtual void orn(Register rd, Register rn, const ShifterOperand& so,
+ Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
+
virtual void mov(Register rd, const ShifterOperand& so,
Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
@@ -337,6 +340,8 @@ class Thumb2Assembler FINAL : public ArmAssembler {
uint32_t immediate,
ShifterOperand* shifter_op) OVERRIDE;
+ bool ShifterOperandCanAlwaysHold(uint32_t immediate) OVERRIDE;
+
static bool IsInstructionForExceptionHandling(uintptr_t pc);
diff --git a/compiler/utils/arm/constants_arm.h b/compiler/utils/arm/constants_arm.h
index 6b4daed909..2060064423 100644
--- a/compiler/utils/arm/constants_arm.h
+++ b/compiler/utils/arm/constants_arm.h
@@ -148,7 +148,8 @@ enum Opcode {
MOV = 13, // Move
BIC = 14, // Bit Clear
MVN = 15, // Move Not
- kMaxOperand = 16
+ ORN = 16, // Logical OR NOT.
+ kMaxOperand = 17
};
std::ostream& operator<<(std::ostream& os, const Opcode& rhs);
diff --git a/compiler/utils/array_ref.h b/compiler/utils/array_ref.h
index 48f0328dce..5c33639a6a 100644
--- a/compiler/utils/array_ref.h
+++ b/compiler/utils/array_ref.h
@@ -77,15 +77,19 @@ class ArrayRef {
: array_(array_in), size_(size_in) {
}
- template <typename Alloc>
- explicit ArrayRef(std::vector<T, Alloc>& v)
+ template <typename Vector,
+ typename = typename std::enable_if<
+ std::is_same<typename Vector::value_type, value_type>::value>::type>
+ explicit ArrayRef(Vector& v)
: array_(v.data()), size_(v.size()) {
}
- template <typename U, typename Alloc>
- explicit ArrayRef(const std::vector<U, Alloc>& v,
- typename std::enable_if<std::is_same<T, const U>::value, tag>::type
- t ATTRIBUTE_UNUSED = tag())
+ template <typename Vector,
+ typename = typename std::enable_if<
+ std::is_same<
+ typename std::add_const<typename Vector::value_type>::type,
+ value_type>::value>::type>
+ explicit ArrayRef(const Vector& v)
: array_(v.data()), size_(v.size()) {
}
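The ArrayRef constructors above are widened from std::vector<T, Alloc> to any vector-like type whose value_type matches, so containers such as ArenaVector bind without ArrayRef having to name their allocator type. A reduced, standalone sketch of the same enable_if constraint (class and names are illustrative):

#include <cstddef>
#include <type_traits>
#include <vector>

template <typename T>
class ArrayRefSketch {
 public:
  using value_type = T;

  // Accept any container exposing data()/size() whose value_type is exactly T.
  template <typename Vector,
            typename = typename std::enable_if<
                std::is_same<typename Vector::value_type, value_type>::value>::type>
  explicit ArrayRefSketch(Vector& v) : array_(v.data()), size_(v.size()) {}

  std::size_t size() const { return size_; }

 private:
  T* array_;
  std::size_t size_;
};

int main() {
  std::vector<int> ints = {1, 2, 3};
  ArrayRefSketch<int> ref(ints);  // std::vector<int> satisfies the constraint; so would any
                                  // other vector-like type whose value_type is int.
  return static_cast<int>(ref.size()) == 3 ? 0 : 1;
}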
diff --git a/compiler/utils/assembler.cc b/compiler/utils/assembler.cc
index 496ca95ff9..b01b0fe4e0 100644
--- a/compiler/utils/assembler.cc
+++ b/compiler/utils/assembler.cc
@@ -122,7 +122,8 @@ void DebugFrameOpCodeWriterForAssembler::ImplicitlyAdvancePC() {
this->AdvancePC(assembler_->CodeSize());
}
-Assembler* Assembler::Create(InstructionSet instruction_set) {
+Assembler* Assembler::Create(InstructionSet instruction_set,
+ const InstructionSetFeatures* instruction_set_features) {
switch (instruction_set) {
#ifdef ART_ENABLE_CODEGEN_arm
case kArm:
@@ -136,7 +137,9 @@ Assembler* Assembler::Create(InstructionSet instruction_set) {
#endif
#ifdef ART_ENABLE_CODEGEN_mips
case kMips:
- return new mips::MipsAssembler();
+ return new mips::MipsAssembler(instruction_set_features != nullptr
+ ? instruction_set_features->AsMipsInstructionSetFeatures()
+ : nullptr);
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
case kMips64:
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index 1088cb1bbd..d97a2a40b2 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -20,6 +20,7 @@
#include <vector>
#include "arch/instruction_set.h"
+#include "arch/instruction_set_features.h"
#include "base/logging.h"
#include "base/macros.h"
#include "arm/constants_arm.h"
@@ -284,7 +285,8 @@ class DebugFrameOpCodeWriterForAssembler FINAL
class Assembler {
public:
- static Assembler* Create(InstructionSet instruction_set);
+ static Assembler* Create(InstructionSet instruction_set,
+ const InstructionSetFeatures* instruction_set_features = nullptr);
// Finalize the code; emit slow paths, fixup branches, add literal pool, etc.
virtual void FinalizeCode() { buffer_.EmitSlowPaths(this); }
@@ -299,7 +301,7 @@ class Assembler {
}
// TODO: Implement with disassembler.
- virtual void Comment(const char* format, ...) { UNUSED(format); }
+ virtual void Comment(const char* format ATTRIBUTE_UNUSED, ...) {}
// Emit code that will create an activation on the stack
virtual void BuildFrame(size_t frame_size, ManagedRegister method_reg,
diff --git a/compiler/utils/assembler_test.h b/compiler/utils/assembler_test.h
index 93473fb4c7..f1233ca457 100644
--- a/compiler/utils/assembler_test.h
+++ b/compiler/utils/assembler_test.h
@@ -138,13 +138,14 @@ class AssemblerTest : public testing::Test {
return RepeatRegisterImm<RegisterView::kUseSecondaryName>(f, imm_bytes, fmt);
}
- template <typename Reg1Type, typename Reg2Type, typename ImmType,
- RegisterView Reg1View, RegisterView Reg2View>
- std::string RepeatRegRegImmBits(void (Ass::*f)(Reg1Type, Reg2Type, ImmType),
- int imm_bits,
- std::string fmt) {
- const std::vector<Reg1Type*> reg1_registers = GetRegisters();
- const std::vector<Reg2Type*> reg2_registers = GetRegisters();
+ template <typename Reg1, typename Reg2, typename ImmType>
+ std::string RepeatTemplatedRegistersImmBits(void (Ass::*f)(Reg1, Reg2, ImmType),
+ int imm_bits,
+ const std::vector<Reg1*> reg1_registers,
+ const std::vector<Reg2*> reg2_registers,
+ std::string (AssemblerTest::*GetName1)(const Reg1&),
+ std::string (AssemblerTest::*GetName2)(const Reg2&),
+ std::string fmt) {
std::string str;
std::vector<int64_t> imms = CreateImmediateValuesBits(abs(imm_bits), imm_bits > 0);
@@ -155,13 +156,13 @@ class AssemblerTest : public testing::Test {
(assembler_.get()->*f)(*reg1, *reg2, new_imm);
std::string base = fmt;
- std::string reg1_string = GetRegName<Reg1View>(*reg1);
+ std::string reg1_string = (this->*GetName1)(*reg1);
size_t reg1_index;
while ((reg1_index = base.find(REG1_TOKEN)) != std::string::npos) {
base.replace(reg1_index, ConstexprStrLen(REG1_TOKEN), reg1_string);
}
- std::string reg2_string = GetRegName<Reg2View>(*reg2);
+ std::string reg2_string = (this->*GetName2)(*reg2);
size_t reg2_index;
while ((reg2_index = base.find(REG2_TOKEN)) != std::string::npos) {
base.replace(reg2_index, ConstexprStrLen(REG2_TOKEN), reg2_string);
@@ -187,15 +188,75 @@ class AssemblerTest : public testing::Test {
return str;
}
- template <typename Reg1Type, typename Reg2Type, typename ImmType>
- std::string RepeatRRIb(void (Ass::*f)(Reg1Type, Reg2Type, ImmType),
- int imm_bits,
- std::string fmt) {
- return RepeatRegRegImmBits<Reg1Type,
- Reg2Type,
- ImmType,
- RegisterView::kUsePrimaryName,
- RegisterView::kUsePrimaryName>(f, imm_bits, fmt);
+ template <typename RegType, typename ImmType>
+ std::string RepeatTemplatedRegisterImmBits(void (Ass::*f)(RegType, ImmType),
+ int imm_bits,
+ const std::vector<Reg*> registers,
+ std::string (AssemblerTest::*GetName)(const RegType&),
+ std::string fmt) {
+ std::string str;
+ std::vector<int64_t> imms = CreateImmediateValuesBits(abs(imm_bits), imm_bits > 0);
+
+ for (auto reg : registers) {
+ for (int64_t imm : imms) {
+ ImmType new_imm = CreateImmediate(imm);
+ (assembler_.get()->*f)(*reg, new_imm);
+ std::string base = fmt;
+
+ std::string reg_string = (this->*GetName)(*reg);
+ size_t reg_index;
+ while ((reg_index = base.find(REG_TOKEN)) != std::string::npos) {
+ base.replace(reg_index, ConstexprStrLen(REG_TOKEN), reg_string);
+ }
+
+ size_t imm_index = base.find(IMM_TOKEN);
+ if (imm_index != std::string::npos) {
+ std::ostringstream sreg;
+ sreg << imm;
+ std::string imm_string = sreg.str();
+ base.replace(imm_index, ConstexprStrLen(IMM_TOKEN), imm_string);
+ }
+
+ if (str.size() > 0) {
+ str += "\n";
+ }
+ str += base;
+ }
+ }
+ // Add a newline at the end.
+ str += "\n";
+ return str;
+ }
+
+ template <typename ImmType>
+ std::string RepeatRRIb(void (Ass::*f)(Reg, Reg, ImmType), int imm_bits, std::string fmt) {
+ return RepeatTemplatedRegistersImmBits<Reg, Reg, ImmType>(f,
+ imm_bits,
+ GetRegisters(),
+ GetRegisters(),
+ &AssemblerTest::GetRegName<RegisterView::kUsePrimaryName>,
+ &AssemblerTest::GetRegName<RegisterView::kUsePrimaryName>,
+ fmt);
+ }
+
+ template <typename ImmType>
+ std::string RepeatRIb(void (Ass::*f)(Reg, ImmType), int imm_bits, std::string fmt) {
+ return RepeatTemplatedRegisterImmBits<Reg, ImmType>(f,
+ imm_bits,
+ GetRegisters(),
+ &AssemblerTest::GetRegName<RegisterView::kUsePrimaryName>,
+ fmt);
+ }
+
+ template <typename ImmType>
+ std::string RepeatFRIb(void (Ass::*f)(FPReg, Reg, ImmType), int imm_bits, std::string fmt) {
+ return RepeatTemplatedRegistersImmBits<FPReg, Reg, ImmType>(f,
+ imm_bits,
+ GetFPRegisters(),
+ GetRegisters(),
+ &AssemblerTest::GetFPRegName,
+ &AssemblerTest::GetRegName<RegisterView::kUsePrimaryName>,
+ fmt);
}
std::string RepeatFF(void (Ass::*f)(FPReg, FPReg), std::string fmt) {
diff --git a/compiler/utils/assembler_test_base.h b/compiler/utils/assembler_test_base.h
index c8b3fe58a8..8c71292465 100644
--- a/compiler/utils/assembler_test_base.h
+++ b/compiler/utils/assembler_test_base.h
@@ -67,12 +67,20 @@ class AssemblerTestInfrastructure {
// This is intended to be run as a test.
bool CheckTools() {
- if (!FileExists(FindTool(assembler_cmd_name_))) {
+ std::string asm_tool = FindTool(assembler_cmd_name_);
+ if (!FileExists(asm_tool)) {
+ LOG(ERROR) << "Could not find assembler from " << assembler_cmd_name_;
+ LOG(ERROR) << "FindTool returned " << asm_tool;
+ FindToolDump(assembler_cmd_name_);
return false;
}
LOG(INFO) << "Chosen assembler command: " << GetAssemblerCommand();
- if (!FileExists(FindTool(objdump_cmd_name_))) {
+ std::string objdump_tool = FindTool(objdump_cmd_name_);
+ if (!FileExists(objdump_tool)) {
+ LOG(ERROR) << "Could not find objdump from " << objdump_cmd_name_;
+ LOG(ERROR) << "FindTool returned " << objdump_tool;
+ FindToolDump(objdump_cmd_name_);
return false;
}
LOG(INFO) << "Chosen objdump command: " << GetObjdumpCommand();
@@ -80,7 +88,11 @@ class AssemblerTestInfrastructure {
// Disassembly is optional.
std::string disassembler = GetDisassembleCommand();
if (disassembler.length() != 0) {
- if (!FileExists(FindTool(disassembler_cmd_name_))) {
+ std::string disassembler_tool = FindTool(disassembler_cmd_name_);
+ if (!FileExists(disassembler_tool)) {
+ LOG(ERROR) << "Could not find disassembler from " << disassembler_cmd_name_;
+ LOG(ERROR) << "FindTool returned " << disassembler_tool;
+ FindToolDump(disassembler_cmd_name_);
return false;
}
LOG(INFO) << "Chosen disassemble command: " << GetDisassembleCommand();
@@ -493,7 +505,7 @@ class AssemblerTestInfrastructure {
std::string error_msg;
if (!Exec(args, &error_msg)) {
EXPECT_TRUE(false) << error_msg;
- return "";
+ UNREACHABLE();
}
std::ifstream in(tmp_file.c_str());
@@ -508,6 +520,54 @@ class AssemblerTestInfrastructure {
return line;
}
+ // Helper for below. If name_predicate is empty, search for all files, otherwise use it for the
+ // "-name" option.
+ static void FindToolDumpPrintout(std::string name_predicate, std::string tmp_file) {
+ std::string gcc_path = GetRootPath() + GetGCCRootPath();
+ std::vector<std::string> args;
+ args.push_back("find");
+ args.push_back(gcc_path);
+ if (!name_predicate.empty()) {
+ args.push_back("-name");
+ args.push_back(name_predicate);
+ }
+ args.push_back("|");
+ args.push_back("sort");
+ args.push_back(">");
+ args.push_back(tmp_file);
+ std::string sh_args = Join(args, ' ');
+
+ args.clear();
+ args.push_back("/bin/sh");
+ args.push_back("-c");
+ args.push_back(sh_args);
+
+ std::string error_msg;
+ if (!Exec(args, &error_msg)) {
+ EXPECT_TRUE(false) << error_msg;
+ UNREACHABLE();
+ }
+
+ LOG(ERROR) << "FindToolDump: gcc_path=" << gcc_path
+ << " cmd=" << sh_args;
+ std::ifstream in(tmp_file.c_str());
+ if (in) {
+ std::string line;
+ while (std::getline(in, line)) {
+ LOG(ERROR) << line;
+ }
+ }
+ in.close();
+ std::remove(tmp_file.c_str());
+ }
+
+ // For debug purposes.
+ void FindToolDump(std::string tool_name) {
+ // Check with the tool name.
+ FindToolDumpPrintout(architecture_string_ + "*" + tool_name, GetTmpnam());
+ FindToolDumpPrintout("", GetTmpnam());
+ }
+
// Use a consistent tmpnam, so store it.
std::string GetTmpnam() {
if (tmpnam_.length() == 0) {
diff --git a/compiler/utils/assembler_thumb_test.cc b/compiler/utils/assembler_thumb_test.cc
index b2a354b63c..2ae88413e7 100644
--- a/compiler/utils/assembler_thumb_test.cc
+++ b/compiler/utils/assembler_thumb_test.cc
@@ -238,6 +238,7 @@ TEST(Thumb2AssemblerTest, DataProcessingRegister) {
__ sub(R0, R1, ShifterOperand(R2), AL, kCcKeep);
__ and_(R0, R1, ShifterOperand(R2), AL, kCcKeep);
__ orr(R0, R1, ShifterOperand(R2), AL, kCcKeep);
+ __ orn(R0, R1, ShifterOperand(R2), AL, kCcKeep);
__ eor(R0, R1, ShifterOperand(R2), AL, kCcKeep);
__ bic(R0, R1, ShifterOperand(R2), AL, kCcKeep);
__ adc(R0, R1, ShifterOperand(R2), AL, kCcKeep);
@@ -371,6 +372,7 @@ TEST(Thumb2AssemblerTest, DataProcessingImmediate) {
__ sub(R0, R1, ShifterOperand(0x55));
__ and_(R0, R1, ShifterOperand(0x55));
__ orr(R0, R1, ShifterOperand(0x55));
+ __ orn(R0, R1, ShifterOperand(0x55));
__ eor(R0, R1, ShifterOperand(0x55));
__ bic(R0, R1, ShifterOperand(0x55));
__ adc(R0, R1, ShifterOperand(0x55));
@@ -403,6 +405,7 @@ TEST(Thumb2AssemblerTest, DataProcessingModifiedImmediate) {
__ sub(R0, R1, ShifterOperand(0x550055));
__ and_(R0, R1, ShifterOperand(0x550055));
__ orr(R0, R1, ShifterOperand(0x550055));
+ __ orn(R0, R1, ShifterOperand(0x550055));
__ eor(R0, R1, ShifterOperand(0x550055));
__ bic(R0, R1, ShifterOperand(0x550055));
__ adc(R0, R1, ShifterOperand(0x550055));
diff --git a/compiler/utils/assembler_thumb_test_expected.cc.inc b/compiler/utils/assembler_thumb_test_expected.cc.inc
index 82ad6429bf..b79c2e46f0 100644
--- a/compiler/utils/assembler_thumb_test_expected.cc.inc
+++ b/compiler/utils/assembler_thumb_test_expected.cc.inc
@@ -23,109 +23,110 @@ const char* DataProcessingRegisterResults[] = {
" 8: eba1 0002 sub.w r0, r1, r2\n",
" c: ea01 0002 and.w r0, r1, r2\n",
" 10: ea41 0002 orr.w r0, r1, r2\n",
- " 14: ea81 0002 eor.w r0, r1, r2\n",
- " 18: ea21 0002 bic.w r0, r1, r2\n",
- " 1c: eb41 0002 adc.w r0, r1, r2\n",
- " 20: eb61 0002 sbc.w r0, r1, r2\n",
- " 24: ebc1 0002 rsb r0, r1, r2\n",
- " 28: ea90 0f01 teq r0, r1\n",
- " 2c: 0008 movs r0, r1\n",
- " 2e: 4608 mov r0, r1\n",
- " 30: 43c8 mvns r0, r1\n",
- " 32: 4408 add r0, r1\n",
- " 34: 1888 adds r0, r1, r2\n",
- " 36: 1a88 subs r0, r1, r2\n",
- " 38: 4148 adcs r0, r1\n",
- " 3a: 4188 sbcs r0, r1\n",
- " 3c: 4008 ands r0, r1\n",
- " 3e: 4308 orrs r0, r1\n",
- " 40: 4048 eors r0, r1\n",
- " 42: 4388 bics r0, r1\n",
- " 44: 4208 tst r0, r1\n",
- " 46: 4288 cmp r0, r1\n",
- " 48: 42c8 cmn r0, r1\n",
- " 4a: 4641 mov r1, r8\n",
- " 4c: 4681 mov r9, r0\n",
- " 4e: 46c8 mov r8, r9\n",
- " 50: 4441 add r1, r8\n",
- " 52: 4481 add r9, r0\n",
- " 54: 44c8 add r8, r9\n",
- " 56: 4548 cmp r0, r9\n",
- " 58: 4588 cmp r8, r1\n",
- " 5a: 45c1 cmp r9, r8\n",
- " 5c: 4248 negs r0, r1\n",
- " 5e: 4240 negs r0, r0\n",
- " 60: ea5f 0008 movs.w r0, r8\n",
- " 64: ea7f 0008 mvns.w r0, r8\n",
- " 68: eb01 0008 add.w r0, r1, r8\n",
- " 6c: eb11 0008 adds.w r0, r1, r8\n",
- " 70: ebb1 0008 subs.w r0, r1, r8\n",
- " 74: eb50 0008 adcs.w r0, r0, r8\n",
- " 78: eb70 0008 sbcs.w r0, r0, r8\n",
- " 7c: ea10 0008 ands.w r0, r0, r8\n",
- " 80: ea50 0008 orrs.w r0, r0, r8\n",
- " 84: ea90 0008 eors.w r0, r0, r8\n",
- " 88: ea30 0008 bics.w r0, r0, r8\n",
- " 8c: ea10 0f08 tst.w r0, r8\n",
- " 90: eb10 0f08 cmn.w r0, r8\n",
- " 94: f1d8 0000 rsbs r0, r8, #0\n",
- " 98: f1d8 0800 rsbs r8, r8, #0\n",
- " 9c: bf08 it eq\n",
- " 9e: ea7f 0001 mvnseq.w r0, r1\n",
- " a2: bf08 it eq\n",
- " a4: eb11 0002 addseq.w r0, r1, r2\n",
- " a8: bf08 it eq\n",
- " aa: ebb1 0002 subseq.w r0, r1, r2\n",
- " ae: bf08 it eq\n",
- " b0: eb50 0001 adcseq.w r0, r0, r1\n",
- " b4: bf08 it eq\n",
- " b6: eb70 0001 sbcseq.w r0, r0, r1\n",
- " ba: bf08 it eq\n",
- " bc: ea10 0001 andseq.w r0, r0, r1\n",
- " c0: bf08 it eq\n",
- " c2: ea50 0001 orrseq.w r0, r0, r1\n",
- " c6: bf08 it eq\n",
- " c8: ea90 0001 eorseq.w r0, r0, r1\n",
- " cc: bf08 it eq\n",
- " ce: ea30 0001 bicseq.w r0, r0, r1\n",
- " d2: bf08 it eq\n",
- " d4: 43c8 mvneq r0, r1\n",
+ " 14: ea61 0002 orn r0, r1, r2\n",
+ " 18: ea81 0002 eor.w r0, r1, r2\n",
+ " 1c: ea21 0002 bic.w r0, r1, r2\n",
+ " 20: eb41 0002 adc.w r0, r1, r2\n",
+ " 24: eb61 0002 sbc.w r0, r1, r2\n",
+ " 28: ebc1 0002 rsb r0, r1, r2\n",
+ " 2c: ea90 0f01 teq r0, r1\n",
+ " 30: 0008 movs r0, r1\n",
+ " 32: 4608 mov r0, r1\n",
+ " 34: 43c8 mvns r0, r1\n",
+ " 36: 4408 add r0, r1\n",
+ " 38: 1888 adds r0, r1, r2\n",
+ " 3a: 1a88 subs r0, r1, r2\n",
+ " 3c: 4148 adcs r0, r1\n",
+ " 3e: 4188 sbcs r0, r1\n",
+ " 40: 4008 ands r0, r1\n",
+ " 42: 4308 orrs r0, r1\n",
+ " 44: 4048 eors r0, r1\n",
+ " 46: 4388 bics r0, r1\n",
+ " 48: 4208 tst r0, r1\n",
+ " 4a: 4288 cmp r0, r1\n",
+ " 4c: 42c8 cmn r0, r1\n",
+ " 4e: 4641 mov r1, r8\n",
+ " 50: 4681 mov r9, r0\n",
+ " 52: 46c8 mov r8, r9\n",
+ " 54: 4441 add r1, r8\n",
+ " 56: 4481 add r9, r0\n",
+ " 58: 44c8 add r8, r9\n",
+ " 5a: 4548 cmp r0, r9\n",
+ " 5c: 4588 cmp r8, r1\n",
+ " 5e: 45c1 cmp r9, r8\n",
+ " 60: 4248 negs r0, r1\n",
+ " 62: 4240 negs r0, r0\n",
+ " 64: ea5f 0008 movs.w r0, r8\n",
+ " 68: ea7f 0008 mvns.w r0, r8\n",
+ " 6c: eb01 0008 add.w r0, r1, r8\n",
+ " 70: eb11 0008 adds.w r0, r1, r8\n",
+ " 74: ebb1 0008 subs.w r0, r1, r8\n",
+ " 78: eb50 0008 adcs.w r0, r0, r8\n",
+ " 7c: eb70 0008 sbcs.w r0, r0, r8\n",
+ " 80: ea10 0008 ands.w r0, r0, r8\n",
+ " 84: ea50 0008 orrs.w r0, r0, r8\n",
+ " 88: ea90 0008 eors.w r0, r0, r8\n",
+ " 8c: ea30 0008 bics.w r0, r0, r8\n",
+ " 90: ea10 0f08 tst.w r0, r8\n",
+ " 94: eb10 0f08 cmn.w r0, r8\n",
+ " 98: f1d8 0000 rsbs r0, r8, #0\n",
+ " 9c: f1d8 0800 rsbs r8, r8, #0\n",
+ " a0: bf08 it eq\n",
+ " a2: ea7f 0001 mvnseq.w r0, r1\n",
+ " a6: bf08 it eq\n",
+ " a8: eb11 0002 addseq.w r0, r1, r2\n",
+ " ac: bf08 it eq\n",
+ " ae: ebb1 0002 subseq.w r0, r1, r2\n",
+ " b2: bf08 it eq\n",
+ " b4: eb50 0001 adcseq.w r0, r0, r1\n",
+ " b8: bf08 it eq\n",
+ " ba: eb70 0001 sbcseq.w r0, r0, r1\n",
+ " be: bf08 it eq\n",
+ " c0: ea10 0001 andseq.w r0, r0, r1\n",
+ " c4: bf08 it eq\n",
+ " c6: ea50 0001 orrseq.w r0, r0, r1\n",
+ " ca: bf08 it eq\n",
+ " cc: ea90 0001 eorseq.w r0, r0, r1\n",
+ " d0: bf08 it eq\n",
+ " d2: ea30 0001 bicseq.w r0, r0, r1\n",
" d6: bf08 it eq\n",
- " d8: 1888 addeq r0, r1, r2\n",
+ " d8: 43c8 mvneq r0, r1\n",
" da: bf08 it eq\n",
- " dc: 1a88 subeq r0, r1, r2\n",
+ " dc: 1888 addeq r0, r1, r2\n",
" de: bf08 it eq\n",
- " e0: 4148 adceq r0, r1\n",
+ " e0: 1a88 subeq r0, r1, r2\n",
" e2: bf08 it eq\n",
- " e4: 4188 sbceq r0, r1\n",
+ " e4: 4148 adceq r0, r1\n",
" e6: bf08 it eq\n",
- " e8: 4008 andeq r0, r1\n",
+ " e8: 4188 sbceq r0, r1\n",
" ea: bf08 it eq\n",
- " ec: 4308 orreq r0, r1\n",
+ " ec: 4008 andeq r0, r1\n",
" ee: bf08 it eq\n",
- " f0: 4048 eoreq r0, r1\n",
+ " f0: 4308 orreq r0, r1\n",
" f2: bf08 it eq\n",
- " f4: 4388 biceq r0, r1\n",
- " f6: 4608 mov r0, r1\n",
- " f8: 43c8 mvns r0, r1\n",
- " fa: 4408 add r0, r1\n",
- " fc: 1888 adds r0, r1, r2\n",
- " fe: 1a88 subs r0, r1, r2\n",
- " 100: 4148 adcs r0, r1\n",
- " 102: 4188 sbcs r0, r1\n",
- " 104: 4008 ands r0, r1\n",
- " 106: 4308 orrs r0, r1\n",
- " 108: 4048 eors r0, r1\n",
- " 10a: 4388 bics r0, r1\n",
- " 10c: 4641 mov r1, r8\n",
- " 10e: 4681 mov r9, r0\n",
- " 110: 46c8 mov r8, r9\n",
- " 112: 4441 add r1, r8\n",
- " 114: 4481 add r9, r0\n",
- " 116: 44c8 add r8, r9\n",
- " 118: 4248 negs r0, r1\n",
- " 11a: 4240 negs r0, r0\n",
- " 11c: eb01 0c00 add.w ip, r1, r0\n",
+ " f4: 4048 eoreq r0, r1\n",
+ " f6: bf08 it eq\n",
+ " f8: 4388 biceq r0, r1\n",
+ " fa: 4608 mov r0, r1\n",
+ " fc: 43c8 mvns r0, r1\n",
+ " fe: 4408 add r0, r1\n",
+ " 100: 1888 adds r0, r1, r2\n",
+ " 102: 1a88 subs r0, r1, r2\n",
+ " 104: 4148 adcs r0, r1\n",
+ " 106: 4188 sbcs r0, r1\n",
+ " 108: 4008 ands r0, r1\n",
+ " 10a: 4308 orrs r0, r1\n",
+ " 10c: 4048 eors r0, r1\n",
+ " 10e: 4388 bics r0, r1\n",
+ " 110: 4641 mov r1, r8\n",
+ " 112: 4681 mov r9, r0\n",
+ " 114: 46c8 mov r8, r9\n",
+ " 116: 4441 add r1, r8\n",
+ " 118: 4481 add r9, r0\n",
+ " 11a: 44c8 add r8, r9\n",
+ " 11c: 4248 negs r0, r1\n",
+ " 11e: 4240 negs r0, r0\n",
+ " 120: eb01 0c00 add.w ip, r1, r0\n",
nullptr
};
const char* DataProcessingImmediateResults[] = {
@@ -135,21 +136,22 @@ const char* DataProcessingImmediateResults[] = {
" a: f2a1 0055 subw r0, r1, #85 ; 0x55\n",
" e: f001 0055 and.w r0, r1, #85 ; 0x55\n",
" 12: f041 0055 orr.w r0, r1, #85 ; 0x55\n",
- " 16: f081 0055 eor.w r0, r1, #85 ; 0x55\n",
- " 1a: f021 0055 bic.w r0, r1, #85 ; 0x55\n",
- " 1e: f141 0055 adc.w r0, r1, #85 ; 0x55\n",
- " 22: f161 0055 sbc.w r0, r1, #85 ; 0x55\n",
- " 26: f1c1 0055 rsb r0, r1, #85 ; 0x55\n",
- " 2a: f010 0f55 tst.w r0, #85 ; 0x55\n",
- " 2e: f090 0f55 teq r0, #85 ; 0x55\n",
- " 32: 2855 cmp r0, #85 ; 0x55\n",
- " 34: f110 0f55 cmn.w r0, #85 ; 0x55\n",
- " 38: 1d48 adds r0, r1, #5\n",
- " 3a: 1f48 subs r0, r1, #5\n",
- " 3c: 2055 movs r0, #85 ; 0x55\n",
- " 3e: f07f 0055 mvns.w r0, #85 ; 0x55\n",
- " 42: 1d48 adds r0, r1, #5\n",
- " 44: 1f48 subs r0, r1, #5\n",
+ " 16: f061 0055 orn r0, r1, #85 ; 0x55\n",
+ " 1a: f081 0055 eor.w r0, r1, #85 ; 0x55\n",
+ " 1e: f021 0055 bic.w r0, r1, #85 ; 0x55\n",
+ " 22: f141 0055 adc.w r0, r1, #85 ; 0x55\n",
+ " 26: f161 0055 sbc.w r0, r1, #85 ; 0x55\n",
+ " 2a: f1c1 0055 rsb r0, r1, #85 ; 0x55\n",
+ " 2e: f010 0f55 tst.w r0, #85 ; 0x55\n",
+ " 32: f090 0f55 teq r0, #85 ; 0x55\n",
+ " 36: 2855 cmp r0, #85 ; 0x55\n",
+ " 38: f110 0f55 cmn.w r0, #85 ; 0x55\n",
+ " 3c: 1d48 adds r0, r1, #5\n",
+ " 3e: 1f48 subs r0, r1, #5\n",
+ " 40: 2055 movs r0, #85 ; 0x55\n",
+ " 42: f07f 0055 mvns.w r0, #85 ; 0x55\n",
+ " 46: 1d48 adds r0, r1, #5\n",
+ " 48: 1f48 subs r0, r1, #5\n",
nullptr
};
const char* DataProcessingModifiedImmediateResults[] = {
@@ -159,15 +161,16 @@ const char* DataProcessingModifiedImmediateResults[] = {
" c: f1a1 1055 sub.w r0, r1, #5570645 ; 0x550055\n",
" 10: f001 1055 and.w r0, r1, #5570645 ; 0x550055\n",
" 14: f041 1055 orr.w r0, r1, #5570645 ; 0x550055\n",
- " 18: f081 1055 eor.w r0, r1, #5570645 ; 0x550055\n",
- " 1c: f021 1055 bic.w r0, r1, #5570645 ; 0x550055\n",
- " 20: f141 1055 adc.w r0, r1, #5570645 ; 0x550055\n",
- " 24: f161 1055 sbc.w r0, r1, #5570645 ; 0x550055\n",
- " 28: f1c1 1055 rsb r0, r1, #5570645 ; 0x550055\n",
- " 2c: f010 1f55 tst.w r0, #5570645 ; 0x550055\n",
- " 30: f090 1f55 teq r0, #5570645 ; 0x550055\n",
- " 34: f1b0 1f55 cmp.w r0, #5570645 ; 0x550055\n",
- " 38: f110 1f55 cmn.w r0, #5570645 ; 0x550055\n",
+ " 18: f061 1055 orn r0, r1, #5570645 ; 0x550055\n",
+ " 1c: f081 1055 eor.w r0, r1, #5570645 ; 0x550055\n",
+ " 20: f021 1055 bic.w r0, r1, #5570645 ; 0x550055\n",
+ " 24: f141 1055 adc.w r0, r1, #5570645 ; 0x550055\n",
+ " 28: f161 1055 sbc.w r0, r1, #5570645 ; 0x550055\n",
+ " 2c: f1c1 1055 rsb r0, r1, #5570645 ; 0x550055\n",
+ " 30: f010 1f55 tst.w r0, #5570645 ; 0x550055\n",
+ " 34: f090 1f55 teq r0, #5570645 ; 0x550055\n",
+ " 38: f1b0 1f55 cmp.w r0, #5570645 ; 0x550055\n",
+ " 3c: f110 1f55 cmn.w r0, #5570645 ; 0x550055\n",
nullptr
};
const char* DataProcessingModifiedImmediatesResults[] = {
diff --git a/compiler/utils/label.h b/compiler/utils/label.h
index ff4a1a4333..1038f44ffe 100644
--- a/compiler/utils/label.h
+++ b/compiler/utils/label.h
@@ -70,6 +70,13 @@ class Label {
public:
Label() : position_(0) {}
+ Label(Label&& src)
+ : position_(src.position_) {
+ // We must unlink/unbind the src label when moving; otherwise, calling the destructor on
+ // the src label would fail.
+ src.position_ = 0;
+ }
+
~Label() {
// Assert if label is being destroyed with unresolved branches pending.
CHECK(!IsLinked());
diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
index c5fae92f3c..6f35e9ef59 100644
--- a/compiler/utils/mips/assembler_mips.cc
+++ b/compiler/utils/mips/assembler_mips.cc
@@ -19,6 +19,7 @@
#include "base/bit_utils.h"
#include "base/casts.h"
#include "entrypoints/quick/quick_entrypoints.h"
+#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "memory_region.h"
#include "thread.h"
@@ -34,170 +35,191 @@ std::ostream& operator<<(std::ostream& os, const DRegister& rhs) {
return os;
}
-void MipsAssembler::Emit(int32_t value) {
- AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- buffer_.Emit<int32_t>(value);
+void MipsAssembler::FinalizeCode() {
+ for (auto& exception_block : exception_blocks_) {
+ EmitExceptionPoll(&exception_block);
+ }
+ PromoteBranches();
+}
+
+void MipsAssembler::FinalizeInstructions(const MemoryRegion& region) {
+ EmitBranches();
+ Assembler::FinalizeInstructions(region);
+}
+
+void MipsAssembler::EmitBranches() {
+ CHECK(!overwriting_);
+ // Switch from appending instructions at the end of the buffer to overwriting
+ // existing instructions (branch placeholders) in the buffer.
+ overwriting_ = true;
+ for (auto& branch : branches_) {
+ EmitBranch(&branch);
+ }
+ overwriting_ = false;
+}
+
+void MipsAssembler::Emit(uint32_t value) {
+ if (overwriting_) {
+ // Branches to labels are emitted into their placeholders here.
+ buffer_.Store<uint32_t>(overwrite_location_, value);
+ overwrite_location_ += sizeof(uint32_t);
+ } else {
+ // Other instructions are simply appended at the end here.
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ buffer_.Emit<uint32_t>(value);
+ }
}
void MipsAssembler::EmitR(int opcode, Register rs, Register rt, Register rd, int shamt, int funct) {
CHECK_NE(rs, kNoRegister);
CHECK_NE(rt, kNoRegister);
CHECK_NE(rd, kNoRegister);
- int32_t encoding = opcode << kOpcodeShift |
- static_cast<int32_t>(rs) << kRsShift |
- static_cast<int32_t>(rt) << kRtShift |
- static_cast<int32_t>(rd) << kRdShift |
- shamt << kShamtShift |
- funct;
+ uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift |
+ static_cast<uint32_t>(rs) << kRsShift |
+ static_cast<uint32_t>(rt) << kRtShift |
+ static_cast<uint32_t>(rd) << kRdShift |
+ shamt << kShamtShift |
+ funct;
Emit(encoding);
}
void MipsAssembler::EmitI(int opcode, Register rs, Register rt, uint16_t imm) {
CHECK_NE(rs, kNoRegister);
CHECK_NE(rt, kNoRegister);
- int32_t encoding = opcode << kOpcodeShift |
- static_cast<int32_t>(rs) << kRsShift |
- static_cast<int32_t>(rt) << kRtShift |
- imm;
+ uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift |
+ static_cast<uint32_t>(rs) << kRsShift |
+ static_cast<uint32_t>(rt) << kRtShift |
+ imm;
+ Emit(encoding);
+}
+
+void MipsAssembler::EmitI21(int opcode, Register rs, uint32_t imm21) {
+ CHECK_NE(rs, kNoRegister);
+ CHECK(IsUint<21>(imm21)) << imm21;
+ uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift |
+ static_cast<uint32_t>(rs) << kRsShift |
+ imm21;
Emit(encoding);
}
-void MipsAssembler::EmitJ(int opcode, int address) {
- int32_t encoding = opcode << kOpcodeShift |
- address;
+void MipsAssembler::EmitI26(int opcode, uint32_t imm26) {
+ CHECK(IsUint<26>(imm26)) << imm26;
+ uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift | imm26;
Emit(encoding);
}
-void MipsAssembler::EmitFR(int opcode, int fmt, FRegister ft, FRegister fs, FRegister fd, int funct) {
+void MipsAssembler::EmitFR(int opcode, int fmt, FRegister ft, FRegister fs, FRegister fd,
+ int funct) {
CHECK_NE(ft, kNoFRegister);
CHECK_NE(fs, kNoFRegister);
CHECK_NE(fd, kNoFRegister);
- int32_t encoding = opcode << kOpcodeShift |
- fmt << kFmtShift |
- static_cast<int32_t>(ft) << kFtShift |
- static_cast<int32_t>(fs) << kFsShift |
- static_cast<int32_t>(fd) << kFdShift |
- funct;
+ uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift |
+ fmt << kFmtShift |
+ static_cast<uint32_t>(ft) << kFtShift |
+ static_cast<uint32_t>(fs) << kFsShift |
+ static_cast<uint32_t>(fd) << kFdShift |
+ funct;
Emit(encoding);
}
-void MipsAssembler::EmitFI(int opcode, int fmt, FRegister rt, uint16_t imm) {
- CHECK_NE(rt, kNoFRegister);
- int32_t encoding = opcode << kOpcodeShift |
- fmt << kFmtShift |
- static_cast<int32_t>(rt) << kRtShift |
- imm;
+void MipsAssembler::EmitFI(int opcode, int fmt, FRegister ft, uint16_t imm) {
+ CHECK_NE(ft, kNoFRegister);
+ uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift |
+ fmt << kFmtShift |
+ static_cast<uint32_t>(ft) << kFtShift |
+ imm;
Emit(encoding);
}
-void MipsAssembler::EmitBranch(Register rt, Register rs, Label* label, bool equal) {
- int offset;
- if (label->IsBound()) {
- offset = label->Position() - buffer_.Size();
- } else {
- // Use the offset field of the branch instruction for linking the sites.
- offset = label->position_;
- label->LinkTo(buffer_.Size());
- }
- if (equal) {
- Beq(rt, rs, (offset >> 2) & kBranchOffsetMask);
- } else {
- Bne(rt, rs, (offset >> 2) & kBranchOffsetMask);
- }
+void MipsAssembler::Addu(Register rd, Register rs, Register rt) {
+ EmitR(0, rs, rt, rd, 0, 0x21);
}
-void MipsAssembler::EmitJump(Label* label, bool link) {
- int offset;
- if (label->IsBound()) {
- offset = label->Position() - buffer_.Size();
- } else {
- // Use the offset field of the jump instruction for linking the sites.
- offset = label->position_;
- label->LinkTo(buffer_.Size());
- }
- if (link) {
- Jal((offset >> 2) & kJumpOffsetMask);
- } else {
- J((offset >> 2) & kJumpOffsetMask);
- }
+void MipsAssembler::Addiu(Register rt, Register rs, uint16_t imm16) {
+ EmitI(0x9, rs, rt, imm16);
}
-int32_t MipsAssembler::EncodeBranchOffset(int offset, int32_t inst, bool is_jump) {
- CHECK_ALIGNED(offset, 4);
- CHECK(IsInt(POPCOUNT(kBranchOffsetMask), offset)) << offset;
+void MipsAssembler::Subu(Register rd, Register rs, Register rt) {
+ EmitR(0, rs, rt, rd, 0, 0x23);
+}
- // Properly preserve only the bits supported in the instruction.
- offset >>= 2;
- if (is_jump) {
- offset &= kJumpOffsetMask;
- return (inst & ~kJumpOffsetMask) | offset;
- } else {
- offset &= kBranchOffsetMask;
- return (inst & ~kBranchOffsetMask) | offset;
- }
+void MipsAssembler::MultR2(Register rs, Register rt) {
+ CHECK(!IsR6());
+ EmitR(0, rs, rt, static_cast<Register>(0), 0, 0x18);
}
-int MipsAssembler::DecodeBranchOffset(int32_t inst, bool is_jump) {
- // Sign-extend, then left-shift by 2.
- if (is_jump) {
- return (((inst & kJumpOffsetMask) << 6) >> 4);
- } else {
- return (((inst & kBranchOffsetMask) << 16) >> 14);
- }
+void MipsAssembler::MultuR2(Register rs, Register rt) {
+ CHECK(!IsR6());
+ EmitR(0, rs, rt, static_cast<Register>(0), 0, 0x19);
}
-void MipsAssembler::Bind(Label* label, bool is_jump) {
- CHECK(!label->IsBound());
- int bound_pc = buffer_.Size();
- while (label->IsLinked()) {
- int32_t position = label->Position();
- int32_t next = buffer_.Load<int32_t>(position);
- int32_t offset = is_jump ? bound_pc - position : bound_pc - position - 4;
- int32_t encoded = MipsAssembler::EncodeBranchOffset(offset, next, is_jump);
- buffer_.Store<int32_t>(position, encoded);
- label->position_ = MipsAssembler::DecodeBranchOffset(next, is_jump);
- }
- label->BindTo(bound_pc);
+void MipsAssembler::DivR2(Register rs, Register rt) {
+ CHECK(!IsR6());
+ EmitR(0, rs, rt, static_cast<Register>(0), 0, 0x1a);
}
-void MipsAssembler::Add(Register rd, Register rs, Register rt) {
- EmitR(0, rs, rt, rd, 0, 0x20);
+void MipsAssembler::DivuR2(Register rs, Register rt) {
+ CHECK(!IsR6());
+ EmitR(0, rs, rt, static_cast<Register>(0), 0, 0x1b);
}
-void MipsAssembler::Addu(Register rd, Register rs, Register rt) {
- EmitR(0, rs, rt, rd, 0, 0x21);
+void MipsAssembler::MulR2(Register rd, Register rs, Register rt) {
+ CHECK(!IsR6());
+ EmitR(0x1c, rs, rt, rd, 0, 2);
}
-void MipsAssembler::Addi(Register rt, Register rs, uint16_t imm16) {
- EmitI(0x8, rs, rt, imm16);
+void MipsAssembler::DivR2(Register rd, Register rs, Register rt) {
+ CHECK(!IsR6());
+ DivR2(rs, rt);
+ Mflo(rd);
}
-void MipsAssembler::Addiu(Register rt, Register rs, uint16_t imm16) {
- EmitI(0x9, rs, rt, imm16);
+void MipsAssembler::ModR2(Register rd, Register rs, Register rt) {
+ CHECK(!IsR6());
+ DivR2(rs, rt);
+ Mfhi(rd);
}
-void MipsAssembler::Sub(Register rd, Register rs, Register rt) {
- EmitR(0, rs, rt, rd, 0, 0x22);
+void MipsAssembler::DivuR2(Register rd, Register rs, Register rt) {
+ CHECK(!IsR6());
+ DivuR2(rs, rt);
+ Mflo(rd);
}
-void MipsAssembler::Subu(Register rd, Register rs, Register rt) {
- EmitR(0, rs, rt, rd, 0, 0x23);
+void MipsAssembler::ModuR2(Register rd, Register rs, Register rt) {
+ CHECK(!IsR6());
+ DivuR2(rs, rt);
+ Mfhi(rd);
}
-void MipsAssembler::Mult(Register rs, Register rt) {
- EmitR(0, rs, rt, static_cast<Register>(0), 0, 0x18);
+void MipsAssembler::MulR6(Register rd, Register rs, Register rt) {
+ CHECK(IsR6());
+ EmitR(0, rs, rt, rd, 2, 0x18);
}
-void MipsAssembler::Multu(Register rs, Register rt) {
- EmitR(0, rs, rt, static_cast<Register>(0), 0, 0x19);
+void MipsAssembler::MuhuR6(Register rd, Register rs, Register rt) {
+ CHECK(IsR6());
+ EmitR(0, rs, rt, rd, 3, 0x19);
}
-void MipsAssembler::Div(Register rs, Register rt) {
- EmitR(0, rs, rt, static_cast<Register>(0), 0, 0x1a);
+void MipsAssembler::DivR6(Register rd, Register rs, Register rt) {
+ CHECK(IsR6());
+ EmitR(0, rs, rt, rd, 2, 0x1a);
}
-void MipsAssembler::Divu(Register rs, Register rt) {
- EmitR(0, rs, rt, static_cast<Register>(0), 0, 0x1b);
+void MipsAssembler::ModR6(Register rd, Register rs, Register rt) {
+ CHECK(IsR6());
+ EmitR(0, rs, rt, rd, 3, 0x1a);
+}
+
+void MipsAssembler::DivuR6(Register rd, Register rs, Register rt) {
+ CHECK(IsR6());
+ EmitR(0, rs, rt, rd, 2, 0x1b);
+}
+
+void MipsAssembler::ModuR6(Register rd, Register rs, Register rt) {
+ CHECK(IsR6());
+ EmitR(0, rs, rt, rd, 3, 0x1b);
}
void MipsAssembler::And(Register rd, Register rs, Register rt) {
@@ -228,27 +250,35 @@ void MipsAssembler::Nor(Register rd, Register rs, Register rt) {
EmitR(0, rs, rt, rd, 0, 0x27);
}
-void MipsAssembler::Sll(Register rd, Register rs, int shamt) {
- EmitR(0, rs, static_cast<Register>(0), rd, shamt, 0x00);
+void MipsAssembler::Seb(Register rd, Register rt) {
+ EmitR(0x1f, static_cast<Register>(0), rt, rd, 0x10, 0x20);
}
-void MipsAssembler::Srl(Register rd, Register rs, int shamt) {
- EmitR(0, rs, static_cast<Register>(0), rd, shamt, 0x02);
+void MipsAssembler::Seh(Register rd, Register rt) {
+ EmitR(0x1f, static_cast<Register>(0), rt, rd, 0x18, 0x20);
}
-void MipsAssembler::Sra(Register rd, Register rs, int shamt) {
- EmitR(0, rs, static_cast<Register>(0), rd, shamt, 0x03);
+void MipsAssembler::Sll(Register rd, Register rt, int shamt) {
+ EmitR(0, static_cast<Register>(0), rt, rd, shamt, 0x00);
}
-void MipsAssembler::Sllv(Register rd, Register rs, Register rt) {
+void MipsAssembler::Srl(Register rd, Register rt, int shamt) {
+ EmitR(0, static_cast<Register>(0), rt, rd, shamt, 0x02);
+}
+
+void MipsAssembler::Sra(Register rd, Register rt, int shamt) {
+ EmitR(0, static_cast<Register>(0), rt, rd, shamt, 0x03);
+}
+
+void MipsAssembler::Sllv(Register rd, Register rt, Register rs) {
EmitR(0, rs, rt, rd, 0, 0x04);
}
-void MipsAssembler::Srlv(Register rd, Register rs, Register rt) {
+void MipsAssembler::Srlv(Register rd, Register rt, Register rs) {
EmitR(0, rs, rt, rd, 0, 0x06);
}
-void MipsAssembler::Srav(Register rd, Register rs, Register rt) {
+void MipsAssembler::Srav(Register rd, Register rt, Register rs) {
EmitR(0, rs, rt, rd, 0, 0x07);
}
@@ -276,11 +306,18 @@ void MipsAssembler::Lui(Register rt, uint16_t imm16) {
EmitI(0xf, static_cast<Register>(0), rt, imm16);
}
+void MipsAssembler::Sync(uint32_t stype) {
+ EmitR(0, static_cast<Register>(0), static_cast<Register>(0), static_cast<Register>(0),
+ stype & 0x1f, 0xf);
+}
+
void MipsAssembler::Mfhi(Register rd) {
+ CHECK(!IsR6());
EmitR(0, static_cast<Register>(0), static_cast<Register>(0), rd, 0, 0x10);
}
void MipsAssembler::Mflo(Register rd) {
+ CHECK(!IsR6());
EmitR(0, static_cast<Register>(0), static_cast<Register>(0), rd, 0, 0x12);
}
@@ -312,34 +349,276 @@ void MipsAssembler::Sltiu(Register rt, Register rs, uint16_t imm16) {
EmitI(0xb, rs, rt, imm16);
}
-void MipsAssembler::Beq(Register rt, Register rs, uint16_t imm16) {
+void MipsAssembler::B(uint16_t imm16) {
+ EmitI(0x4, static_cast<Register>(0), static_cast<Register>(0), imm16);
+}
+
+void MipsAssembler::Beq(Register rs, Register rt, uint16_t imm16) {
EmitI(0x4, rs, rt, imm16);
- Nop();
}
-void MipsAssembler::Bne(Register rt, Register rs, uint16_t imm16) {
+void MipsAssembler::Bne(Register rs, Register rt, uint16_t imm16) {
EmitI(0x5, rs, rt, imm16);
- Nop();
}
-void MipsAssembler::J(uint32_t address) {
- EmitJ(0x2, address);
- Nop();
+void MipsAssembler::Beqz(Register rt, uint16_t imm16) {
+ Beq(ZERO, rt, imm16);
}
-void MipsAssembler::Jal(uint32_t address) {
- EmitJ(0x2, address);
- Nop();
+void MipsAssembler::Bnez(Register rt, uint16_t imm16) {
+ Bne(ZERO, rt, imm16);
}
-void MipsAssembler::Jr(Register rs) {
- EmitR(0, rs, static_cast<Register>(0), static_cast<Register>(0), 0, 0x09); // Jalr zero, rs
- Nop();
+void MipsAssembler::Bltz(Register rt, uint16_t imm16) {
+ EmitI(0x1, rt, static_cast<Register>(0), imm16);
+}
+
+void MipsAssembler::Bgez(Register rt, uint16_t imm16) {
+ EmitI(0x1, rt, static_cast<Register>(0x1), imm16);
+}
+
+void MipsAssembler::Blez(Register rt, uint16_t imm16) {
+ EmitI(0x6, rt, static_cast<Register>(0), imm16);
+}
+
+void MipsAssembler::Bgtz(Register rt, uint16_t imm16) {
+ EmitI(0x7, rt, static_cast<Register>(0), imm16);
+}
+
+void MipsAssembler::J(uint32_t addr26) {
+ EmitI26(0x2, addr26);
+}
+
+void MipsAssembler::Jal(uint32_t addr26) {
+ EmitI26(0x3, addr26);
+}
+
+void MipsAssembler::Jalr(Register rd, Register rs) {
+ EmitR(0, rs, static_cast<Register>(0), rd, 0, 0x09);
}
void MipsAssembler::Jalr(Register rs) {
- EmitR(0, rs, static_cast<Register>(0), RA, 0, 0x09);
- Nop();
+ Jalr(RA, rs);
+}
+
+void MipsAssembler::Jr(Register rs) {
+ Jalr(ZERO, rs);
+}
+
+void MipsAssembler::Nal() {
+ EmitI(0x1, static_cast<Register>(0), static_cast<Register>(0x10), 0);
+}
+
+void MipsAssembler::Auipc(Register rs, uint16_t imm16) {
+ CHECK(IsR6());
+ EmitI(0x3B, rs, static_cast<Register>(0x1E), imm16);
+}
+
+void MipsAssembler::Addiupc(Register rs, uint32_t imm19) {
+ CHECK(IsR6());
+ CHECK(IsUint<19>(imm19)) << imm19;
+ EmitI21(0x3B, rs, imm19);
+}
+
+void MipsAssembler::Bc(uint32_t imm26) {
+ CHECK(IsR6());
+ EmitI26(0x32, imm26);
+}
+
+void MipsAssembler::Jic(Register rt, uint16_t imm16) {
+ CHECK(IsR6());
+ EmitI(0x36, static_cast<Register>(0), rt, imm16);
+}
+
+void MipsAssembler::Jialc(Register rt, uint16_t imm16) {
+ CHECK(IsR6());
+ EmitI(0x3E, static_cast<Register>(0), rt, imm16);
+}
+
+void MipsAssembler::Bltc(Register rs, Register rt, uint16_t imm16) {
+ CHECK(IsR6());
+ CHECK_NE(rs, ZERO);
+ CHECK_NE(rt, ZERO);
+ CHECK_NE(rs, rt);
+ EmitI(0x17, rs, rt, imm16);
+}
+
+void MipsAssembler::Bltzc(Register rt, uint16_t imm16) {
+ CHECK(IsR6());
+ CHECK_NE(rt, ZERO);
+ EmitI(0x17, rt, rt, imm16);
+}
+
+void MipsAssembler::Bgtzc(Register rt, uint16_t imm16) {
+ CHECK(IsR6());
+ CHECK_NE(rt, ZERO);
+ EmitI(0x17, static_cast<Register>(0), rt, imm16);
+}
+
+void MipsAssembler::Bgec(Register rs, Register rt, uint16_t imm16) {
+ CHECK(IsR6());
+ CHECK_NE(rs, ZERO);
+ CHECK_NE(rt, ZERO);
+ CHECK_NE(rs, rt);
+ EmitI(0x16, rs, rt, imm16);
+}
+
+void MipsAssembler::Bgezc(Register rt, uint16_t imm16) {
+ CHECK(IsR6());
+ CHECK_NE(rt, ZERO);
+ EmitI(0x16, rt, rt, imm16);
+}
+
+void MipsAssembler::Blezc(Register rt, uint16_t imm16) {
+ CHECK(IsR6());
+ CHECK_NE(rt, ZERO);
+ EmitI(0x16, static_cast<Register>(0), rt, imm16);
+}
+
+void MipsAssembler::Bltuc(Register rs, Register rt, uint16_t imm16) {
+ CHECK(IsR6());
+ CHECK_NE(rs, ZERO);
+ CHECK_NE(rt, ZERO);
+ CHECK_NE(rs, rt);
+ EmitI(0x7, rs, rt, imm16);
+}
+
+void MipsAssembler::Bgeuc(Register rs, Register rt, uint16_t imm16) {
+ CHECK(IsR6());
+ CHECK_NE(rs, ZERO);
+ CHECK_NE(rt, ZERO);
+ CHECK_NE(rs, rt);
+ EmitI(0x6, rs, rt, imm16);
+}
+
+void MipsAssembler::Beqc(Register rs, Register rt, uint16_t imm16) {
+ CHECK(IsR6());
+ CHECK_NE(rs, ZERO);
+ CHECK_NE(rt, ZERO);
+ CHECK_NE(rs, rt);
+ EmitI(0x8, std::min(rs, rt), std::max(rs, rt), imm16);
+}
+
+void MipsAssembler::Bnec(Register rs, Register rt, uint16_t imm16) {
+ CHECK(IsR6());
+ CHECK_NE(rs, ZERO);
+ CHECK_NE(rt, ZERO);
+ CHECK_NE(rs, rt);
+ EmitI(0x18, std::min(rs, rt), std::max(rs, rt), imm16);
+}
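+// Note: equality/inequality are symmetric, so the operand order of Beqc/Bnec doesn't matter; the
+// min/max above keeps rs < rt, matching the register ordering the beqc/bnec encodings expect.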
+
+void MipsAssembler::Beqzc(Register rs, uint32_t imm21) {
+ CHECK(IsR6());
+ CHECK_NE(rs, ZERO);
+ EmitI21(0x36, rs, imm21);
+}
+
+void MipsAssembler::Bnezc(Register rs, uint32_t imm21) {
+ CHECK(IsR6());
+ CHECK_NE(rs, ZERO);
+ EmitI21(0x3E, rs, imm21);
+}
+
+void MipsAssembler::EmitBcond(BranchCondition cond, Register rs, Register rt, uint16_t imm16) {
+ switch (cond) {
+ case kCondLTZ:
+ CHECK_EQ(rt, ZERO);
+ Bltz(rs, imm16);
+ break;
+ case kCondGEZ:
+ CHECK_EQ(rt, ZERO);
+ Bgez(rs, imm16);
+ break;
+ case kCondLEZ:
+ CHECK_EQ(rt, ZERO);
+ Blez(rs, imm16);
+ break;
+ case kCondGTZ:
+ CHECK_EQ(rt, ZERO);
+ Bgtz(rs, imm16);
+ break;
+ case kCondEQ:
+ Beq(rs, rt, imm16);
+ break;
+ case kCondNE:
+ Bne(rs, rt, imm16);
+ break;
+ case kCondEQZ:
+ CHECK_EQ(rt, ZERO);
+ Beqz(rs, imm16);
+ break;
+ case kCondNEZ:
+ CHECK_EQ(rt, ZERO);
+ Bnez(rs, imm16);
+ break;
+ case kCondLT:
+ case kCondGE:
+ case kCondLE:
+ case kCondGT:
+ case kCondLTU:
+ case kCondGEU:
+ case kUncond:
+ // We don't support synthetic R2 branches (preceded with slt[u]) at this level
+ // (R2 doesn't have branches to compare 2 registers using <, <=, >=, >).
+ LOG(FATAL) << "Unexpected branch condition " << cond;
+ UNREACHABLE();
+ }
+}
+
+void MipsAssembler::EmitBcondc(BranchCondition cond, Register rs, Register rt, uint32_t imm16_21) {
+ switch (cond) {
+ case kCondLT:
+ Bltc(rs, rt, imm16_21);
+ break;
+ case kCondGE:
+ Bgec(rs, rt, imm16_21);
+ break;
+ case kCondLE:
+ Bgec(rt, rs, imm16_21);
+ break;
+ case kCondGT:
+ Bltc(rt, rs, imm16_21);
+ break;
+ case kCondLTZ:
+ CHECK_EQ(rt, ZERO);
+ Bltzc(rs, imm16_21);
+ break;
+ case kCondGEZ:
+ CHECK_EQ(rt, ZERO);
+ Bgezc(rs, imm16_21);
+ break;
+ case kCondLEZ:
+ CHECK_EQ(rt, ZERO);
+ Blezc(rs, imm16_21);
+ break;
+ case kCondGTZ:
+ CHECK_EQ(rt, ZERO);
+ Bgtzc(rs, imm16_21);
+ break;
+ case kCondEQ:
+ Beqc(rs, rt, imm16_21);
+ break;
+ case kCondNE:
+ Bnec(rs, rt, imm16_21);
+ break;
+ case kCondEQZ:
+ CHECK_EQ(rt, ZERO);
+ Beqzc(rs, imm16_21);
+ break;
+ case kCondNEZ:
+ CHECK_EQ(rt, ZERO);
+ Bnezc(rs, imm16_21);
+ break;
+ case kCondLTU:
+ Bltuc(rs, rt, imm16_21);
+ break;
+ case kCondGEU:
+ Bgeuc(rs, rt, imm16_21);
+ break;
+ case kUncond:
+ LOG(FATAL) << "Unexpected branch condition " << cond;
+ UNREACHABLE();
+ }
}
void MipsAssembler::AddS(FRegister fd, FRegister fs, FRegister ft) {
@@ -358,52 +637,84 @@ void MipsAssembler::DivS(FRegister fd, FRegister fs, FRegister ft) {
EmitFR(0x11, 0x10, ft, fs, fd, 0x3);
}
-void MipsAssembler::AddD(DRegister fd, DRegister fs, DRegister ft) {
- EmitFR(0x11, 0x11, ConvertDRegToFReg(ft), ConvertDRegToFReg(fs), ConvertDRegToFReg(fd), 0x0);
+void MipsAssembler::AddD(FRegister fd, FRegister fs, FRegister ft) {
+ EmitFR(0x11, 0x11, ft, fs, fd, 0x0);
}
-void MipsAssembler::SubD(DRegister fd, DRegister fs, DRegister ft) {
- EmitFR(0x11, 0x11, ConvertDRegToFReg(ft), ConvertDRegToFReg(fs), ConvertDRegToFReg(fd), 0x1);
+void MipsAssembler::SubD(FRegister fd, FRegister fs, FRegister ft) {
+ EmitFR(0x11, 0x11, ft, fs, fd, 0x1);
}
-void MipsAssembler::MulD(DRegister fd, DRegister fs, DRegister ft) {
- EmitFR(0x11, 0x11, ConvertDRegToFReg(ft), ConvertDRegToFReg(fs), ConvertDRegToFReg(fd), 0x2);
+void MipsAssembler::MulD(FRegister fd, FRegister fs, FRegister ft) {
+ EmitFR(0x11, 0x11, ft, fs, fd, 0x2);
}
-void MipsAssembler::DivD(DRegister fd, DRegister fs, DRegister ft) {
- EmitFR(0x11, 0x11, ConvertDRegToFReg(ft), ConvertDRegToFReg(fs), ConvertDRegToFReg(fd), 0x3);
+void MipsAssembler::DivD(FRegister fd, FRegister fs, FRegister ft) {
+ EmitFR(0x11, 0x11, ft, fs, fd, 0x3);
}
void MipsAssembler::MovS(FRegister fd, FRegister fs) {
EmitFR(0x11, 0x10, static_cast<FRegister>(0), fs, fd, 0x6);
}
-void MipsAssembler::MovD(DRegister fd, DRegister fs) {
- EmitFR(0x11, 0x11, static_cast<FRegister>(0), ConvertDRegToFReg(fs), ConvertDRegToFReg(fd), 0x6);
+void MipsAssembler::MovD(FRegister fd, FRegister fs) {
+ EmitFR(0x11, 0x11, static_cast<FRegister>(0), fs, fd, 0x6);
+}
+
+void MipsAssembler::NegS(FRegister fd, FRegister fs) {
+ EmitFR(0x11, 0x10, static_cast<FRegister>(0), fs, fd, 0x7);
+}
+
+void MipsAssembler::NegD(FRegister fd, FRegister fs) {
+ EmitFR(0x11, 0x11, static_cast<FRegister>(0), fs, fd, 0x7);
+}
+
+void MipsAssembler::Cvtsw(FRegister fd, FRegister fs) {
+ EmitFR(0x11, 0x14, static_cast<FRegister>(0), fs, fd, 0x20);
+}
+
+void MipsAssembler::Cvtdw(FRegister fd, FRegister fs) {
+ EmitFR(0x11, 0x14, static_cast<FRegister>(0), fs, fd, 0x21);
+}
+
+void MipsAssembler::Cvtsd(FRegister fd, FRegister fs) {
+ EmitFR(0x11, 0x11, static_cast<FRegister>(0), fs, fd, 0x20);
+}
+
+void MipsAssembler::Cvtds(FRegister fd, FRegister fs) {
+ EmitFR(0x11, 0x10, static_cast<FRegister>(0), fs, fd, 0x21);
}
void MipsAssembler::Mfc1(Register rt, FRegister fs) {
- EmitFR(0x11, 0x00, ConvertRegToFReg(rt), fs, static_cast<FRegister>(0), 0x0);
+ EmitFR(0x11, 0x00, static_cast<FRegister>(rt), fs, static_cast<FRegister>(0), 0x0);
+}
+
+void MipsAssembler::Mtc1(Register rt, FRegister fs) {
+ EmitFR(0x11, 0x04, static_cast<FRegister>(rt), fs, static_cast<FRegister>(0), 0x0);
}
-void MipsAssembler::Mtc1(FRegister ft, Register rs) {
- EmitFR(0x11, 0x04, ft, ConvertRegToFReg(rs), static_cast<FRegister>(0), 0x0);
+void MipsAssembler::Mfhc1(Register rt, FRegister fs) {
+ EmitFR(0x11, 0x03, static_cast<FRegister>(rt), fs, static_cast<FRegister>(0), 0x0);
+}
+
+void MipsAssembler::Mthc1(Register rt, FRegister fs) {
+ EmitFR(0x11, 0x07, static_cast<FRegister>(rt), fs, static_cast<FRegister>(0), 0x0);
}
void MipsAssembler::Lwc1(FRegister ft, Register rs, uint16_t imm16) {
- EmitI(0x31, rs, ConvertFRegToReg(ft), imm16);
+ EmitI(0x31, rs, static_cast<Register>(ft), imm16);
}
-void MipsAssembler::Ldc1(DRegister ft, Register rs, uint16_t imm16) {
- EmitI(0x35, rs, ConvertDRegToReg(ft), imm16);
+void MipsAssembler::Ldc1(FRegister ft, Register rs, uint16_t imm16) {
+ EmitI(0x35, rs, static_cast<Register>(ft), imm16);
}
void MipsAssembler::Swc1(FRegister ft, Register rs, uint16_t imm16) {
- EmitI(0x39, rs, ConvertFRegToReg(ft), imm16);
+ EmitI(0x39, rs, static_cast<Register>(ft), imm16);
}
-void MipsAssembler::Sdc1(DRegister ft, Register rs, uint16_t imm16) {
- EmitI(0x3d, rs, ConvertDRegToReg(ft), imm16);
+void MipsAssembler::Sdc1(FRegister ft, Register rs, uint16_t imm16) {
+ EmitI(0x3d, rs, static_cast<Register>(ft), imm16);
}
void MipsAssembler::Break() {
@@ -415,63 +726,881 @@ void MipsAssembler::Nop() {
EmitR(0x0, static_cast<Register>(0), static_cast<Register>(0), static_cast<Register>(0), 0, 0x0);
}
-void MipsAssembler::Move(Register rt, Register rs) {
- EmitI(0x9, rs, rt, 0); // Addiu
+void MipsAssembler::Move(Register rd, Register rs) {
+ Or(rd, rs, ZERO);
}
-void MipsAssembler::Clear(Register rt) {
- EmitR(0, static_cast<Register>(0), static_cast<Register>(0), rt, 0, 0x20);
+void MipsAssembler::Clear(Register rd) {
+ Move(rd, ZERO);
}
-void MipsAssembler::Not(Register rt, Register rs) {
- EmitR(0, static_cast<Register>(0), rs, rt, 0, 0x27);
+void MipsAssembler::Not(Register rd, Register rs) {
+ Nor(rd, rs, ZERO);
}
-void MipsAssembler::Mul(Register rd, Register rs, Register rt) {
- Mult(rs, rt);
- Mflo(rd);
+void MipsAssembler::Push(Register rs) {
+ IncreaseFrameSize(kMipsWordSize);
+ Sw(rs, SP, 0);
}
-void MipsAssembler::Div(Register rd, Register rs, Register rt) {
- Div(rs, rt);
- Mflo(rd);
+void MipsAssembler::Pop(Register rd) {
+ Lw(rd, SP, 0);
+ DecreaseFrameSize(kMipsWordSize);
}
-void MipsAssembler::Rem(Register rd, Register rs, Register rt) {
- Div(rs, rt);
- Mfhi(rd);
+void MipsAssembler::PopAndReturn(Register rd, Register rt) {
+ Lw(rd, SP, 0);
+ Jr(rt);
+ DecreaseFrameSize(kMipsWordSize);
}
-void MipsAssembler::AddConstant(Register rt, Register rs, int32_t value) {
- Addiu(rt, rs, value);
+void MipsAssembler::LoadConst32(Register rd, int32_t value) {
+ if (IsUint<16>(value)) {
+ // Use OR with (unsigned) immediate to encode 16b unsigned int.
+ Ori(rd, ZERO, value);
+ } else if (IsInt<16>(value)) {
+ // Use ADD with (signed) immediate to encode 16b signed int.
+ Addiu(rd, ZERO, value);
+ } else {
+ Lui(rd, High16Bits(value));
+ if (value & 0xFFFF)
+ Ori(rd, rd, Low16Bits(value));
+ }
}
-void MipsAssembler::LoadImmediate(Register rt, int32_t value) {
- Addiu(rt, ZERO, value);
+void MipsAssembler::LoadConst64(Register reg_hi, Register reg_lo, int64_t value) {
+ LoadConst32(reg_lo, Low32Bits(value));
+ LoadConst32(reg_hi, High32Bits(value));
}
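+// For example, LoadConst32(rd, 0x12345678) expands to LUI rd, 0x1234 followed by ORI rd, rd, 0x5678,
+// while LoadConst32(rd, -4) is a single ADDIU rd, ZERO, -4 and LoadConst32(rd, 0x5000) a single
+// ORI rd, ZERO, 0x5000. LoadConst64() simply applies LoadConst32() to each 32-bit half.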
-void MipsAssembler::EmitLoad(ManagedRegister m_dst, Register src_register, int32_t src_offset,
- size_t size) {
- MipsManagedRegister dst = m_dst.AsMips();
- if (dst.IsNoRegister()) {
- CHECK_EQ(0u, size) << dst;
- } else if (dst.IsCoreRegister()) {
- CHECK_EQ(4u, size) << dst;
- LoadFromOffset(kLoadWord, dst.AsCoreRegister(), src_register, src_offset);
- } else if (dst.IsRegisterPair()) {
- CHECK_EQ(8u, size) << dst;
- LoadFromOffset(kLoadWord, dst.AsRegisterPairLow(), src_register, src_offset);
- LoadFromOffset(kLoadWord, dst.AsRegisterPairHigh(), src_register, src_offset + 4);
- } else if (dst.IsFRegister()) {
- LoadSFromOffset(dst.AsFRegister(), src_register, src_offset);
+void MipsAssembler::StoreConst32ToOffset(int32_t value,
+ Register base,
+ int32_t offset,
+ Register temp) {
+ if (!IsInt<16>(offset)) {
+ CHECK_NE(temp, AT); // Must not use AT as temp, so as not to overwrite the loaded value.
+ LoadConst32(AT, offset);
+ Addu(AT, AT, base);
+ base = AT;
+ offset = 0;
+ }
+ LoadConst32(temp, value);
+ Sw(temp, base, offset);
+}
+
+void MipsAssembler::StoreConst64ToOffset(int64_t value,
+ Register base,
+ int32_t offset,
+ Register temp) {
+ // IsInt<16> must be passed a signed value.
+ if (!IsInt<16>(offset) || !IsInt<16>(static_cast<int32_t>(offset + kMipsWordSize))) {
+ CHECK_NE(temp, AT); // Must not use AT as temp, so as not to overwrite the loaded value.
+ LoadConst32(AT, offset);
+ Addu(AT, AT, base);
+ base = AT;
+ offset = 0;
+ }
+ LoadConst32(temp, Low32Bits(value));
+ Sw(temp, base, offset);
+ LoadConst32(temp, High32Bits(value));
+ Sw(temp, base, offset + kMipsWordSize);
+}
+
+void MipsAssembler::LoadSConst32(FRegister r, int32_t value, Register temp) {
+ LoadConst32(temp, value);
+ Mtc1(temp, r);
+}
+
+void MipsAssembler::LoadDConst64(FRegister rd, int64_t value, Register temp) {
+ LoadConst32(temp, Low32Bits(value));
+ Mtc1(temp, rd);
+ LoadConst32(temp, High32Bits(value));
+ Mthc1(temp, rd);
+}
+
+void MipsAssembler::Addiu32(Register rt, Register rs, int32_t value, Register temp) {
+ if (IsInt<16>(value)) {
+ Addiu(rt, rs, value);
+ } else {
+ LoadConst32(temp, value);
+ Addu(rt, rs, temp);
+ }
+}
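+// E.g. Addiu32(rt, rs, -256, temp) stays a single ADDIU, whereas Addiu32(rt, rs, 0x20000, temp)
+// first materializes 0x20000 in temp with LoadConst32() and then emits ADDU rt, rs, temp.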
+
+void MipsAssembler::Branch::InitShortOrLong(MipsAssembler::Branch::OffsetBits offset_size,
+ MipsAssembler::Branch::Type short_type,
+ MipsAssembler::Branch::Type long_type) {
+ type_ = (offset_size <= branch_info_[short_type].offset_size) ? short_type : long_type;
+}
+
+void MipsAssembler::Branch::InitializeType(bool is_call, bool is_r6) {
+ OffsetBits offset_size = GetOffsetSizeNeeded(location_, target_);
+ if (is_r6) {
+ // R6
+ if (is_call) {
+ InitShortOrLong(offset_size, kR6Call, kR6LongCall);
+ } else if (condition_ == kUncond) {
+ InitShortOrLong(offset_size, kR6UncondBranch, kR6LongUncondBranch);
+ } else {
+ if (condition_ == kCondEQZ || condition_ == kCondNEZ) {
+ // Special case for beqzc/bnezc with longer offset than in other b<cond>c instructions.
+ type_ = (offset_size <= kOffset23) ? kR6CondBranch : kR6LongCondBranch;
+ } else {
+ InitShortOrLong(offset_size, kR6CondBranch, kR6LongCondBranch);
+ }
+ }
+ } else {
+ // R2
+ if (is_call) {
+ InitShortOrLong(offset_size, kCall, kLongCall);
+ } else if (condition_ == kUncond) {
+ InitShortOrLong(offset_size, kUncondBranch, kLongUncondBranch);
+ } else {
+ InitShortOrLong(offset_size, kCondBranch, kLongCondBranch);
+ }
+ }
+ old_type_ = type_;
+}
+
+bool MipsAssembler::Branch::IsNop(BranchCondition condition, Register lhs, Register rhs) {
+ switch (condition) {
+ case kCondLT:
+ case kCondGT:
+ case kCondNE:
+ case kCondLTU:
+ return lhs == rhs;
+ default:
+ return false;
+ }
+}
+
+bool MipsAssembler::Branch::IsUncond(BranchCondition condition, Register lhs, Register rhs) {
+ switch (condition) {
+ case kUncond:
+ return true;
+ case kCondGE:
+ case kCondLE:
+ case kCondEQ:
+ case kCondGEU:
+ return lhs == rhs;
+ default:
+ return false;
+ }
+}
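+// E.g. a kCondLT branch comparing a register with itself can never be taken, so Bcond() drops it as
+// a no-op, while a kCondGE branch on equal registers is always taken and the Branch constructor
+// converts it to an unconditional branch.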
+
+MipsAssembler::Branch::Branch(bool is_r6, uint32_t location, uint32_t target)
+ : old_location_(location),
+ location_(location),
+ target_(target),
+ lhs_reg_(0),
+ rhs_reg_(0),
+ condition_(kUncond) {
+ InitializeType(false, is_r6);
+}
+
+MipsAssembler::Branch::Branch(bool is_r6,
+ uint32_t location,
+ uint32_t target,
+ MipsAssembler::BranchCondition condition,
+ Register lhs_reg,
+ Register rhs_reg)
+ : old_location_(location),
+ location_(location),
+ target_(target),
+ lhs_reg_(lhs_reg),
+ rhs_reg_(rhs_reg),
+ condition_(condition) {
+ CHECK_NE(condition, kUncond);
+ switch (condition) {
+ case kCondLT:
+ case kCondGE:
+ case kCondLE:
+ case kCondGT:
+ case kCondLTU:
+ case kCondGEU:
+ // We don't support synthetic R2 branches (preceded with slt[u]) at this level
+ // (R2 doesn't have branches to compare 2 registers using <, <=, >=, >).
+ // We leave this up to the caller.
+ CHECK(is_r6);
+ FALLTHROUGH_INTENDED;
+ case kCondEQ:
+ case kCondNE:
+ // Require registers other than 0 not only for R6, but also for R2 to catch errors.
+ // To compare with 0, use dedicated kCond*Z conditions.
+ CHECK_NE(lhs_reg, ZERO);
+ CHECK_NE(rhs_reg, ZERO);
+ break;
+ case kCondLTZ:
+ case kCondGEZ:
+ case kCondLEZ:
+ case kCondGTZ:
+ case kCondEQZ:
+ case kCondNEZ:
+ // Require registers other than 0 not only for R6, but also for R2 to catch errors.
+ CHECK_NE(lhs_reg, ZERO);
+ CHECK_EQ(rhs_reg, ZERO);
+ break;
+ case kUncond:
+ UNREACHABLE();
+ }
+ CHECK(!IsNop(condition, lhs_reg, rhs_reg));
+ if (IsUncond(condition, lhs_reg, rhs_reg)) {
+ // Branch condition is always true, make the branch unconditional.
+ condition_ = kUncond;
+ }
+ InitializeType(false, is_r6);
+}
+
+MipsAssembler::Branch::Branch(bool is_r6, uint32_t location, uint32_t target, Register indirect_reg)
+ : old_location_(location),
+ location_(location),
+ target_(target),
+ lhs_reg_(indirect_reg),
+ rhs_reg_(0),
+ condition_(kUncond) {
+ CHECK_NE(indirect_reg, ZERO);
+ CHECK_NE(indirect_reg, AT);
+ InitializeType(true, is_r6);
+}
+
+MipsAssembler::BranchCondition MipsAssembler::Branch::OppositeCondition(
+ MipsAssembler::BranchCondition cond) {
+ switch (cond) {
+ case kCondLT:
+ return kCondGE;
+ case kCondGE:
+ return kCondLT;
+ case kCondLE:
+ return kCondGT;
+ case kCondGT:
+ return kCondLE;
+ case kCondLTZ:
+ return kCondGEZ;
+ case kCondGEZ:
+ return kCondLTZ;
+ case kCondLEZ:
+ return kCondGTZ;
+ case kCondGTZ:
+ return kCondLEZ;
+ case kCondEQ:
+ return kCondNE;
+ case kCondNE:
+ return kCondEQ;
+ case kCondEQZ:
+ return kCondNEZ;
+ case kCondNEZ:
+ return kCondEQZ;
+ case kCondLTU:
+ return kCondGEU;
+ case kCondGEU:
+ return kCondLTU;
+ case kUncond:
+ LOG(FATAL) << "Unexpected branch condition " << cond;
+ }
+ UNREACHABLE();
+}
+
+MipsAssembler::Branch::Type MipsAssembler::Branch::GetType() const {
+ return type_;
+}
+
+MipsAssembler::BranchCondition MipsAssembler::Branch::GetCondition() const {
+ return condition_;
+}
+
+Register MipsAssembler::Branch::GetLeftRegister() const {
+ return static_cast<Register>(lhs_reg_);
+}
+
+Register MipsAssembler::Branch::GetRightRegister() const {
+ return static_cast<Register>(rhs_reg_);
+}
+
+uint32_t MipsAssembler::Branch::GetTarget() const {
+ return target_;
+}
+
+uint32_t MipsAssembler::Branch::GetLocation() const {
+ return location_;
+}
+
+uint32_t MipsAssembler::Branch::GetOldLocation() const {
+ return old_location_;
+}
+
+uint32_t MipsAssembler::Branch::GetLength() const {
+ return branch_info_[type_].length;
+}
+
+uint32_t MipsAssembler::Branch::GetOldLength() const {
+ return branch_info_[old_type_].length;
+}
+
+uint32_t MipsAssembler::Branch::GetSize() const {
+ return GetLength() * sizeof(uint32_t);
+}
+
+uint32_t MipsAssembler::Branch::GetOldSize() const {
+ return GetOldLength() * sizeof(uint32_t);
+}
+
+uint32_t MipsAssembler::Branch::GetEndLocation() const {
+ return GetLocation() + GetSize();
+}
+
+uint32_t MipsAssembler::Branch::GetOldEndLocation() const {
+ return GetOldLocation() + GetOldSize();
+}
+
+bool MipsAssembler::Branch::IsLong() const {
+ switch (type_) {
+ // R2 short branches.
+ case kUncondBranch:
+ case kCondBranch:
+ case kCall:
+ // R6 short branches.
+ case kR6UncondBranch:
+ case kR6CondBranch:
+ case kR6Call:
+ return false;
+ // R2 long branches.
+ case kLongUncondBranch:
+ case kLongCondBranch:
+ case kLongCall:
+ // R6 long branches.
+ case kR6LongUncondBranch:
+ case kR6LongCondBranch:
+ case kR6LongCall:
+ return true;
+ }
+ UNREACHABLE();
+}
+
+bool MipsAssembler::Branch::IsResolved() const {
+ return target_ != kUnresolved;
+}
+
+MipsAssembler::Branch::OffsetBits MipsAssembler::Branch::GetOffsetSize() const {
+ OffsetBits offset_size =
+ (type_ == kR6CondBranch && (condition_ == kCondEQZ || condition_ == kCondNEZ))
+ ? kOffset23
+ : branch_info_[type_].offset_size;
+ return offset_size;
+}
+
+MipsAssembler::Branch::OffsetBits MipsAssembler::Branch::GetOffsetSizeNeeded(uint32_t location,
+ uint32_t target) {
+ // For unresolved targets assume the shortest encoding
+ // (later it will be made longer if needed).
+ if (target == kUnresolved)
+ return kOffset16;
+ int64_t distance = static_cast<int64_t>(target) - location;
+ // To simplify calculations in composite branches consisting of multiple instructions,
+ // bump up the distance by a value larger than the max byte size of a composite branch.
+ distance += (distance >= 0) ? kMaxBranchSize : -kMaxBranchSize;
+ if (IsInt<kOffset16>(distance))
+ return kOffset16;
+ else if (IsInt<kOffset18>(distance))
+ return kOffset18;
+ else if (IsInt<kOffset21>(distance))
+ return kOffset21;
+ else if (IsInt<kOffset23>(distance))
+ return kOffset23;
+ else if (IsInt<kOffset28>(distance))
+ return kOffset28;
+ return kOffset32;
+}
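+// For example (ignoring the small kMaxBranchSize adjustment): a resolved target roughly 100KB away
+// needs kOffset18, one roughly 3MB away needs kOffset23, and anything beyond about 128MB falls
+// back to kOffset32.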
+
+void MipsAssembler::Branch::Resolve(uint32_t target) {
+ target_ = target;
+}
+
+void MipsAssembler::Branch::Relocate(uint32_t expand_location, uint32_t delta) {
+ if (location_ > expand_location) {
+ location_ += delta;
+ }
+ if (!IsResolved()) {
+ return; // Don't know the target yet.
+ }
+ if (target_ > expand_location) {
+ target_ += delta;
+ }
+}
+
+void MipsAssembler::Branch::PromoteToLong() {
+ switch (type_) {
+ // R2 short branches.
+ case kUncondBranch:
+ type_ = kLongUncondBranch;
+ break;
+ case kCondBranch:
+ type_ = kLongCondBranch;
+ break;
+ case kCall:
+ type_ = kLongCall;
+ break;
+ // R6 short branches.
+ case kR6UncondBranch:
+ type_ = kR6LongUncondBranch;
+ break;
+ case kR6CondBranch:
+ type_ = kR6LongCondBranch;
+ break;
+ case kR6Call:
+ type_ = kR6LongCall;
+ break;
+ default:
+ // Note: 'type_' is already long.
+ break;
+ }
+ CHECK(IsLong());
+}
+
+uint32_t MipsAssembler::Branch::PromoteIfNeeded(uint32_t max_short_distance) {
+ // If the branch is still unresolved or already long, nothing to do.
+ if (IsLong() || !IsResolved()) {
+ return 0;
+ }
+ // Promote the short branch to long if the offset size is too small
+ // to hold the distance between location_ and target_.
+ if (GetOffsetSizeNeeded(location_, target_) > GetOffsetSize()) {
+ PromoteToLong();
+ uint32_t old_size = GetOldSize();
+ uint32_t new_size = GetSize();
+ CHECK_GT(new_size, old_size);
+ return new_size - old_size;
+ }
+ // The following logic is for debugging/testing purposes.
+ // Promote some short branches to long when it's not really required.
+ if (UNLIKELY(max_short_distance != std::numeric_limits<uint32_t>::max())) {
+ int64_t distance = static_cast<int64_t>(target_) - location_;
+ distance = (distance >= 0) ? distance : -distance;
+ if (distance >= max_short_distance) {
+ PromoteToLong();
+ uint32_t old_size = GetOldSize();
+ uint32_t new_size = GetSize();
+ CHECK_GT(new_size, old_size);
+ return new_size - old_size;
+ }
+ }
+ return 0;
+}
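+// In other words, max_short_distance is normally "unlimited"; tests can pass a small value (even 0)
+// to force every resolved short branch into its long form and exercise the long encodings.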
+
+uint32_t MipsAssembler::Branch::GetOffsetLocation() const {
+ return location_ + branch_info_[type_].instr_offset * sizeof(uint32_t);
+}
+
+uint32_t MipsAssembler::Branch::GetOffset() const {
+ CHECK(IsResolved());
+ uint32_t ofs_mask = 0xFFFFFFFF >> (32 - GetOffsetSize());
+ // Calculate the byte distance between instructions and also account for
+ // different PC-relative origins.
+ uint32_t offset = target_ - GetOffsetLocation() - branch_info_[type_].pc_org * sizeof(uint32_t);
+ // Prepare the offset for encoding into the instruction(s).
+ offset = (offset & ofs_mask) >> branch_info_[type_].offset_shift;
+ return offset;
+}
+
+MipsAssembler::Branch* MipsAssembler::GetBranch(uint32_t branch_id) {
+ CHECK_LT(branch_id, branches_.size());
+ return &branches_[branch_id];
+}
+
+const MipsAssembler::Branch* MipsAssembler::GetBranch(uint32_t branch_id) const {
+ CHECK_LT(branch_id, branches_.size());
+ return &branches_[branch_id];
+}
+
+void MipsAssembler::Bind(MipsLabel* label) {
+ CHECK(!label->IsBound());
+ uint32_t bound_pc = buffer_.Size();
+
+ // Walk the list of branches referring to and preceding this label.
+ // Store the previously unknown target addresses in them.
+ while (label->IsLinked()) {
+ uint32_t branch_id = label->Position();
+ Branch* branch = GetBranch(branch_id);
+ branch->Resolve(bound_pc);
+
+ uint32_t branch_location = branch->GetLocation();
+ // Extract the link to the previous branch in the list (walking the list backwards;
+ // the previous branch ID was stored in the space reserved for this branch).
+ uint32_t prev = buffer_.Load<uint32_t>(branch_location);
+
+ // On to the previous branch in the list...
+ label->position_ = prev;
+ }
+
+ // Now make the label object contain its own location (relative to the end of the preceding
+ // branch, if any; it will be used by the branches referring to and following this label).
+ label->prev_branch_id_plus_one_ = branches_.size();
+ if (label->prev_branch_id_plus_one_) {
+ uint32_t branch_id = label->prev_branch_id_plus_one_ - 1;
+ const Branch* branch = GetBranch(branch_id);
+ bound_pc -= branch->GetEndLocation();
+ }
+ label->BindTo(bound_pc);
+}
+
+uint32_t MipsAssembler::GetLabelLocation(MipsLabel* label) const {
+ CHECK(label->IsBound());
+ uint32_t target = label->Position();
+ if (label->prev_branch_id_plus_one_) {
+ // Get label location based on the branch preceding it.
+ uint32_t branch_id = label->prev_branch_id_plus_one_ - 1;
+ const Branch* branch = GetBranch(branch_id);
+ target += branch->GetEndLocation();
+ }
+ return target;
+}
+
+uint32_t MipsAssembler::GetAdjustedPosition(uint32_t old_position) {
+ // We can reconstruct the adjustment by going through all the branches from the beginning
+ // up to the old_position. Since we expect GetAdjustedPosition() to be called in a loop
+ // with increasing old_position, we can use the data from the last GetAdjustedPosition() to
+ // continue where we left off and the whole loop should be O(m+n) where m is the number
+ // of positions to adjust and n is the number of branches.
+ if (old_position < last_old_position_) {
+ last_position_adjustment_ = 0;
+ last_old_position_ = 0;
+ last_branch_id_ = 0;
+ }
+ while (last_branch_id_ != branches_.size()) {
+ const Branch* branch = GetBranch(last_branch_id_);
+ if (branch->GetLocation() >= old_position + last_position_adjustment_) {
+ break;
+ }
+ last_position_adjustment_ += branch->GetSize() - branch->GetOldSize();
+ ++last_branch_id_;
+ }
+ last_old_position_ = old_position;
+ return old_position + last_position_adjustment_;
+}
+
+void MipsAssembler::FinalizeLabeledBranch(MipsLabel* label) {
+ uint32_t length = branches_.back().GetLength();
+ if (!label->IsBound()) {
+ // Branch forward (to a following label), distance is unknown.
+ // The first branch forward will contain 0, serving as the terminator of
+ // the list of forward-reaching branches.
+ Emit(label->position_);
+ length--;
+ // Now make the label object point to this branch
+ // (this forms a linked list of branches preceding this label).
+ uint32_t branch_id = branches_.size() - 1;
+ label->LinkTo(branch_id);
+ }
+ // Reserve space for the branch.
+ while (length--) {
+ Nop();
+ }
+}
+
+void MipsAssembler::Buncond(MipsLabel* label) {
+ uint32_t target = label->IsBound() ? GetLabelLocation(label) : Branch::kUnresolved;
+ branches_.emplace_back(IsR6(), buffer_.Size(), target);
+ FinalizeLabeledBranch(label);
+}
+
+void MipsAssembler::Bcond(MipsLabel* label, BranchCondition condition, Register lhs, Register rhs) {
+ // If lhs = rhs, this can be a NOP.
+ if (Branch::IsNop(condition, lhs, rhs)) {
+ return;
+ }
+ uint32_t target = label->IsBound() ? GetLabelLocation(label) : Branch::kUnresolved;
+ branches_.emplace_back(IsR6(), buffer_.Size(), target, condition, lhs, rhs);
+ FinalizeLabeledBranch(label);
+}
+
+void MipsAssembler::Call(MipsLabel* label, Register indirect_reg) {
+ uint32_t target = label->IsBound() ? GetLabelLocation(label) : Branch::kUnresolved;
+ branches_.emplace_back(IsR6(), buffer_.Size(), target, indirect_reg);
+ FinalizeLabeledBranch(label);
+}
+
+void MipsAssembler::PromoteBranches() {
+ // Promote short branches to long as necessary.
+ bool changed;
+ do {
+ changed = false;
+ for (auto& branch : branches_) {
+ CHECK(branch.IsResolved());
+ uint32_t delta = branch.PromoteIfNeeded();
+ // If this branch has been promoted and needs to expand in size,
+ // relocate all branches by the expansion size.
+ if (delta) {
+ changed = true;
+ uint32_t expand_location = branch.GetLocation();
+ for (auto& branch2 : branches_) {
+ branch2.Relocate(expand_location, delta);
+ }
+ }
+ }
+ } while (changed);
+
+ // Account for branch expansion by resizing the code buffer
+ // and moving the code in it to its final location.
+ size_t branch_count = branches_.size();
+ if (branch_count > 0) {
+ // Resize.
+ Branch& last_branch = branches_[branch_count - 1];
+ uint32_t size_delta = last_branch.GetEndLocation() - last_branch.GetOldEndLocation();
+ uint32_t old_size = buffer_.Size();
+ buffer_.Resize(old_size + size_delta);
+ // Move the code residing between branch placeholders.
+ uint32_t end = old_size;
+ for (size_t i = branch_count; i > 0; ) {
+ Branch& branch = branches_[--i];
+ uint32_t size = end - branch.GetOldEndLocation();
+ buffer_.Move(branch.GetEndLocation(), branch.GetOldEndLocation(), size);
+ end = branch.GetOldLocation();
+ }
+ }
+}
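+// Note that promotion can cascade: growing one branch may push another branch's target out of its
+// short-offset range, which is why the loop above repeats until no further promotion is needed.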
+
+// Note: make sure branch_info_[] and EmitBranch() are kept synchronized.
+const MipsAssembler::Branch::BranchInfo MipsAssembler::Branch::branch_info_[] = {
+ // R2 short branches.
+ { 2, 0, 1, MipsAssembler::Branch::kOffset18, 2 }, // kUncondBranch
+ { 2, 0, 1, MipsAssembler::Branch::kOffset18, 2 }, // kCondBranch
+ { 5, 2, 0, MipsAssembler::Branch::kOffset16, 0 }, // kCall
+ // R2 long branches.
+ { 9, 3, 1, MipsAssembler::Branch::kOffset32, 0 }, // kLongUncondBranch
+ { 10, 4, 1, MipsAssembler::Branch::kOffset32, 0 }, // kLongCondBranch
+ { 6, 1, 1, MipsAssembler::Branch::kOffset32, 0 }, // kLongCall
+ // R6 short branches.
+ { 1, 0, 1, MipsAssembler::Branch::kOffset28, 2 }, // kR6UncondBranch
+ { 2, 0, 1, MipsAssembler::Branch::kOffset18, 2 }, // kR6CondBranch
+ // Exception: kOffset23 for beqzc/bnezc.
+ { 2, 0, 0, MipsAssembler::Branch::kOffset21, 2 }, // kR6Call
+ // R6 long branches.
+ { 2, 0, 0, MipsAssembler::Branch::kOffset32, 0 }, // kR6LongUncondBranch
+ { 3, 1, 0, MipsAssembler::Branch::kOffset32, 0 }, // kR6LongCondBranch
+ { 3, 0, 0, MipsAssembler::Branch::kOffset32, 0 }, // kR6LongCall
+};
+
+// Note: make sure branch_info_[] and EmitBranch() are kept synchronized.
+void MipsAssembler::EmitBranch(MipsAssembler::Branch* branch) {
+ CHECK_EQ(overwriting_, true);
+ overwrite_location_ = branch->GetLocation();
+ uint32_t offset = branch->GetOffset();
+ BranchCondition condition = branch->GetCondition();
+ Register lhs = branch->GetLeftRegister();
+ Register rhs = branch->GetRightRegister();
+ switch (branch->GetType()) {
+ // R2 short branches.
+ case Branch::kUncondBranch:
+ CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
+ B(offset);
+ Nop(); // TODO: improve by filling the delay slot.
+ break;
+ case Branch::kCondBranch:
+ CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
+ EmitBcond(condition, lhs, rhs, offset);
+ Nop(); // TODO: improve by filling the delay slot.
+ break;
+ case Branch::kCall:
+ Nal();
+ Nop(); // TODO: is this NOP really needed here?
+ CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
+ Addiu(lhs, RA, offset);
+ Jalr(lhs);
+ Nop();
+ break;
+
+ // R2 long branches.
+ case Branch::kLongUncondBranch:
+ // To get the value of the PC register we need to use the NAL instruction.
+ // NAL clobbers the RA register. However, RA must be preserved if the
+ // method is compiled without the entry/exit sequences that would take care
+ // of preserving RA (typically, leaf methods don't preserve RA explicitly).
+ // So, we need to preserve RA in some temporary storage ourselves. The AT
+ // register can't be used for this because we need it to load a constant
+ // which will be added to the value that NAL stores in RA. And we can't
+ // use T9 for this in the context of the JNI compiler, which uses it
+ // as a scratch register (see InterproceduralScratchRegister()).
+ // If we were to add a 32-bit constant to RA using two ADDIU instructions,
+ // we'd also need to use the ROTR instruction, which requires no less than
+ // MIPSR2.
+ // Perhaps, we could use T8 or one of R2's multiplier/divider registers
+ // (LO or HI) or even a floating-point register, but that doesn't seem
+ // like a nice solution. We may want this to work on both R6 and pre-R6.
+ // For now simply use the stack for RA. This should be OK since for the
+ // vast majority of code a short PC-relative branch is sufficient.
+ // TODO: can this be improved?
+ Push(RA);
+ Nal();
+ CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
+ Lui(AT, High16Bits(offset));
+ Ori(AT, AT, Low16Bits(offset));
+ Addu(AT, AT, RA);
+ Lw(RA, SP, 0);
+ Jr(AT);
+ DecreaseFrameSize(kMipsWordSize);
+ break;
+ case Branch::kLongCondBranch:
+ // The comment on case 'Branch::kLongUncondBranch' applies here as well.
+ // Note: the opposite condition branch encodes 8 as the distance, which is equal to the
+ // number of instructions skipped:
+ // (PUSH(IncreaseFrameSize(ADDIU) + SW) + NAL + LUI + ORI + ADDU + LW + JR).
+ EmitBcond(Branch::OppositeCondition(condition), lhs, rhs, 8);
+ Push(RA);
+ Nal();
+ CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
+ Lui(AT, High16Bits(offset));
+ Ori(AT, AT, Low16Bits(offset));
+ Addu(AT, AT, RA);
+ Lw(RA, SP, 0);
+ Jr(AT);
+ DecreaseFrameSize(kMipsWordSize);
+ break;
+ case Branch::kLongCall:
+ Nal();
+ CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
+ Lui(AT, High16Bits(offset));
+ Ori(AT, AT, Low16Bits(offset));
+ Addu(lhs, AT, RA);
+ Jalr(lhs);
+ Nop();
+ break;
+
+ // R6 short branches.
+ case Branch::kR6UncondBranch:
+ CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
+ Bc(offset);
+ break;
+ case Branch::kR6CondBranch:
+ CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
+ EmitBcondc(condition, lhs, rhs, offset);
+ Nop(); // TODO: improve by filling the forbidden slot.
+ break;
+ case Branch::kR6Call:
+ CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
+ Addiupc(lhs, offset);
+ Jialc(lhs, 0);
+ break;
+
+ // R6 long branches.
+ case Branch::kR6LongUncondBranch:
+ offset += (offset & 0x8000) << 1; // Account for sign extension in jic.
+ CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
+ Auipc(AT, High16Bits(offset));
+ Jic(AT, Low16Bits(offset));
+ break;
+ case Branch::kR6LongCondBranch:
+ EmitBcondc(Branch::OppositeCondition(condition), lhs, rhs, 2);
+ offset += (offset & 0x8000) << 1; // Account for sign extension in jic.
+ CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
+ Auipc(AT, High16Bits(offset));
+ Jic(AT, Low16Bits(offset));
+ break;
+ case Branch::kR6LongCall:
+ offset += (offset & 0x8000) << 1; // Account for sign extension in addiu.
+ CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
+ Auipc(lhs, High16Bits(offset));
+ Addiu(lhs, lhs, Low16Bits(offset));
+ Jialc(lhs, 0);
+ break;
+ }
+ CHECK_EQ(overwrite_location_, branch->GetEndLocation());
+ CHECK_LT(branch->GetSize(), static_cast<uint32_t>(Branch::kMaxBranchSize));
+}
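The `offset += (offset & 0x8000) << 1` adjustments in the R6 long-branch cases compensate for jic/addiu sign-extending their 16-bit immediate while auipc supplies the upper half. A standalone sketch (High16Bits/Low16Bits are modeled locally here, not taken from ART's headers) shows that the adjusted split still reconstructs the original distance:

#include <cassert>
#include <cstdint>

int main() {
  uint32_t offset = 0x00018000;                            // example distance with bit 15 set
  uint32_t adjusted = offset + ((offset & 0x8000) << 1);   // what the emitter does before splitting
  uint32_t hi = adjusted & 0xffff0000u;                    // contributed by auipc (upper 16 bits)
  uint32_t lo = static_cast<uint32_t>(static_cast<int16_t>(adjusted & 0xffffu));  // sign-extended by jic/addiu
  assert(hi + lo == offset);                               // the original distance survives the split
  return 0;
}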
+
+void MipsAssembler::B(MipsLabel* label) {
+ Buncond(label);
+}
+
+void MipsAssembler::Jalr(MipsLabel* label, Register indirect_reg) {
+ Call(label, indirect_reg);
+}
+
+void MipsAssembler::Beq(Register rs, Register rt, MipsLabel* label) {
+ Bcond(label, kCondEQ, rs, rt);
+}
+
+void MipsAssembler::Bne(Register rs, Register rt, MipsLabel* label) {
+ Bcond(label, kCondNE, rs, rt);
+}
+
+void MipsAssembler::Beqz(Register rt, MipsLabel* label) {
+ Bcond(label, kCondEQZ, rt);
+}
+
+void MipsAssembler::Bnez(Register rt, MipsLabel* label) {
+ Bcond(label, kCondNEZ, rt);
+}
+
+void MipsAssembler::Bltz(Register rt, MipsLabel* label) {
+ Bcond(label, kCondLTZ, rt);
+}
+
+void MipsAssembler::Bgez(Register rt, MipsLabel* label) {
+ Bcond(label, kCondGEZ, rt);
+}
+
+void MipsAssembler::Blez(Register rt, MipsLabel* label) {
+ Bcond(label, kCondLEZ, rt);
+}
+
+void MipsAssembler::Bgtz(Register rt, MipsLabel* label) {
+ Bcond(label, kCondGTZ, rt);
+}
+
+void MipsAssembler::Blt(Register rs, Register rt, MipsLabel* label) {
+ if (IsR6()) {
+ Bcond(label, kCondLT, rs, rt);
+ } else if (!Branch::IsNop(kCondLT, rs, rt)) {
+ // Synthesize the instruction (not available on R2).
+ Slt(AT, rs, rt);
+ Bnez(AT, label);
+ }
+}
+
+void MipsAssembler::Bge(Register rs, Register rt, MipsLabel* label) {
+ if (IsR6()) {
+ Bcond(label, kCondGE, rs, rt);
+ } else if (Branch::IsUncond(kCondGE, rs, rt)) {
+ B(label);
+ } else {
+ // Synthesize the instruction (not available on R2).
+ Slt(AT, rs, rt);
+ Beqz(AT, label);
+ }
+}
+
+void MipsAssembler::Bltu(Register rs, Register rt, MipsLabel* label) {
+ if (IsR6()) {
+ Bcond(label, kCondLTU, rs, rt);
+ } else if (!Branch::IsNop(kCondLTU, rs, rt)) {
+ // Synthesize the instruction (not available on R2).
+ Sltu(AT, rs, rt);
+ Bnez(AT, label);
+ }
+}
+
+void MipsAssembler::Bgeu(Register rs, Register rt, MipsLabel* label) {
+ if (IsR6()) {
+ Bcond(label, kCondGEU, rs, rt);
+ } else if (Branch::IsUncond(kCondGEU, rs, rt)) {
+ B(label);
} else {
- CHECK(dst.IsDRegister()) << dst;
- LoadDFromOffset(dst.AsDRegister(), src_register, src_offset);
+ // Synthesize the instruction (not available on R2).
+ Sltu(AT, rs, rt);
+ Beqz(AT, label);
}
}
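With the old Label/EmitBranch/EmitJump interface gone, call sites now hand the assembler a MipsLabel and a logical condition and let it pick the R2 or R6 encoding and, later, the short or long form. A usage sketch follows (the helper name and register choices are illustrative, not taken from the patch; it assumes the code lives inside namespace art::mips with assembler_mips.h included):

// Emits V0 = abs(A0) using the label-based branch API. Bgez records a branch
// against 'done'; Bind resolves it, and FinalizeCode() later promotes it to a
// long sequence only if the distance requires that.
void EmitAbsV0FromA0(MipsAssembler* assembler) {
  MipsLabel done;
  assembler->Move(V0, A0);        // result = A0
  assembler->Bgez(A0, &done);     // already non-negative: skip the negation
  assembler->Subu(V0, ZERO, A0);  // result = -A0
  assembler->Bind(&done);
}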
void MipsAssembler::LoadFromOffset(LoadOperandType type, Register reg, Register base,
int32_t offset) {
+ // IsInt<16> must be passed a signed value.
+ if (!IsInt<16>(offset) ||
+ (type == kLoadDoubleword && !IsInt<16>(static_cast<int32_t>(offset + kMipsWordSize)))) {
+ LoadConst32(AT, offset);
+ Addu(AT, AT, base);
+ base = AT;
+ offset = 0;
+ }
+
switch (type) {
case kLoadSignedByte:
Lb(reg, base, offset);
@@ -488,8 +1617,16 @@ void MipsAssembler::LoadFromOffset(LoadOperandType type, Register reg, Register
case kLoadWord:
Lw(reg, base, offset);
break;
- case kLoadWordPair:
- LOG(FATAL) << "UNREACHABLE";
+ case kLoadDoubleword:
+ if (reg == base) {
+ // This will clobber the base when loading the lower register. Since we have to load the
+ // higher register as well, this will fail. Solution: reverse the order.
+ Lw(static_cast<Register>(reg + 1), base, offset + kMipsWordSize);
+ Lw(reg, base, offset);
+ } else {
+ Lw(reg, base, offset);
+ Lw(static_cast<Register>(reg + 1), base, offset + kMipsWordSize);
+ }
break;
default:
LOG(FATAL) << "UNREACHABLE";
@@ -497,15 +1634,74 @@ void MipsAssembler::LoadFromOffset(LoadOperandType type, Register reg, Register
}
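The guard at the top of LoadFromOffset falls back to materializing the offset in AT not only when the offset itself does not fit a signed 16-bit immediate, but also, for doubleword accesses, when offset + 4 does not. A standalone sketch of the boundary case (IsInt16 models ART's IsInt<16>; the constant is just an example):

#include <cassert>
#include <cstdint>

static bool IsInt16(int32_t value) { return value >= -0x8000 && value < 0x8000; }

int main() {
  int32_t offset = 0x7ffc;       // lw reg, 0x7ffc(base) is still encodable...
  assert(IsInt16(offset));
  assert(!IsInt16(offset + 4));  // ...but the second word at 0x8000(base) is not,
                                 // so the whole doubleword access takes the AT path.
  return 0;
}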
void MipsAssembler::LoadSFromOffset(FRegister reg, Register base, int32_t offset) {
+ if (!IsInt<16>(offset)) {
+ LoadConst32(AT, offset);
+ Addu(AT, AT, base);
+ base = AT;
+ offset = 0;
+ }
+
Lwc1(reg, base, offset);
}
-void MipsAssembler::LoadDFromOffset(DRegister reg, Register base, int32_t offset) {
- Ldc1(reg, base, offset);
+void MipsAssembler::LoadDFromOffset(FRegister reg, Register base, int32_t offset) {
+ // IsInt<16> must be passed a signed value.
+ if (!IsInt<16>(offset) ||
+ (!IsAligned<kMipsDoublewordSize>(offset) &&
+ !IsInt<16>(static_cast<int32_t>(offset + kMipsWordSize)))) {
+ LoadConst32(AT, offset);
+ Addu(AT, AT, base);
+ base = AT;
+ offset = 0;
+ }
+
+ if (offset & 0x7) {
+ if (Is32BitFPU()) {
+ Lwc1(reg, base, offset);
+ Lwc1(static_cast<FRegister>(reg + 1), base, offset + kMipsWordSize);
+ } else {
+ // 64-bit FPU.
+ Lwc1(reg, base, offset);
+ Lw(T8, base, offset + kMipsWordSize);
+ Mthc1(T8, reg);
+ }
+ } else {
+ Ldc1(reg, base, offset);
+ }
+}
+
+void MipsAssembler::EmitLoad(ManagedRegister m_dst, Register src_register, int32_t src_offset,
+ size_t size) {
+ MipsManagedRegister dst = m_dst.AsMips();
+ if (dst.IsNoRegister()) {
+ CHECK_EQ(0u, size) << dst;
+ } else if (dst.IsCoreRegister()) {
+ CHECK_EQ(kMipsWordSize, size) << dst;
+ LoadFromOffset(kLoadWord, dst.AsCoreRegister(), src_register, src_offset);
+ } else if (dst.IsRegisterPair()) {
+ CHECK_EQ(kMipsDoublewordSize, size) << dst;
+ LoadFromOffset(kLoadDoubleword, dst.AsRegisterPairLow(), src_register, src_offset);
+ } else if (dst.IsFRegister()) {
+ if (size == kMipsWordSize) {
+ LoadSFromOffset(dst.AsFRegister(), src_register, src_offset);
+ } else {
+ CHECK_EQ(kMipsDoublewordSize, size) << dst;
+ LoadDFromOffset(dst.AsFRegister(), src_register, src_offset);
+ }
+ }
}
void MipsAssembler::StoreToOffset(StoreOperandType type, Register reg, Register base,
int32_t offset) {
+ // IsInt<16> must be passed a signed value.
+ if (!IsInt<16>(offset) ||
+ (type == kStoreDoubleword && !IsInt<16>(static_cast<int32_t>(offset + kMipsWordSize)))) {
+ LoadConst32(AT, offset);
+ Addu(AT, AT, base);
+ base = AT;
+ offset = 0;
+ }
+
switch (type) {
case kStoreByte:
Sb(reg, base, offset);
@@ -516,8 +1712,11 @@ void MipsAssembler::StoreToOffset(StoreOperandType type, Register reg, Register
case kStoreWord:
Sw(reg, base, offset);
break;
- case kStoreWordPair:
- LOG(FATAL) << "UNREACHABLE";
+ case kStoreDoubleword:
+ CHECK_NE(reg, base);
+ CHECK_NE(static_cast<Register>(reg + 1), base);
+ Sw(reg, base, offset);
+ Sw(static_cast<Register>(reg + 1), base, offset + kMipsWordSize);
break;
default:
LOG(FATAL) << "UNREACHABLE";
@@ -525,11 +1724,40 @@ void MipsAssembler::StoreToOffset(StoreOperandType type, Register reg, Register
}
void MipsAssembler::StoreSToOffset(FRegister reg, Register base, int32_t offset) {
+ if (!IsInt<16>(offset)) {
+ LoadConst32(AT, offset);
+ Addu(AT, AT, base);
+ base = AT;
+ offset = 0;
+ }
+
Swc1(reg, base, offset);
}
-void MipsAssembler::StoreDToOffset(DRegister reg, Register base, int32_t offset) {
- Sdc1(reg, base, offset);
+void MipsAssembler::StoreDToOffset(FRegister reg, Register base, int32_t offset) {
+ // IsInt<16> must be passed a signed value.
+ if (!IsInt<16>(offset) ||
+ (!IsAligned<kMipsDoublewordSize>(offset) &&
+ !IsInt<16>(static_cast<int32_t>(offset + kMipsWordSize)))) {
+ LoadConst32(AT, offset);
+ Addu(AT, AT, base);
+ base = AT;
+ offset = 0;
+ }
+
+ if (offset & 0x7) {
+ if (Is32BitFPU()) {
+ Swc1(reg, base, offset);
+ Swc1(static_cast<FRegister>(reg + 1), base, offset + kMipsWordSize);
+ } else {
+ // 64-bit FPU.
+ Mfhc1(T8, reg);
+ Swc1(reg, base, offset);
+ Sw(T8, base, offset + kMipsWordSize);
+ }
+ } else {
+ Sdc1(reg, base, offset);
+ }
}
static dwarf::Reg DWARFReg(Register reg) {
@@ -546,7 +1774,7 @@ void MipsAssembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
// Increase frame to required size.
IncreaseFrameSize(frame_size);
- // Push callee saves and return address
+ // Push callee saves and return address.
int stack_offset = frame_size - kFramePointerSize;
StoreToOffset(kStoreWord, RA, SP, stack_offset);
cfi_.RelOffset(DWARFReg(RA), stack_offset);
@@ -569,13 +1797,13 @@ void MipsAssembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
offset += spill.getSize();
} else if (reg.IsCoreRegister()) {
StoreToOffset(kStoreWord, reg.AsCoreRegister(), SP, offset);
- offset += 4;
+ offset += kMipsWordSize;
} else if (reg.IsFRegister()) {
StoreSToOffset(reg.AsFRegister(), SP, offset);
- offset += 4;
+ offset += kMipsWordSize;
} else if (reg.IsDRegister()) {
- StoreDToOffset(reg.AsDRegister(), SP, offset);
- offset += 8;
+ StoreDToOffset(reg.AsOverlappingDRegisterLow(), SP, offset);
+ offset += kMipsDoublewordSize;
}
}
}
@@ -585,7 +1813,7 @@ void MipsAssembler::RemoveFrame(size_t frame_size,
CHECK_ALIGNED(frame_size, kStackAlignment);
cfi_.RememberState();
- // Pop callee saves and return address
+ // Pop callee saves and return address.
int stack_offset = frame_size - (callee_save_regs.size() * kFramePointerSize) - kFramePointerSize;
for (size_t i = 0; i < callee_save_regs.size(); ++i) {
Register reg = callee_save_regs.at(i).AsMips().AsCoreRegister();
@@ -601,6 +1829,7 @@ void MipsAssembler::RemoveFrame(size_t frame_size,
// Then jump to the return address.
Jr(RA);
+ Nop();
// The CFI should be restored for any code that follows the exit block.
cfi_.RestoreState();
@@ -608,14 +1837,14 @@ void MipsAssembler::RemoveFrame(size_t frame_size,
}
void MipsAssembler::IncreaseFrameSize(size_t adjust) {
- CHECK_ALIGNED(adjust, kStackAlignment);
- AddConstant(SP, SP, -adjust);
+ CHECK_ALIGNED(adjust, kFramePointerSize);
+ Addiu32(SP, SP, -adjust);
cfi_.AdjustCFAOffset(adjust);
}
void MipsAssembler::DecreaseFrameSize(size_t adjust) {
- CHECK_ALIGNED(adjust, kStackAlignment);
- AddConstant(SP, SP, adjust);
+ CHECK_ALIGNED(adjust, kFramePointerSize);
+ Addiu32(SP, SP, adjust);
cfi_.AdjustCFAOffset(-adjust);
}
@@ -624,18 +1853,20 @@ void MipsAssembler::Store(FrameOffset dest, ManagedRegister msrc, size_t size) {
if (src.IsNoRegister()) {
CHECK_EQ(0u, size);
} else if (src.IsCoreRegister()) {
- CHECK_EQ(4u, size);
+ CHECK_EQ(kMipsWordSize, size);
StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
} else if (src.IsRegisterPair()) {
- CHECK_EQ(8u, size);
+ CHECK_EQ(kMipsDoublewordSize, size);
StoreToOffset(kStoreWord, src.AsRegisterPairLow(), SP, dest.Int32Value());
StoreToOffset(kStoreWord, src.AsRegisterPairHigh(),
- SP, dest.Int32Value() + 4);
+ SP, dest.Int32Value() + kMipsWordSize);
} else if (src.IsFRegister()) {
- StoreSToOffset(src.AsFRegister(), SP, dest.Int32Value());
- } else {
- CHECK(src.IsDRegister());
- StoreDToOffset(src.AsDRegister(), SP, dest.Int32Value());
+ if (size == kMipsWordSize) {
+ StoreSToOffset(src.AsFRegister(), SP, dest.Int32Value());
+ } else {
+ CHECK_EQ(kMipsDoublewordSize, size);
+ StoreDToOffset(src.AsFRegister(), SP, dest.Int32Value());
+ }
}
}
@@ -655,29 +1886,30 @@ void MipsAssembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
ManagedRegister mscratch) {
MipsManagedRegister scratch = mscratch.AsMips();
CHECK(scratch.IsCoreRegister()) << scratch;
- LoadImmediate(scratch.AsCoreRegister(), imm);
+ LoadConst32(scratch.AsCoreRegister(), imm);
StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
}
-void MipsAssembler::StoreImmediateToThread32(ThreadOffset<4> dest, uint32_t imm,
- ManagedRegister mscratch) {
+void MipsAssembler::StoreImmediateToThread32(ThreadOffset<kMipsWordSize> dest, uint32_t imm,
+ ManagedRegister mscratch) {
MipsManagedRegister scratch = mscratch.AsMips();
CHECK(scratch.IsCoreRegister()) << scratch;
- LoadImmediate(scratch.AsCoreRegister(), imm);
+  // TODO: is this function even referenced anywhere else in the code?
+ LoadConst32(scratch.AsCoreRegister(), imm);
StoreToOffset(kStoreWord, scratch.AsCoreRegister(), S1, dest.Int32Value());
}
-void MipsAssembler::StoreStackOffsetToThread32(ThreadOffset<4> thr_offs,
- FrameOffset fr_offs,
- ManagedRegister mscratch) {
+void MipsAssembler::StoreStackOffsetToThread32(ThreadOffset<kMipsWordSize> thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) {
MipsManagedRegister scratch = mscratch.AsMips();
CHECK(scratch.IsCoreRegister()) << scratch;
- AddConstant(scratch.AsCoreRegister(), SP, fr_offs.Int32Value());
+ Addiu32(scratch.AsCoreRegister(), SP, fr_offs.Int32Value());
StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
S1, thr_offs.Int32Value());
}
-void MipsAssembler::StoreStackPointerToThread32(ThreadOffset<4> thr_offs) {
+void MipsAssembler::StoreStackPointerToThread32(ThreadOffset<kMipsWordSize> thr_offs) {
StoreToOffset(kStoreWord, SP, S1, thr_offs.Int32Value());
}
@@ -687,14 +1919,15 @@ void MipsAssembler::StoreSpanning(FrameOffset dest, ManagedRegister msrc,
MipsManagedRegister scratch = mscratch.AsMips();
StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, in_off.Int32Value());
- StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + 4);
+ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + kMipsWordSize);
}
void MipsAssembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) {
return EmitLoad(mdest, SP, src.Int32Value(), size);
}
-void MipsAssembler::LoadFromThread32(ManagedRegister mdest, ThreadOffset<4> src, size_t size) {
+void MipsAssembler::LoadFromThread32(ManagedRegister mdest,
+ ThreadOffset<kMipsWordSize> src, size_t size) {
return EmitLoad(mdest, S1, src.Int32Value(), size);
}
@@ -707,7 +1940,7 @@ void MipsAssembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
void MipsAssembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs,
bool unpoison_reference) {
MipsManagedRegister dest = mdest.AsMips();
- CHECK(dest.IsCoreRegister() && dest.IsCoreRegister());
+ CHECK(dest.IsCoreRegister() && base.AsMips().IsCoreRegister());
LoadFromOffset(kLoadWord, dest.AsCoreRegister(),
base.AsMips().AsCoreRegister(), offs.Int32Value());
if (kPoisonHeapReferences && unpoison_reference) {
@@ -715,16 +1948,15 @@ void MipsAssembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberO
}
}
-void MipsAssembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base,
- Offset offs) {
+void MipsAssembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offset offs) {
MipsManagedRegister dest = mdest.AsMips();
- CHECK(dest.IsCoreRegister() && dest.IsCoreRegister()) << dest;
+ CHECK(dest.IsCoreRegister() && base.AsMips().IsCoreRegister());
LoadFromOffset(kLoadWord, dest.AsCoreRegister(),
base.AsMips().AsCoreRegister(), offs.Int32Value());
}
void MipsAssembler::LoadRawPtrFromThread32(ManagedRegister mdest,
- ThreadOffset<4> offs) {
+ ThreadOffset<kMipsWordSize> offs) {
MipsManagedRegister dest = mdest.AsMips();
CHECK(dest.IsCoreRegister());
LoadFromOffset(kLoadWord, dest.AsCoreRegister(), S1, offs.Int32Value());
@@ -738,7 +1970,7 @@ void MipsAssembler::ZeroExtend(ManagedRegister /*mreg*/, size_t /*size*/) {
UNIMPLEMENTED(FATAL) << "no zero extension necessary for mips";
}
-void MipsAssembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t /*size*/) {
+void MipsAssembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) {
MipsManagedRegister dest = mdest.AsMips();
MipsManagedRegister src = msrc.AsMips();
if (!dest.Equals(src)) {
@@ -747,14 +1979,19 @@ void MipsAssembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t /*s
Move(dest.AsCoreRegister(), src.AsCoreRegister());
} else if (dest.IsFRegister()) {
CHECK(src.IsFRegister()) << src;
- MovS(dest.AsFRegister(), src.AsFRegister());
+ if (size == kMipsWordSize) {
+ MovS(dest.AsFRegister(), src.AsFRegister());
+ } else {
+ CHECK_EQ(kMipsDoublewordSize, size);
+ MovD(dest.AsFRegister(), src.AsFRegister());
+ }
} else if (dest.IsDRegister()) {
CHECK(src.IsDRegister()) << src;
- MovD(dest.AsDRegister(), src.AsDRegister());
+ MovD(dest.AsOverlappingDRegisterLow(), src.AsOverlappingDRegisterLow());
} else {
CHECK(dest.IsRegisterPair()) << dest;
CHECK(src.IsRegisterPair()) << src;
- // Ensure that the first move doesn't clobber the input of the second
+ // Ensure that the first move doesn't clobber the input of the second.
if (src.AsRegisterPairHigh() != dest.AsRegisterPairLow()) {
Move(dest.AsRegisterPairLow(), src.AsRegisterPairLow());
Move(dest.AsRegisterPairHigh(), src.AsRegisterPairHigh());
@@ -766,8 +2003,7 @@ void MipsAssembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t /*s
}
}
-void MipsAssembler::CopyRef(FrameOffset dest, FrameOffset src,
- ManagedRegister mscratch) {
+void MipsAssembler::CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) {
MipsManagedRegister scratch = mscratch.AsMips();
CHECK(scratch.IsCoreRegister()) << scratch;
LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
@@ -775,8 +2011,8 @@ void MipsAssembler::CopyRef(FrameOffset dest, FrameOffset src,
}
void MipsAssembler::CopyRawPtrFromThread32(FrameOffset fr_offs,
- ThreadOffset<4> thr_offs,
- ManagedRegister mscratch) {
+ ThreadOffset<kMipsWordSize> thr_offs,
+ ManagedRegister mscratch) {
MipsManagedRegister scratch = mscratch.AsMips();
CHECK(scratch.IsCoreRegister()) << scratch;
LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
@@ -785,9 +2021,9 @@ void MipsAssembler::CopyRawPtrFromThread32(FrameOffset fr_offs,
SP, fr_offs.Int32Value());
}
-void MipsAssembler::CopyRawPtrToThread32(ThreadOffset<4> thr_offs,
- FrameOffset fr_offs,
- ManagedRegister mscratch) {
+void MipsAssembler::CopyRawPtrToThread32(ThreadOffset<kMipsWordSize> thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) {
MipsManagedRegister scratch = mscratch.AsMips();
CHECK(scratch.IsCoreRegister()) << scratch;
LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
@@ -796,26 +2032,25 @@ void MipsAssembler::CopyRawPtrToThread32(ThreadOffset<4> thr_offs,
S1, thr_offs.Int32Value());
}
-void MipsAssembler::Copy(FrameOffset dest, FrameOffset src,
- ManagedRegister mscratch, size_t size) {
+void MipsAssembler::Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch, size_t size) {
MipsManagedRegister scratch = mscratch.AsMips();
CHECK(scratch.IsCoreRegister()) << scratch;
- CHECK(size == 4 || size == 8) << size;
- if (size == 4) {
+ CHECK(size == kMipsWordSize || size == kMipsDoublewordSize) << size;
+ if (size == kMipsWordSize) {
LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
- } else if (size == 8) {
+ } else if (size == kMipsDoublewordSize) {
LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
- LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value() + 4);
- StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + 4);
+ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value() + kMipsWordSize);
+ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + kMipsWordSize);
}
}
void MipsAssembler::Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
ManagedRegister mscratch, size_t size) {
Register scratch = mscratch.AsMips().AsCoreRegister();
- CHECK_EQ(size, 4u);
+ CHECK_EQ(size, kMipsWordSize);
LoadFromOffset(kLoadWord, scratch, src_base.AsMips().AsCoreRegister(), src_offset.Int32Value());
StoreToOffset(kStoreWord, scratch, SP, dest.Int32Value());
}
@@ -823,107 +2058,117 @@ void MipsAssembler::Copy(FrameOffset dest, ManagedRegister src_base, Offset src_
void MipsAssembler::Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
ManagedRegister mscratch, size_t size) {
Register scratch = mscratch.AsMips().AsCoreRegister();
- CHECK_EQ(size, 4u);
+ CHECK_EQ(size, kMipsWordSize);
LoadFromOffset(kLoadWord, scratch, SP, src.Int32Value());
StoreToOffset(kStoreWord, scratch, dest_base.AsMips().AsCoreRegister(), dest_offset.Int32Value());
}
-void MipsAssembler::Copy(FrameOffset /*dest*/, FrameOffset /*src_base*/, Offset /*src_offset*/,
- ManagedRegister /*mscratch*/, size_t /*size*/) {
- UNIMPLEMENTED(FATAL) << "no mips implementation";
+void MipsAssembler::Copy(FrameOffset dest ATTRIBUTE_UNUSED,
+ FrameOffset src_base ATTRIBUTE_UNUSED,
+ Offset src_offset ATTRIBUTE_UNUSED,
+ ManagedRegister mscratch ATTRIBUTE_UNUSED,
+ size_t size ATTRIBUTE_UNUSED) {
+ UNIMPLEMENTED(FATAL) << "no MIPS implementation";
}
void MipsAssembler::Copy(ManagedRegister dest, Offset dest_offset,
ManagedRegister src, Offset src_offset,
ManagedRegister mscratch, size_t size) {
- CHECK_EQ(size, 4u);
+ CHECK_EQ(size, kMipsWordSize);
Register scratch = mscratch.AsMips().AsCoreRegister();
LoadFromOffset(kLoadWord, scratch, src.AsMips().AsCoreRegister(), src_offset.Int32Value());
StoreToOffset(kStoreWord, scratch, dest.AsMips().AsCoreRegister(), dest_offset.Int32Value());
}
-void MipsAssembler::Copy(FrameOffset /*dest*/, Offset /*dest_offset*/, FrameOffset /*src*/, Offset /*src_offset*/,
- ManagedRegister /*mscratch*/, size_t /*size*/) {
- UNIMPLEMENTED(FATAL) << "no mips implementation";
+void MipsAssembler::Copy(FrameOffset dest ATTRIBUTE_UNUSED,
+ Offset dest_offset ATTRIBUTE_UNUSED,
+ FrameOffset src ATTRIBUTE_UNUSED,
+ Offset src_offset ATTRIBUTE_UNUSED,
+ ManagedRegister mscratch ATTRIBUTE_UNUSED,
+ size_t size ATTRIBUTE_UNUSED) {
+ UNIMPLEMENTED(FATAL) << "no MIPS implementation";
}
void MipsAssembler::MemoryBarrier(ManagedRegister) {
- UNIMPLEMENTED(FATAL) << "no mips implementation";
+ // TODO: sync?
+ UNIMPLEMENTED(FATAL) << "no MIPS implementation";
}
void MipsAssembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
- FrameOffset handle_scope_offset,
- ManagedRegister min_reg, bool null_allowed) {
+ FrameOffset handle_scope_offset,
+ ManagedRegister min_reg,
+ bool null_allowed) {
MipsManagedRegister out_reg = mout_reg.AsMips();
MipsManagedRegister in_reg = min_reg.AsMips();
CHECK(in_reg.IsNoRegister() || in_reg.IsCoreRegister()) << in_reg;
CHECK(out_reg.IsCoreRegister()) << out_reg;
if (null_allowed) {
- Label null_arg;
+ MipsLabel null_arg;
// Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
// the address in the handle scope holding the reference.
- // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
+ // E.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset).
if (in_reg.IsNoRegister()) {
LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(),
SP, handle_scope_offset.Int32Value());
in_reg = out_reg;
}
if (!out_reg.Equals(in_reg)) {
- LoadImmediate(out_reg.AsCoreRegister(), 0);
+ LoadConst32(out_reg.AsCoreRegister(), 0);
}
- EmitBranch(in_reg.AsCoreRegister(), ZERO, &null_arg, true);
- AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offset.Int32Value());
- Bind(&null_arg, false);
+ Beqz(in_reg.AsCoreRegister(), &null_arg);
+ Addiu32(out_reg.AsCoreRegister(), SP, handle_scope_offset.Int32Value());
+ Bind(&null_arg);
} else {
- AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offset.Int32Value());
+ Addiu32(out_reg.AsCoreRegister(), SP, handle_scope_offset.Int32Value());
}
}
void MipsAssembler::CreateHandleScopeEntry(FrameOffset out_off,
- FrameOffset handle_scope_offset,
- ManagedRegister mscratch,
- bool null_allowed) {
+ FrameOffset handle_scope_offset,
+ ManagedRegister mscratch,
+ bool null_allowed) {
MipsManagedRegister scratch = mscratch.AsMips();
CHECK(scratch.IsCoreRegister()) << scratch;
if (null_allowed) {
- Label null_arg;
- LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP,
- handle_scope_offset.Int32Value());
+ MipsLabel null_arg;
+ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value());
// Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
// the address in the handle scope holding the reference.
- // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
- EmitBranch(scratch.AsCoreRegister(), ZERO, &null_arg, true);
- AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value());
- Bind(&null_arg, false);
+ // E.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset).
+ Beqz(scratch.AsCoreRegister(), &null_arg);
+ Addiu32(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value());
+ Bind(&null_arg);
} else {
- AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value());
+ Addiu32(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value());
}
StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, out_off.Int32Value());
}
// Given a handle scope entry, load the associated reference.
void MipsAssembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
- ManagedRegister min_reg) {
+ ManagedRegister min_reg) {
MipsManagedRegister out_reg = mout_reg.AsMips();
MipsManagedRegister in_reg = min_reg.AsMips();
CHECK(out_reg.IsCoreRegister()) << out_reg;
CHECK(in_reg.IsCoreRegister()) << in_reg;
- Label null_arg;
+ MipsLabel null_arg;
if (!out_reg.Equals(in_reg)) {
- LoadImmediate(out_reg.AsCoreRegister(), 0);
+ LoadConst32(out_reg.AsCoreRegister(), 0);
}
- EmitBranch(in_reg.AsCoreRegister(), ZERO, &null_arg, true);
+ Beqz(in_reg.AsCoreRegister(), &null_arg);
LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(),
in_reg.AsCoreRegister(), 0);
- Bind(&null_arg, false);
+ Bind(&null_arg);
}
-void MipsAssembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
- // TODO: not validating references
+void MipsAssembler::VerifyObject(ManagedRegister src ATTRIBUTE_UNUSED,
+ bool could_be_null ATTRIBUTE_UNUSED) {
+ // TODO: not validating references.
}
-void MipsAssembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
- // TODO: not validating references
+void MipsAssembler::VerifyObject(FrameOffset src ATTRIBUTE_UNUSED,
+ bool could_be_null ATTRIBUTE_UNUSED) {
+ // TODO: not validating references.
}
void MipsAssembler::Call(ManagedRegister mbase, Offset offset, ManagedRegister mscratch) {
@@ -934,22 +2179,24 @@ void MipsAssembler::Call(ManagedRegister mbase, Offset offset, ManagedRegister m
LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
base.AsCoreRegister(), offset.Int32Value());
Jalr(scratch.AsCoreRegister());
- // TODO: place reference map on call
+ Nop();
+ // TODO: place reference map on call.
}
void MipsAssembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) {
MipsManagedRegister scratch = mscratch.AsMips();
CHECK(scratch.IsCoreRegister()) << scratch;
// Call *(*(SP + base) + offset)
- LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
- SP, base.Int32Value());
+ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, base.Int32Value());
LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
scratch.AsCoreRegister(), offset.Int32Value());
Jalr(scratch.AsCoreRegister());
- // TODO: place reference map on call
+ Nop();
+ // TODO: place reference map on call.
}
-void MipsAssembler::CallFromThread32(ThreadOffset<4> /*offset*/, ManagedRegister /*mscratch*/) {
+void MipsAssembler::CallFromThread32(ThreadOffset<kMipsWordSize> offset ATTRIBUTE_UNUSED,
+ ManagedRegister mscratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL) << "no mips implementation";
}
@@ -958,35 +2205,38 @@ void MipsAssembler::GetCurrentThread(ManagedRegister tr) {
}
void MipsAssembler::GetCurrentThread(FrameOffset offset,
- ManagedRegister /*mscratch*/) {
+ ManagedRegister mscratch ATTRIBUTE_UNUSED) {
StoreToOffset(kStoreWord, S1, SP, offset.Int32Value());
}
void MipsAssembler::ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) {
MipsManagedRegister scratch = mscratch.AsMips();
- MipsExceptionSlowPath* slow = new MipsExceptionSlowPath(scratch, stack_adjust);
- buffer_.EnqueueSlowPath(slow);
+ exception_blocks_.emplace_back(scratch, stack_adjust);
LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
- S1, Thread::ExceptionOffset<4>().Int32Value());
- EmitBranch(scratch.AsCoreRegister(), ZERO, slow->Entry(), false);
-}
-
-void MipsExceptionSlowPath::Emit(Assembler* sasm) {
- MipsAssembler* sp_asm = down_cast<MipsAssembler*>(sasm);
-#define __ sp_asm->
- __ Bind(&entry_, false);
- if (stack_adjust_ != 0) { // Fix up the frame.
- __ DecreaseFrameSize(stack_adjust_);
- }
- // Pass exception object as argument
- // Don't care about preserving A0 as this call won't return
- __ Move(A0, scratch_.AsCoreRegister());
- // Set up call to Thread::Current()->pDeliverException
- __ LoadFromOffset(kLoadWord, T9, S1, QUICK_ENTRYPOINT_OFFSET(4, pDeliverException).Int32Value());
- __ Jr(T9);
- // Call never returns
- __ Break();
-#undef __
+ S1, Thread::ExceptionOffset<kMipsWordSize>().Int32Value());
+  // TODO: on MIPS32R6 prefer Bnezc(scratch.AsCoreRegister(), exception_blocks_.back().Entry());
+  // as the NAL instruction (occurring in long R2 branches) may become deprecated.
+  // For now, use instructions common to R2 and R6, as this code must execute on both.
+ Bnez(scratch.AsCoreRegister(), exception_blocks_.back().Entry());
+}
+
+void MipsAssembler::EmitExceptionPoll(MipsExceptionSlowPath* exception) {
+ Bind(exception->Entry());
+ if (exception->stack_adjust_ != 0) { // Fix up the frame.
+ DecreaseFrameSize(exception->stack_adjust_);
+ }
+ // Pass exception object as argument.
+ // Don't care about preserving A0 as this call won't return.
+ CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
+ Move(A0, exception->scratch_.AsCoreRegister());
+ // Set up call to Thread::Current()->pDeliverException.
+ LoadFromOffset(kLoadWord, T9, S1,
+ QUICK_ENTRYPOINT_OFFSET(kMipsWordSize, pDeliverException).Int32Value());
+ Jr(T9);
+ Nop();
+
+ // Call never returns.
+ Break();
}
} // namespace mips
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index 378a59cb3e..aa187b812b 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -17,54 +17,111 @@
#ifndef ART_COMPILER_UTILS_MIPS_ASSEMBLER_MIPS_H_
#define ART_COMPILER_UTILS_MIPS_ASSEMBLER_MIPS_H_
+#include <utility>
#include <vector>
+#include "arch/mips/instruction_set_features_mips.h"
#include "base/macros.h"
#include "constants_mips.h"
#include "globals.h"
#include "managed_register_mips.h"
-#include "utils/assembler.h"
#include "offsets.h"
+#include "utils/assembler.h"
+#include "utils/label.h"
namespace art {
namespace mips {
+static constexpr size_t kMipsWordSize = 4;
+static constexpr size_t kMipsDoublewordSize = 8;
+
enum LoadOperandType {
kLoadSignedByte,
kLoadUnsignedByte,
kLoadSignedHalfword,
kLoadUnsignedHalfword,
kLoadWord,
- kLoadWordPair,
- kLoadSWord,
- kLoadDWord
+ kLoadDoubleword
};
enum StoreOperandType {
kStoreByte,
kStoreHalfword,
kStoreWord,
- kStoreWordPair,
- kStoreSWord,
- kStoreDWord
+ kStoreDoubleword
+};
+
+class MipsLabel : public Label {
+ public:
+ MipsLabel() : prev_branch_id_plus_one_(0) {}
+
+ MipsLabel(MipsLabel&& src)
+ : Label(std::move(src)), prev_branch_id_plus_one_(src.prev_branch_id_plus_one_) {}
+
+ private:
+ uint32_t prev_branch_id_plus_one_; // To get distance from preceding branch, if any.
+
+ friend class MipsAssembler;
+ DISALLOW_COPY_AND_ASSIGN(MipsLabel);
+};
+
+// Slowpath entered when Thread::Current()->_exception is non-null.
+class MipsExceptionSlowPath {
+ public:
+ explicit MipsExceptionSlowPath(MipsManagedRegister scratch, size_t stack_adjust)
+ : scratch_(scratch), stack_adjust_(stack_adjust) {}
+
+ MipsExceptionSlowPath(MipsExceptionSlowPath&& src)
+ : scratch_(std::move(src.scratch_)),
+ stack_adjust_(std::move(src.stack_adjust_)),
+ exception_entry_(std::move(src.exception_entry_)) {}
+
+ private:
+ MipsLabel* Entry() { return &exception_entry_; }
+ const MipsManagedRegister scratch_;
+ const size_t stack_adjust_;
+ MipsLabel exception_entry_;
+
+ friend class MipsAssembler;
+ DISALLOW_COPY_AND_ASSIGN(MipsExceptionSlowPath);
};
class MipsAssembler FINAL : public Assembler {
public:
- MipsAssembler() {}
- virtual ~MipsAssembler() {}
+ explicit MipsAssembler(const MipsInstructionSetFeatures* instruction_set_features = nullptr)
+ : overwriting_(false),
+ overwrite_location_(0),
+ last_position_adjustment_(0),
+ last_old_position_(0),
+ last_branch_id_(0),
+ isa_features_(instruction_set_features) {}
+
+ virtual ~MipsAssembler() {
+ for (auto& branch : branches_) {
+ CHECK(branch.IsResolved());
+ }
+ }
// Emit Machine Instructions.
- void Add(Register rd, Register rs, Register rt);
void Addu(Register rd, Register rs, Register rt);
- void Addi(Register rt, Register rs, uint16_t imm16);
void Addiu(Register rt, Register rs, uint16_t imm16);
- void Sub(Register rd, Register rs, Register rt);
void Subu(Register rd, Register rs, Register rt);
- void Mult(Register rs, Register rt);
- void Multu(Register rs, Register rt);
- void Div(Register rs, Register rt);
- void Divu(Register rs, Register rt);
+
+ void MultR2(Register rs, Register rt); // R2
+ void MultuR2(Register rs, Register rt); // R2
+ void DivR2(Register rs, Register rt); // R2
+ void DivuR2(Register rs, Register rt); // R2
+ void MulR2(Register rd, Register rs, Register rt); // R2
+ void DivR2(Register rd, Register rs, Register rt); // R2
+ void ModR2(Register rd, Register rs, Register rt); // R2
+ void DivuR2(Register rd, Register rs, Register rt); // R2
+ void ModuR2(Register rd, Register rs, Register rt); // R2
+ void MulR6(Register rd, Register rs, Register rt); // R6
+ void MuhuR6(Register rd, Register rs, Register rt); // R6
+ void DivR6(Register rd, Register rs, Register rt); // R6
+ void ModR6(Register rd, Register rs, Register rt); // R6
+ void DivuR6(Register rd, Register rs, Register rt); // R6
+ void ModuR6(Register rd, Register rs, Register rt); // R6
void And(Register rd, Register rs, Register rt);
void Andi(Register rt, Register rs, uint16_t imm16);
@@ -74,12 +131,15 @@ class MipsAssembler FINAL : public Assembler {
void Xori(Register rt, Register rs, uint16_t imm16);
void Nor(Register rd, Register rs, Register rt);
- void Sll(Register rd, Register rs, int shamt);
- void Srl(Register rd, Register rs, int shamt);
- void Sra(Register rd, Register rs, int shamt);
- void Sllv(Register rd, Register rs, Register rt);
- void Srlv(Register rd, Register rs, Register rt);
- void Srav(Register rd, Register rs, Register rt);
+ void Seb(Register rd, Register rt); // R2+
+ void Seh(Register rd, Register rt); // R2+
+
+ void Sll(Register rd, Register rt, int shamt);
+ void Srl(Register rd, Register rt, int shamt);
+ void Sra(Register rd, Register rt, int shamt);
+ void Sllv(Register rd, Register rt, Register rs);
+ void Srlv(Register rd, Register rt, Register rs);
+ void Srav(Register rd, Register rt, Register rs);
void Lb(Register rt, Register rs, uint16_t imm16);
void Lh(Register rt, Register rs, uint16_t imm16);
@@ -87,8 +147,9 @@ class MipsAssembler FINAL : public Assembler {
void Lbu(Register rt, Register rs, uint16_t imm16);
void Lhu(Register rt, Register rs, uint16_t imm16);
void Lui(Register rt, uint16_t imm16);
- void Mfhi(Register rd);
- void Mflo(Register rd);
+ void Sync(uint32_t stype);
+ void Mfhi(Register rd); // R2
+ void Mflo(Register rd); // R2
void Sb(Register rt, Register rs, uint16_t imm16);
void Sh(Register rt, Register rs, uint16_t imm16);
@@ -99,81 +160,138 @@ class MipsAssembler FINAL : public Assembler {
void Slti(Register rt, Register rs, uint16_t imm16);
void Sltiu(Register rt, Register rs, uint16_t imm16);
- void Beq(Register rt, Register rs, uint16_t imm16);
- void Bne(Register rt, Register rs, uint16_t imm16);
- void J(uint32_t address);
- void Jal(uint32_t address);
- void Jr(Register rs);
+ void B(uint16_t imm16);
+ void Beq(Register rs, Register rt, uint16_t imm16);
+ void Bne(Register rs, Register rt, uint16_t imm16);
+ void Beqz(Register rt, uint16_t imm16);
+ void Bnez(Register rt, uint16_t imm16);
+ void Bltz(Register rt, uint16_t imm16);
+ void Bgez(Register rt, uint16_t imm16);
+ void Blez(Register rt, uint16_t imm16);
+ void Bgtz(Register rt, uint16_t imm16);
+ void J(uint32_t addr26);
+ void Jal(uint32_t addr26);
+ void Jalr(Register rd, Register rs);
void Jalr(Register rs);
+ void Jr(Register rs);
+ void Nal();
+ void Auipc(Register rs, uint16_t imm16); // R6
+ void Addiupc(Register rs, uint32_t imm19); // R6
+ void Bc(uint32_t imm26); // R6
+ void Jic(Register rt, uint16_t imm16); // R6
+ void Jialc(Register rt, uint16_t imm16); // R6
+ void Bltc(Register rs, Register rt, uint16_t imm16); // R6
+ void Bltzc(Register rt, uint16_t imm16); // R6
+ void Bgtzc(Register rt, uint16_t imm16); // R6
+ void Bgec(Register rs, Register rt, uint16_t imm16); // R6
+ void Bgezc(Register rt, uint16_t imm16); // R6
+ void Blezc(Register rt, uint16_t imm16); // R6
+ void Bltuc(Register rs, Register rt, uint16_t imm16); // R6
+ void Bgeuc(Register rs, Register rt, uint16_t imm16); // R6
+ void Beqc(Register rs, Register rt, uint16_t imm16); // R6
+ void Bnec(Register rs, Register rt, uint16_t imm16); // R6
+ void Beqzc(Register rs, uint32_t imm21); // R6
+ void Bnezc(Register rs, uint32_t imm21); // R6
void AddS(FRegister fd, FRegister fs, FRegister ft);
void SubS(FRegister fd, FRegister fs, FRegister ft);
void MulS(FRegister fd, FRegister fs, FRegister ft);
void DivS(FRegister fd, FRegister fs, FRegister ft);
- void AddD(DRegister fd, DRegister fs, DRegister ft);
- void SubD(DRegister fd, DRegister fs, DRegister ft);
- void MulD(DRegister fd, DRegister fs, DRegister ft);
- void DivD(DRegister fd, DRegister fs, DRegister ft);
+ void AddD(FRegister fd, FRegister fs, FRegister ft);
+ void SubD(FRegister fd, FRegister fs, FRegister ft);
+ void MulD(FRegister fd, FRegister fs, FRegister ft);
+ void DivD(FRegister fd, FRegister fs, FRegister ft);
void MovS(FRegister fd, FRegister fs);
- void MovD(DRegister fd, DRegister fs);
+ void MovD(FRegister fd, FRegister fs);
+ void NegS(FRegister fd, FRegister fs);
+ void NegD(FRegister fd, FRegister fs);
+
+ void Cvtsw(FRegister fd, FRegister fs);
+ void Cvtdw(FRegister fd, FRegister fs);
+ void Cvtsd(FRegister fd, FRegister fs);
+ void Cvtds(FRegister fd, FRegister fs);
void Mfc1(Register rt, FRegister fs);
- void Mtc1(FRegister ft, Register rs);
+ void Mtc1(Register rt, FRegister fs);
+ void Mfhc1(Register rt, FRegister fs);
+ void Mthc1(Register rt, FRegister fs);
void Lwc1(FRegister ft, Register rs, uint16_t imm16);
- void Ldc1(DRegister ft, Register rs, uint16_t imm16);
+ void Ldc1(FRegister ft, Register rs, uint16_t imm16);
void Swc1(FRegister ft, Register rs, uint16_t imm16);
- void Sdc1(DRegister ft, Register rs, uint16_t imm16);
+ void Sdc1(FRegister ft, Register rs, uint16_t imm16);
void Break();
void Nop();
- void Move(Register rt, Register rs);
- void Clear(Register rt);
- void Not(Register rt, Register rs);
- void Mul(Register rd, Register rs, Register rt);
- void Div(Register rd, Register rs, Register rt);
- void Rem(Register rd, Register rs, Register rt);
-
- void AddConstant(Register rt, Register rs, int32_t value);
- void LoadImmediate(Register rt, int32_t value);
+ void Move(Register rd, Register rs);
+ void Clear(Register rd);
+ void Not(Register rd, Register rs);
+
+ // Higher level composite instructions.
+ void LoadConst32(Register rd, int32_t value);
+ void LoadConst64(Register reg_hi, Register reg_lo, int64_t value);
+ void LoadDConst64(FRegister rd, int64_t value, Register temp);
+ void LoadSConst32(FRegister r, int32_t value, Register temp);
+ void StoreConst32ToOffset(int32_t value, Register base, int32_t offset, Register temp);
+ void StoreConst64ToOffset(int64_t value, Register base, int32_t offset, Register temp);
+ void Addiu32(Register rt, Register rs, int32_t value, Register rtmp = AT);
+
+ // These will generate R2 branches or R6 branches as appropriate.
+ void Bind(MipsLabel* label);
+ void B(MipsLabel* label);
+ void Jalr(MipsLabel* label, Register indirect_reg);
+ void Beq(Register rs, Register rt, MipsLabel* label);
+ void Bne(Register rs, Register rt, MipsLabel* label);
+ void Beqz(Register rt, MipsLabel* label);
+ void Bnez(Register rt, MipsLabel* label);
+ void Bltz(Register rt, MipsLabel* label);
+ void Bgez(Register rt, MipsLabel* label);
+ void Blez(Register rt, MipsLabel* label);
+ void Bgtz(Register rt, MipsLabel* label);
+ void Blt(Register rs, Register rt, MipsLabel* label);
+ void Bge(Register rs, Register rt, MipsLabel* label);
+ void Bltu(Register rs, Register rt, MipsLabel* label);
+ void Bgeu(Register rs, Register rt, MipsLabel* label);
void EmitLoad(ManagedRegister m_dst, Register src_register, int32_t src_offset, size_t size);
void LoadFromOffset(LoadOperandType type, Register reg, Register base, int32_t offset);
void LoadSFromOffset(FRegister reg, Register base, int32_t offset);
- void LoadDFromOffset(DRegister reg, Register base, int32_t offset);
+ void LoadDFromOffset(FRegister reg, Register base, int32_t offset);
void StoreToOffset(StoreOperandType type, Register reg, Register base, int32_t offset);
void StoreSToOffset(FRegister reg, Register base, int32_t offset);
- void StoreDToOffset(DRegister reg, Register base, int32_t offset);
+ void StoreDToOffset(FRegister reg, Register base, int32_t offset);
// Emit data (e.g. encoded instruction or immediate) to the instruction stream.
- void Emit(int32_t value);
- void EmitBranch(Register rt, Register rs, Label* label, bool equal);
- void EmitJump(Label* label, bool link);
- void Bind(Label* label, bool is_jump);
+ void Emit(uint32_t value);
+
+ // Push/pop composite routines.
+ void Push(Register rs);
+ void Pop(Register rd);
+ void PopAndReturn(Register rd, Register rt);
void Bind(Label* label) OVERRIDE {
- Bind(label, false);
+ Bind(down_cast<MipsLabel*>(label));
}
- void Jump(Label* label) OVERRIDE {
- EmitJump(label, false);
+ void Jump(Label* label ATTRIBUTE_UNUSED) OVERRIDE {
+ UNIMPLEMENTED(FATAL) << "Do not use Jump for MIPS";
}
//
- // Overridden common assembler high-level functionality
+ // Overridden common assembler high-level functionality.
//
- // Emit code that will create an activation on the stack
+ // Emit code that will create an activation on the stack.
void BuildFrame(size_t frame_size, ManagedRegister method_reg,
const std::vector<ManagedRegister>& callee_save_regs,
const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
- // Emit code that will remove an activation from the stack
+ // Emit code that will remove an activation from the stack.
void RemoveFrame(size_t frame_size, const std::vector<ManagedRegister>& callee_save_regs)
OVERRIDE;
void IncreaseFrameSize(size_t adjust) OVERRIDE;
void DecreaseFrameSize(size_t adjust) OVERRIDE;
- // Store routines
+ // Store routines.
void Store(FrameOffset offs, ManagedRegister msrc, size_t size) OVERRIDE;
void StoreRef(FrameOffset dest, ManagedRegister msrc) OVERRIDE;
void StoreRawPtr(FrameOffset dest, ManagedRegister msrc) OVERRIDE;
@@ -191,7 +309,7 @@ class MipsAssembler FINAL : public Assembler {
void StoreSpanning(FrameOffset dest, ManagedRegister msrc, FrameOffset in_off,
ManagedRegister mscratch) OVERRIDE;
- // Load routines
+ // Load routines.
void Load(ManagedRegister mdest, FrameOffset src, size_t size) OVERRIDE;
void LoadFromThread32(ManagedRegister mdest, ThreadOffset<4> src, size_t size) OVERRIDE;
@@ -205,7 +323,7 @@ class MipsAssembler FINAL : public Assembler {
void LoadRawPtrFromThread32(ManagedRegister mdest, ThreadOffset<4> offs) OVERRIDE;
- // Copying routines
+ // Copying routines.
void Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) OVERRIDE;
void CopyRawPtrFromThread32(FrameOffset fr_offs, ThreadOffset<4> thr_offs,
@@ -235,13 +353,13 @@ class MipsAssembler FINAL : public Assembler {
void MemoryBarrier(ManagedRegister) OVERRIDE;
- // Sign extension
+ // Sign extension.
void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
- // Zero extension
+ // Zero extension.
void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
- // Exploit fast access in managed code to Thread::Current()
+ // Exploit fast access in managed code to Thread::Current().
void GetCurrentThread(ManagedRegister tr) OVERRIDE;
void GetCurrentThread(FrameOffset dest_offset, ManagedRegister mscratch) OVERRIDE;
@@ -257,7 +375,7 @@ class MipsAssembler FINAL : public Assembler {
void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
ManagedRegister mscratch, bool null_allowed) OVERRIDE;
- // src holds a handle scope entry (Object**) load this into dst
+ // src holds a handle scope entry (Object**) load this into dst.
void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
// Heap::VerifyObject on src. In some cases (such as a reference to this) we
@@ -265,7 +383,7 @@ class MipsAssembler FINAL : public Assembler {
void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
- // Call to address held at [base+offset]
+ // Call to address held at [base+offset].
void Call(ManagedRegister base, Offset offset, ManagedRegister mscratch) OVERRIDE;
void Call(FrameOffset base, Offset offset, ManagedRegister mscratch) OVERRIDE;
void CallFromThread32(ThreadOffset<4> offset, ManagedRegister mscratch) OVERRIDE;
@@ -274,41 +392,251 @@ class MipsAssembler FINAL : public Assembler {
// and branch to a ExceptionSlowPath if it is.
void ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) OVERRIDE;
+ // Emit slow paths queued during assembly and promote short branches to long if needed.
+ void FinalizeCode() OVERRIDE;
+
+ // Emit branches and finalize all instructions.
+ void FinalizeInstructions(const MemoryRegion& region);
+
+ // Returns the (always-)current location of a label (can be used in class CodeGeneratorMIPS,
+ // must be used instead of MipsLabel::GetPosition()).
+ uint32_t GetLabelLocation(MipsLabel* label) const;
+
+ // Get the final position of a label after local fixup based on the old position
+ // recorded before FinalizeCode().
+ uint32_t GetAdjustedPosition(uint32_t old_position);
+
+ enum BranchCondition {
+ kCondLT,
+ kCondGE,
+ kCondLE,
+ kCondGT,
+ kCondLTZ,
+ kCondGEZ,
+ kCondLEZ,
+ kCondGTZ,
+ kCondEQ,
+ kCondNE,
+ kCondEQZ,
+ kCondNEZ,
+ kCondLTU,
+ kCondGEU,
+ kUncond,
+ };
+ friend std::ostream& operator<<(std::ostream& os, const BranchCondition& rhs);
+
private:
+ class Branch {
+ public:
+ enum Type {
+ // R2 short branches.
+ kUncondBranch,
+ kCondBranch,
+ kCall,
+ // R2 long branches.
+ kLongUncondBranch,
+ kLongCondBranch,
+ kLongCall,
+ // R6 short branches.
+ kR6UncondBranch,
+ kR6CondBranch,
+ kR6Call,
+ // R6 long branches.
+ kR6LongUncondBranch,
+ kR6LongCondBranch,
+ kR6LongCall,
+ };
+ // Bit sizes of offsets defined as enums to minimize chance of typos.
+ enum OffsetBits {
+ kOffset16 = 16,
+ kOffset18 = 18,
+ kOffset21 = 21,
+ kOffset23 = 23,
+ kOffset28 = 28,
+ kOffset32 = 32,
+ };
+
+ static constexpr uint32_t kUnresolved = 0xffffffff; // Unresolved target_
+ static constexpr int32_t kMaxBranchLength = 32;
+ static constexpr int32_t kMaxBranchSize = kMaxBranchLength * sizeof(uint32_t);
+
+ struct BranchInfo {
+ // Branch length as a number of 4-byte-long instructions.
+ uint32_t length;
+ // Ordinal number (0-based) of the first (or the only) instruction that contains the branch's
+ // PC-relative offset (or its most significant 16-bit half, which goes first).
+ uint32_t instr_offset;
+ // Different MIPS instructions with PC-relative offsets apply said offsets to slightly
+ // different origins, e.g. to PC or PC+4. Encode the origin distance (as a number of 4-byte
+ // instructions) from the instruction containing the offset.
+ uint32_t pc_org;
+ // How large (in bits) a PC-relative offset can be for a given type of branch (kR6CondBranch
+ // is an exception: use kOffset23 for beqzc/bnezc).
+ OffsetBits offset_size;
+ // Some MIPS instructions with PC-relative offsets shift the offset by 2. Encode the shift
+ // count.
+ int offset_shift;
+ };
+ static const BranchInfo branch_info_[/* Type */];
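    // Illustration (an assumption based on the field descriptions above, not copied from the
    // patch): the table entry for the plain R2 unconditional branch (b plus a delay-slot nop)
    // would plausibly read
    //   { /* length */ 2, /* instr_offset */ 0, /* pc_org */ 1, kOffset18, /* offset_shift */ 2 },
    // i.e. two instructions long, the offset lives in instruction 0, hardware applies it
    // relative to PC + 4, and the 16-bit field shifted left by 2 spans an 18-bit byte range.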
+
+ // Unconditional branch.
+ Branch(bool is_r6, uint32_t location, uint32_t target);
+ // Conditional branch.
+ Branch(bool is_r6,
+ uint32_t location,
+ uint32_t target,
+ BranchCondition condition,
+ Register lhs_reg,
+ Register rhs_reg = ZERO);
+    // Call (branch and link) that stores the target address in a given register (e.g. T9).
+ Branch(bool is_r6, uint32_t location, uint32_t target, Register indirect_reg);
+
+ // Some conditional branches with lhs = rhs are effectively NOPs, while some
+ // others are effectively unconditional. MIPSR6 conditional branches require lhs != rhs.
+ // So, we need a way to identify such branches in order to emit no instructions for them
+ // or change them to unconditional.
+ static bool IsNop(BranchCondition condition, Register lhs, Register rhs);
+ static bool IsUncond(BranchCondition condition, Register lhs, Register rhs);
+
+ static BranchCondition OppositeCondition(BranchCondition cond);
+
+ Type GetType() const;
+ BranchCondition GetCondition() const;
+ Register GetLeftRegister() const;
+ Register GetRightRegister() const;
+ uint32_t GetTarget() const;
+ uint32_t GetLocation() const;
+ uint32_t GetOldLocation() const;
+ uint32_t GetLength() const;
+ uint32_t GetOldLength() const;
+ uint32_t GetSize() const;
+ uint32_t GetOldSize() const;
+ uint32_t GetEndLocation() const;
+ uint32_t GetOldEndLocation() const;
+ bool IsLong() const;
+ bool IsResolved() const;
+
+ // Returns the bit size of the signed offset that the branch instruction can handle.
+ OffsetBits GetOffsetSize() const;
+
+ // Calculates the distance between two byte locations in the assembler buffer and
+ // returns the number of bits needed to represent the distance as a signed integer.
+ //
+ // Branch instructions have signed offsets of 16, 19 (addiupc), 21 (beqzc/bnezc),
+ // and 26 (bc) bits, which are additionally shifted left 2 positions at run time.
+ //
+    // Composite branches (made of several instructions) with longer reach have 32-bit
+    // offsets encoded as two 16-bit "halves" in two instructions (high half goes first).
+    // The composite branches cover a range of PC +/- 2GB.
+    //
+    // The returned values are therefore: 18, 21, 23, 28 and 32. There's also a special
+    // case with the addiu instruction and a 16-bit offset.
+ static OffsetBits GetOffsetSizeNeeded(uint32_t location, uint32_t target);
+
+ // Resolve a branch when the target is known.
+ void Resolve(uint32_t target);
+
+ // Relocate a branch by a given delta if needed due to expansion of this or another
+ // branch at a given location by this delta (just changes location_ and target_).
+ void Relocate(uint32_t expand_location, uint32_t delta);
+
+ // If the branch is short, changes its type to long.
+ void PromoteToLong();
+
+ // If necessary, updates the type by promoting a short branch to a long branch
+ // based on the branch location and target. Returns the amount (in bytes) by
+ // which the branch size has increased.
+ // max_short_distance caps the maximum distance between location_ and target_
+ // that is allowed for short branches. This is for debugging/testing purposes.
+ // max_short_distance = 0 forces all short branches to become long.
+ // Use the implicit default argument when not debugging/testing.
+ uint32_t PromoteIfNeeded(uint32_t max_short_distance = std::numeric_limits<uint32_t>::max());
+
+ // Returns the location of the instruction(s) containing the offset.
+ uint32_t GetOffsetLocation() const;
+
+ // Calculates and returns the offset ready for encoding in the branch instruction(s).
+ uint32_t GetOffset() const;
+
+ private:
+ // Completes branch construction by determining and recording its type.
+ void InitializeType(bool is_call, bool is_r6);
+ // Helper for the above.
+ void InitShortOrLong(OffsetBits ofs_size, Type short_type, Type long_type);
+
+ uint32_t old_location_; // Offset into assembler buffer in bytes.
+ uint32_t location_; // Offset into assembler buffer in bytes.
+ uint32_t target_; // Offset into assembler buffer in bytes.
+
+ uint32_t lhs_reg_ : 5; // Left-hand side register in conditional branches or
+ // indirect call register.
+ uint32_t rhs_reg_ : 5; // Right-hand side register in conditional branches.
+ BranchCondition condition_ : 5; // Condition for conditional branches.
+
+ Type type_ : 5; // Current type of the branch.
+ Type old_type_ : 5; // Initial type of the branch.
+ };
+ friend std::ostream& operator<<(std::ostream& os, const Branch::Type& rhs);
+ friend std::ostream& operator<<(std::ostream& os, const Branch::OffsetBits& rhs);
+
void EmitR(int opcode, Register rs, Register rt, Register rd, int shamt, int funct);
void EmitI(int opcode, Register rs, Register rt, uint16_t imm);
- void EmitJ(int opcode, int address);
+ void EmitI21(int opcode, Register rs, uint32_t imm21);
+ void EmitI26(int opcode, uint32_t imm26);
void EmitFR(int opcode, int fmt, FRegister ft, FRegister fs, FRegister fd, int funct);
void EmitFI(int opcode, int fmt, FRegister rt, uint16_t imm);
-
- int32_t EncodeBranchOffset(int offset, int32_t inst, bool is_jump);
- int DecodeBranchOffset(int32_t inst, bool is_jump);
-
- FRegister ConvertDRegToFReg(DRegister reg) {
- return static_cast<FRegister>(reg * 2);
- }
- Register ConvertDRegToReg(DRegister reg) {
- return static_cast<Register>(reg * 2);
+ void EmitBcond(BranchCondition cond, Register rs, Register rt, uint16_t imm16);
+ void EmitBcondc(BranchCondition cond, Register rs, Register rt, uint32_t imm16_21); // R6
+
+ void Buncond(MipsLabel* label);
+ void Bcond(MipsLabel* label, BranchCondition condition, Register lhs, Register rhs = ZERO);
+ void Call(MipsLabel* label, Register indirect_reg);
+ void FinalizeLabeledBranch(MipsLabel* label);
+
+ Branch* GetBranch(uint32_t branch_id);
+ const Branch* GetBranch(uint32_t branch_id) const;
+
+ void PromoteBranches();
+ void EmitBranch(Branch* branch);
+ void EmitBranches();
+
+ // Emits exception block.
+ void EmitExceptionPoll(MipsExceptionSlowPath* exception);
+
+ bool IsR6() const {
+ if (isa_features_ != nullptr) {
+ return isa_features_->IsR6();
+ } else {
+ return false;
+ }
}
- Register ConvertFRegToReg(FRegister reg) {
- return static_cast<Register>(reg);
- }
- FRegister ConvertRegToFReg(Register reg) {
- return static_cast<FRegister>(reg);
+
+ bool Is32BitFPU() const {
+ if (isa_features_ != nullptr) {
+ return isa_features_->Is32BitFloatingPoint();
+ } else {
+ return true;
+ }
}
- DISALLOW_COPY_AND_ASSIGN(MipsAssembler);
-};
+ // List of exception blocks to generate at the end of the code cache.
+ std::vector<MipsExceptionSlowPath> exception_blocks_;
-// Slowpath entered when Thread::Current()->_exception is non-null
-class MipsExceptionSlowPath FINAL : public SlowPath {
- public:
- MipsExceptionSlowPath(MipsManagedRegister scratch, size_t stack_adjust)
- : scratch_(scratch), stack_adjust_(stack_adjust) {}
- virtual void Emit(Assembler *sp_asm) OVERRIDE;
- private:
- const MipsManagedRegister scratch_;
- const size_t stack_adjust_;
+ std::vector<Branch> branches_;
+
+ // Whether appending instructions at the end of the buffer or overwriting the existing ones.
+ bool overwriting_;
+ // The current overwrite location.
+ uint32_t overwrite_location_;
+
+  // Data for GetAdjustedPosition(), see the description there.
+ uint32_t last_position_adjustment_;
+ uint32_t last_old_position_;
+ uint32_t last_branch_id_;
+
+ const MipsInstructionSetFeatures* isa_features_;
+
+ DISALLOW_COPY_AND_ASSIGN(MipsAssembler);
};
} // namespace mips
diff --git a/compiler/utils/mips/assembler_mips_test.cc b/compiler/utils/mips/assembler_mips_test.cc
new file mode 100644
index 0000000000..063d8bd825
--- /dev/null
+++ b/compiler/utils/mips/assembler_mips_test.cc
@@ -0,0 +1,1324 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "assembler_mips.h"
+
+#include <map>
+
+#include "base/stl_util.h"
+#include "utils/assembler_test.h"
+
+namespace art {
+
+struct MIPSCpuRegisterCompare {
+ bool operator()(const mips::Register& a, const mips::Register& b) const {
+ return a < b;
+ }
+};
+
+class AssemblerMIPSTest : public AssemblerTest<mips::MipsAssembler,
+ mips::Register,
+ mips::FRegister,
+ uint32_t> {
+ public:
+ typedef AssemblerTest<mips::MipsAssembler, mips::Register, mips::FRegister, uint32_t> Base;
+
+ protected:
+ // Get the typically used name for this architecture, e.g., aarch64, x86-64, ...
+ std::string GetArchitectureString() OVERRIDE {
+ return "mips";
+ }
+
+ std::string GetAssemblerParameters() OVERRIDE {
+ return " --no-warn -32 -march=mips32r2";
+ }
+
+ std::string GetDisassembleParameters() OVERRIDE {
+ return " -D -bbinary -mmips:isa32r2";
+ }
+
+ void SetUpHelpers() OVERRIDE {
+ if (registers_.size() == 0) {
+ registers_.push_back(new mips::Register(mips::ZERO));
+ registers_.push_back(new mips::Register(mips::AT));
+ registers_.push_back(new mips::Register(mips::V0));
+ registers_.push_back(new mips::Register(mips::V1));
+ registers_.push_back(new mips::Register(mips::A0));
+ registers_.push_back(new mips::Register(mips::A1));
+ registers_.push_back(new mips::Register(mips::A2));
+ registers_.push_back(new mips::Register(mips::A3));
+ registers_.push_back(new mips::Register(mips::T0));
+ registers_.push_back(new mips::Register(mips::T1));
+ registers_.push_back(new mips::Register(mips::T2));
+ registers_.push_back(new mips::Register(mips::T3));
+ registers_.push_back(new mips::Register(mips::T4));
+ registers_.push_back(new mips::Register(mips::T5));
+ registers_.push_back(new mips::Register(mips::T6));
+ registers_.push_back(new mips::Register(mips::T7));
+ registers_.push_back(new mips::Register(mips::S0));
+ registers_.push_back(new mips::Register(mips::S1));
+ registers_.push_back(new mips::Register(mips::S2));
+ registers_.push_back(new mips::Register(mips::S3));
+ registers_.push_back(new mips::Register(mips::S4));
+ registers_.push_back(new mips::Register(mips::S5));
+ registers_.push_back(new mips::Register(mips::S6));
+ registers_.push_back(new mips::Register(mips::S7));
+ registers_.push_back(new mips::Register(mips::T8));
+ registers_.push_back(new mips::Register(mips::T9));
+ registers_.push_back(new mips::Register(mips::K0));
+ registers_.push_back(new mips::Register(mips::K1));
+ registers_.push_back(new mips::Register(mips::GP));
+ registers_.push_back(new mips::Register(mips::SP));
+ registers_.push_back(new mips::Register(mips::FP));
+ registers_.push_back(new mips::Register(mips::RA));
+
+ secondary_register_names_.emplace(mips::Register(mips::ZERO), "zero");
+ secondary_register_names_.emplace(mips::Register(mips::AT), "at");
+ secondary_register_names_.emplace(mips::Register(mips::V0), "v0");
+ secondary_register_names_.emplace(mips::Register(mips::V1), "v1");
+ secondary_register_names_.emplace(mips::Register(mips::A0), "a0");
+ secondary_register_names_.emplace(mips::Register(mips::A1), "a1");
+ secondary_register_names_.emplace(mips::Register(mips::A2), "a2");
+ secondary_register_names_.emplace(mips::Register(mips::A3), "a3");
+ secondary_register_names_.emplace(mips::Register(mips::T0), "t0");
+ secondary_register_names_.emplace(mips::Register(mips::T1), "t1");
+ secondary_register_names_.emplace(mips::Register(mips::T2), "t2");
+ secondary_register_names_.emplace(mips::Register(mips::T3), "t3");
+ secondary_register_names_.emplace(mips::Register(mips::T4), "t4");
+ secondary_register_names_.emplace(mips::Register(mips::T5), "t5");
+ secondary_register_names_.emplace(mips::Register(mips::T6), "t6");
+ secondary_register_names_.emplace(mips::Register(mips::T7), "t7");
+ secondary_register_names_.emplace(mips::Register(mips::S0), "s0");
+ secondary_register_names_.emplace(mips::Register(mips::S1), "s1");
+ secondary_register_names_.emplace(mips::Register(mips::S2), "s2");
+ secondary_register_names_.emplace(mips::Register(mips::S3), "s3");
+ secondary_register_names_.emplace(mips::Register(mips::S4), "s4");
+ secondary_register_names_.emplace(mips::Register(mips::S5), "s5");
+ secondary_register_names_.emplace(mips::Register(mips::S6), "s6");
+ secondary_register_names_.emplace(mips::Register(mips::S7), "s7");
+ secondary_register_names_.emplace(mips::Register(mips::T8), "t8");
+ secondary_register_names_.emplace(mips::Register(mips::T9), "t9");
+ secondary_register_names_.emplace(mips::Register(mips::K0), "k0");
+ secondary_register_names_.emplace(mips::Register(mips::K1), "k1");
+ secondary_register_names_.emplace(mips::Register(mips::GP), "gp");
+ secondary_register_names_.emplace(mips::Register(mips::SP), "sp");
+ secondary_register_names_.emplace(mips::Register(mips::FP), "fp");
+ secondary_register_names_.emplace(mips::Register(mips::RA), "ra");
+
+ fp_registers_.push_back(new mips::FRegister(mips::F0));
+ fp_registers_.push_back(new mips::FRegister(mips::F1));
+ fp_registers_.push_back(new mips::FRegister(mips::F2));
+ fp_registers_.push_back(new mips::FRegister(mips::F3));
+ fp_registers_.push_back(new mips::FRegister(mips::F4));
+ fp_registers_.push_back(new mips::FRegister(mips::F5));
+ fp_registers_.push_back(new mips::FRegister(mips::F6));
+ fp_registers_.push_back(new mips::FRegister(mips::F7));
+ fp_registers_.push_back(new mips::FRegister(mips::F8));
+ fp_registers_.push_back(new mips::FRegister(mips::F9));
+ fp_registers_.push_back(new mips::FRegister(mips::F10));
+ fp_registers_.push_back(new mips::FRegister(mips::F11));
+ fp_registers_.push_back(new mips::FRegister(mips::F12));
+ fp_registers_.push_back(new mips::FRegister(mips::F13));
+ fp_registers_.push_back(new mips::FRegister(mips::F14));
+ fp_registers_.push_back(new mips::FRegister(mips::F15));
+ fp_registers_.push_back(new mips::FRegister(mips::F16));
+ fp_registers_.push_back(new mips::FRegister(mips::F17));
+ fp_registers_.push_back(new mips::FRegister(mips::F18));
+ fp_registers_.push_back(new mips::FRegister(mips::F19));
+ fp_registers_.push_back(new mips::FRegister(mips::F20));
+ fp_registers_.push_back(new mips::FRegister(mips::F21));
+ fp_registers_.push_back(new mips::FRegister(mips::F22));
+ fp_registers_.push_back(new mips::FRegister(mips::F23));
+ fp_registers_.push_back(new mips::FRegister(mips::F24));
+ fp_registers_.push_back(new mips::FRegister(mips::F25));
+ fp_registers_.push_back(new mips::FRegister(mips::F26));
+ fp_registers_.push_back(new mips::FRegister(mips::F27));
+ fp_registers_.push_back(new mips::FRegister(mips::F28));
+ fp_registers_.push_back(new mips::FRegister(mips::F29));
+ fp_registers_.push_back(new mips::FRegister(mips::F30));
+ fp_registers_.push_back(new mips::FRegister(mips::F31));
+ }
+ }
+
+ void TearDown() OVERRIDE {
+ AssemblerTest::TearDown();
+ STLDeleteElements(&registers_);
+ STLDeleteElements(&fp_registers_);
+ }
+
+ std::vector<mips::Register*> GetRegisters() OVERRIDE {
+ return registers_;
+ }
+
+ std::vector<mips::FRegister*> GetFPRegisters() OVERRIDE {
+ return fp_registers_;
+ }
+
+ uint32_t CreateImmediate(int64_t imm_value) OVERRIDE {
+ return imm_value;
+ }
+
+ std::string GetSecondaryRegisterName(const mips::Register& reg) OVERRIDE {
+ CHECK(secondary_register_names_.find(reg) != secondary_register_names_.end());
+ return secondary_register_names_[reg];
+ }
+
+ std::string RepeatInsn(size_t count, const std::string& insn) {
+ std::string result;
+ for (; count != 0u; --count) {
+ result += insn;
+ }
+ return result;
+ }
+
+ private:
+ std::vector<mips::Register*> registers_;
+ std::map<mips::Register, std::string, MIPSCpuRegisterCompare> secondary_register_names_;
+
+ std::vector<mips::FRegister*> fp_registers_;
+};
+
+
+TEST_F(AssemblerMIPSTest, Toolchain) {
+ EXPECT_TRUE(CheckTools());
+}
+
+#define __ GetAssembler()->
+
+TEST_F(AssemblerMIPSTest, Addu) {
+ DriverStr(RepeatRRR(&mips::MipsAssembler::Addu, "addu ${reg1}, ${reg2}, ${reg3}"), "Addu");
+}
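// The DriverStr()/RepeatRRR() pattern used throughout this test: RepeatRRR() calls the given
// assembler member for every triple of registers from GetRegisters() and builds the matching
// expected text by substituting the {regN} placeholders; DriverStr() then runs the host
// toolchain on that text and compares binaries. A rough, self-contained sketch of the
// expansion (Subst/ExpectedForRRR are illustrative names, not the real template machinery):
#include <string>
#include <vector>

static std::string Subst(std::string s, const std::string& key, const std::string& value) {
  size_t pos = s.find(key);
  while (pos != std::string::npos) {
    s.replace(pos, key.size(), value);
    pos = s.find(key, pos + value.size());
  }
  return s;
}

static std::string ExpectedForRRR(const std::string& fmt,  // e.g. "addu ${reg1}, ${reg2}, ${reg3}"
                                  const std::vector<std::string>& reg_names) {
  std::string expected;
  for (const std::string& r1 : reg_names) {
    for (const std::string& r2 : reg_names) {
      for (const std::string& r3 : reg_names) {
        std::string line = fmt;
        line = Subst(line, "{reg1}", r1);  // the "$" in the format stays, giving e.g. "$a0"
        line = Subst(line, "{reg2}", r2);
        line = Subst(line, "{reg3}", r3);
        expected += line + "\n";
      }
    }
  }
  return expected;  // DriverStr() assembles this and compares it with the emitted bytes.
}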
+
+TEST_F(AssemblerMIPSTest, Addiu) {
+ DriverStr(RepeatRRIb(&mips::MipsAssembler::Addiu, -16, "addiu ${reg1}, ${reg2}, {imm}"), "Addiu");
+}
+
+TEST_F(AssemblerMIPSTest, Subu) {
+ DriverStr(RepeatRRR(&mips::MipsAssembler::Subu, "subu ${reg1}, ${reg2}, ${reg3}"), "Subu");
+}
+
+TEST_F(AssemblerMIPSTest, MultR2) {
+ DriverStr(RepeatRR(&mips::MipsAssembler::MultR2, "mult ${reg1}, ${reg2}"), "MultR2");
+}
+
+TEST_F(AssemblerMIPSTest, MultuR2) {
+ DriverStr(RepeatRR(&mips::MipsAssembler::MultuR2, "multu ${reg1}, ${reg2}"), "MultuR2");
+}
+
+TEST_F(AssemblerMIPSTest, DivR2Basic) {
+ DriverStr(RepeatRR(&mips::MipsAssembler::DivR2, "div $zero, ${reg1}, ${reg2}"), "DivR2Basic");
+}
+
+TEST_F(AssemblerMIPSTest, DivuR2Basic) {
+ DriverStr(RepeatRR(&mips::MipsAssembler::DivuR2, "divu $zero, ${reg1}, ${reg2}"), "DivuR2Basic");
+}
+
+TEST_F(AssemblerMIPSTest, MulR2) {
+ DriverStr(RepeatRRR(&mips::MipsAssembler::MulR2, "mul ${reg1}, ${reg2}, ${reg3}"), "MulR2");
+}
+
+TEST_F(AssemblerMIPSTest, DivR2) {
+ DriverStr(RepeatRRR(&mips::MipsAssembler::DivR2, "div $zero, ${reg2}, ${reg3}\nmflo ${reg1}"),
+ "DivR2");
+}
+
+TEST_F(AssemblerMIPSTest, ModR2) {
+ DriverStr(RepeatRRR(&mips::MipsAssembler::ModR2, "div $zero, ${reg2}, ${reg3}\nmfhi ${reg1}"),
+ "ModR2");
+}
+
+TEST_F(AssemblerMIPSTest, DivuR2) {
+ DriverStr(RepeatRRR(&mips::MipsAssembler::DivuR2, "divu $zero, ${reg2}, ${reg3}\nmflo ${reg1}"),
+ "DivuR2");
+}
+
+TEST_F(AssemblerMIPSTest, ModuR2) {
+ DriverStr(RepeatRRR(&mips::MipsAssembler::ModuR2, "divu $zero, ${reg2}, ${reg3}\nmfhi ${reg1}"),
+ "ModuR2");
+}
+
+TEST_F(AssemblerMIPSTest, And) {
+ DriverStr(RepeatRRR(&mips::MipsAssembler::And, "and ${reg1}, ${reg2}, ${reg3}"), "And");
+}
+
+TEST_F(AssemblerMIPSTest, Andi) {
+ DriverStr(RepeatRRIb(&mips::MipsAssembler::Andi, 16, "andi ${reg1}, ${reg2}, {imm}"), "Andi");
+}
+
+TEST_F(AssemblerMIPSTest, Or) {
+ DriverStr(RepeatRRR(&mips::MipsAssembler::Or, "or ${reg1}, ${reg2}, ${reg3}"), "Or");
+}
+
+TEST_F(AssemblerMIPSTest, Ori) {
+ DriverStr(RepeatRRIb(&mips::MipsAssembler::Ori, 16, "ori ${reg1}, ${reg2}, {imm}"), "Ori");
+}
+
+TEST_F(AssemblerMIPSTest, Xor) {
+ DriverStr(RepeatRRR(&mips::MipsAssembler::Xor, "xor ${reg1}, ${reg2}, ${reg3}"), "Xor");
+}
+
+TEST_F(AssemblerMIPSTest, Xori) {
+ DriverStr(RepeatRRIb(&mips::MipsAssembler::Xori, 16, "xori ${reg1}, ${reg2}, {imm}"), "Xori");
+}
+
+TEST_F(AssemblerMIPSTest, Nor) {
+ DriverStr(RepeatRRR(&mips::MipsAssembler::Nor, "nor ${reg1}, ${reg2}, ${reg3}"), "Nor");
+}
+
+TEST_F(AssemblerMIPSTest, Seb) {
+ DriverStr(RepeatRR(&mips::MipsAssembler::Seb, "seb ${reg1}, ${reg2}"), "Seb");
+}
+
+TEST_F(AssemblerMIPSTest, Seh) {
+ DriverStr(RepeatRR(&mips::MipsAssembler::Seh, "seh ${reg1}, ${reg2}"), "Seh");
+}
+
+TEST_F(AssemblerMIPSTest, Sll) {
+ DriverStr(RepeatRRIb(&mips::MipsAssembler::Sll, 5, "sll ${reg1}, ${reg2}, {imm}"), "Sll");
+}
+
+TEST_F(AssemblerMIPSTest, Srl) {
+ DriverStr(RepeatRRIb(&mips::MipsAssembler::Srl, 5, "srl ${reg1}, ${reg2}, {imm}"), "Srl");
+}
+
+TEST_F(AssemblerMIPSTest, Sra) {
+ DriverStr(RepeatRRIb(&mips::MipsAssembler::Sra, 5, "sra ${reg1}, ${reg2}, {imm}"), "Sra");
+}
+
+TEST_F(AssemblerMIPSTest, Sllv) {
+ DriverStr(RepeatRRR(&mips::MipsAssembler::Sllv, "sllv ${reg1}, ${reg2}, ${reg3}"), "Sllv");
+}
+
+TEST_F(AssemblerMIPSTest, Srlv) {
+ DriverStr(RepeatRRR(&mips::MipsAssembler::Srlv, "srlv ${reg1}, ${reg2}, ${reg3}"), "Srlv");
+}
+
+TEST_F(AssemblerMIPSTest, Srav) {
+ DriverStr(RepeatRRR(&mips::MipsAssembler::Srav, "srav ${reg1}, ${reg2}, ${reg3}"), "Srav");
+}
+
+TEST_F(AssemblerMIPSTest, Lb) {
+ DriverStr(RepeatRRIb(&mips::MipsAssembler::Lb, -16, "lb ${reg1}, {imm}(${reg2})"), "Lb");
+}
+
+TEST_F(AssemblerMIPSTest, Lh) {
+ DriverStr(RepeatRRIb(&mips::MipsAssembler::Lh, -16, "lh ${reg1}, {imm}(${reg2})"), "Lh");
+}
+
+TEST_F(AssemblerMIPSTest, Lw) {
+ DriverStr(RepeatRRIb(&mips::MipsAssembler::Lw, -16, "lw ${reg1}, {imm}(${reg2})"), "Lw");
+}
+
+TEST_F(AssemblerMIPSTest, Lbu) {
+ DriverStr(RepeatRRIb(&mips::MipsAssembler::Lbu, -16, "lbu ${reg1}, {imm}(${reg2})"), "Lbu");
+}
+
+TEST_F(AssemblerMIPSTest, Lhu) {
+ DriverStr(RepeatRRIb(&mips::MipsAssembler::Lhu, -16, "lhu ${reg1}, {imm}(${reg2})"), "Lhu");
+}
+
+TEST_F(AssemblerMIPSTest, Lui) {
+ DriverStr(RepeatRIb(&mips::MipsAssembler::Lui, 16, "lui ${reg}, {imm}"), "Lui");
+}
+
+TEST_F(AssemblerMIPSTest, Mfhi) {
+ DriverStr(RepeatR(&mips::MipsAssembler::Mfhi, "mfhi ${reg}"), "Mfhi");
+}
+
+TEST_F(AssemblerMIPSTest, Mflo) {
+ DriverStr(RepeatR(&mips::MipsAssembler::Mflo, "mflo ${reg}"), "Mflo");
+}
+
+TEST_F(AssemblerMIPSTest, Sb) {
+ DriverStr(RepeatRRIb(&mips::MipsAssembler::Sb, -16, "sb ${reg1}, {imm}(${reg2})"), "Sb");
+}
+
+TEST_F(AssemblerMIPSTest, Sh) {
+ DriverStr(RepeatRRIb(&mips::MipsAssembler::Sh, -16, "sh ${reg1}, {imm}(${reg2})"), "Sh");
+}
+
+TEST_F(AssemblerMIPSTest, Sw) {
+ DriverStr(RepeatRRIb(&mips::MipsAssembler::Sw, -16, "sw ${reg1}, {imm}(${reg2})"), "Sw");
+}
+
+TEST_F(AssemblerMIPSTest, Slt) {
+ DriverStr(RepeatRRR(&mips::MipsAssembler::Slt, "slt ${reg1}, ${reg2}, ${reg3}"), "Slt");
+}
+
+TEST_F(AssemblerMIPSTest, Sltu) {
+ DriverStr(RepeatRRR(&mips::MipsAssembler::Sltu, "sltu ${reg1}, ${reg2}, ${reg3}"), "Sltu");
+}
+
+TEST_F(AssemblerMIPSTest, Slti) {
+ DriverStr(RepeatRRIb(&mips::MipsAssembler::Slti, -16, "slti ${reg1}, ${reg2}, {imm}"), "Slti");
+}
+
+TEST_F(AssemblerMIPSTest, Sltiu) {
+ DriverStr(RepeatRRIb(&mips::MipsAssembler::Sltiu, -16, "sltiu ${reg1}, ${reg2}, {imm}"), "Sltiu");
+}
+
+TEST_F(AssemblerMIPSTest, AddS) {
+ DriverStr(RepeatFFF(&mips::MipsAssembler::AddS, "add.s ${reg1}, ${reg2}, ${reg3}"), "AddS");
+}
+
+TEST_F(AssemblerMIPSTest, AddD) {
+ DriverStr(RepeatFFF(&mips::MipsAssembler::AddD, "add.d ${reg1}, ${reg2}, ${reg3}"), "AddD");
+}
+
+TEST_F(AssemblerMIPSTest, SubS) {
+ DriverStr(RepeatFFF(&mips::MipsAssembler::SubS, "sub.s ${reg1}, ${reg2}, ${reg3}"), "SubS");
+}
+
+TEST_F(AssemblerMIPSTest, SubD) {
+ DriverStr(RepeatFFF(&mips::MipsAssembler::SubD, "sub.d ${reg1}, ${reg2}, ${reg3}"), "SubD");
+}
+
+TEST_F(AssemblerMIPSTest, MulS) {
+ DriverStr(RepeatFFF(&mips::MipsAssembler::MulS, "mul.s ${reg1}, ${reg2}, ${reg3}"), "MulS");
+}
+
+TEST_F(AssemblerMIPSTest, MulD) {
+ DriverStr(RepeatFFF(&mips::MipsAssembler::MulD, "mul.d ${reg1}, ${reg2}, ${reg3}"), "MulD");
+}
+
+TEST_F(AssemblerMIPSTest, DivS) {
+ DriverStr(RepeatFFF(&mips::MipsAssembler::DivS, "div.s ${reg1}, ${reg2}, ${reg3}"), "DivS");
+}
+
+TEST_F(AssemblerMIPSTest, DivD) {
+ DriverStr(RepeatFFF(&mips::MipsAssembler::DivD, "div.d ${reg1}, ${reg2}, ${reg3}"), "DivD");
+}
+
+TEST_F(AssemblerMIPSTest, MovS) {
+ DriverStr(RepeatFF(&mips::MipsAssembler::MovS, "mov.s ${reg1}, ${reg2}"), "MovS");
+}
+
+TEST_F(AssemblerMIPSTest, MovD) {
+ DriverStr(RepeatFF(&mips::MipsAssembler::MovD, "mov.d ${reg1}, ${reg2}"), "MovD");
+}
+
+TEST_F(AssemblerMIPSTest, NegS) {
+ DriverStr(RepeatFF(&mips::MipsAssembler::NegS, "neg.s ${reg1}, ${reg2}"), "NegS");
+}
+
+TEST_F(AssemblerMIPSTest, NegD) {
+ DriverStr(RepeatFF(&mips::MipsAssembler::NegD, "neg.d ${reg1}, ${reg2}"), "NegD");
+}
+
+TEST_F(AssemblerMIPSTest, CvtSW) {
+ DriverStr(RepeatFF(&mips::MipsAssembler::Cvtsw, "cvt.s.w ${reg1}, ${reg2}"), "CvtSW");
+}
+
+TEST_F(AssemblerMIPSTest, CvtDW) {
+ DriverStr(RepeatFF(&mips::MipsAssembler::Cvtdw, "cvt.d.w ${reg1}, ${reg2}"), "CvtDW");
+}
+
+TEST_F(AssemblerMIPSTest, CvtSD) {
+ DriverStr(RepeatFF(&mips::MipsAssembler::Cvtsd, "cvt.s.d ${reg1}, ${reg2}"), "CvtSD");
+}
+
+TEST_F(AssemblerMIPSTest, CvtDS) {
+ DriverStr(RepeatFF(&mips::MipsAssembler::Cvtds, "cvt.d.s ${reg1}, ${reg2}"), "CvtDS");
+}
+
+TEST_F(AssemblerMIPSTest, Mfc1) {
+ DriverStr(RepeatRF(&mips::MipsAssembler::Mfc1, "mfc1 ${reg1}, ${reg2}"), "Mfc1");
+}
+
+TEST_F(AssemblerMIPSTest, Mtc1) {
+ DriverStr(RepeatRF(&mips::MipsAssembler::Mtc1, "mtc1 ${reg1}, ${reg2}"), "Mtc1");
+}
+
+TEST_F(AssemblerMIPSTest, Mfhc1) {
+ DriverStr(RepeatRF(&mips::MipsAssembler::Mfhc1, "mfhc1 ${reg1}, ${reg2}"), "Mfhc1");
+}
+
+TEST_F(AssemblerMIPSTest, Mthc1) {
+ DriverStr(RepeatRF(&mips::MipsAssembler::Mthc1, "mthc1 ${reg1}, ${reg2}"), "Mthc1");
+}
+
+TEST_F(AssemblerMIPSTest, Lwc1) {
+ DriverStr(RepeatFRIb(&mips::MipsAssembler::Lwc1, -16, "lwc1 ${reg1}, {imm}(${reg2})"), "Lwc1");
+}
+
+TEST_F(AssemblerMIPSTest, Ldc1) {
+ DriverStr(RepeatFRIb(&mips::MipsAssembler::Ldc1, -16, "ldc1 ${reg1}, {imm}(${reg2})"), "Ldc1");
+}
+
+TEST_F(AssemblerMIPSTest, Swc1) {
+ DriverStr(RepeatFRIb(&mips::MipsAssembler::Swc1, -16, "swc1 ${reg1}, {imm}(${reg2})"), "Swc1");
+}
+
+TEST_F(AssemblerMIPSTest, Sdc1) {
+ DriverStr(RepeatFRIb(&mips::MipsAssembler::Sdc1, -16, "sdc1 ${reg1}, {imm}(${reg2})"), "Sdc1");
+}
+
+TEST_F(AssemblerMIPSTest, Move) {
+ DriverStr(RepeatRR(&mips::MipsAssembler::Move, "or ${reg1}, ${reg2}, $zero"), "Move");
+}
+
+TEST_F(AssemblerMIPSTest, Clear) {
+ DriverStr(RepeatR(&mips::MipsAssembler::Clear, "or ${reg}, $zero, $zero"), "Clear");
+}
+
+TEST_F(AssemblerMIPSTest, Not) {
+ DriverStr(RepeatRR(&mips::MipsAssembler::Not, "nor ${reg1}, ${reg2}, $zero"), "Not");
+}
+
+TEST_F(AssemblerMIPSTest, LoadFromOffset) {
+ __ LoadFromOffset(mips::kLoadSignedByte, mips::A0, mips::A0, 0);
+ __ LoadFromOffset(mips::kLoadSignedByte, mips::A0, mips::A1, 0);
+ __ LoadFromOffset(mips::kLoadSignedByte, mips::A0, mips::A1, 256);
+ __ LoadFromOffset(mips::kLoadSignedByte, mips::A0, mips::A1, 1000);
+ __ LoadFromOffset(mips::kLoadSignedByte, mips::A0, mips::A1, 0x8000);
+ __ LoadFromOffset(mips::kLoadSignedByte, mips::A0, mips::A1, 0x10000);
+ __ LoadFromOffset(mips::kLoadSignedByte, mips::A0, mips::A1, 0x12345678);
+ __ LoadFromOffset(mips::kLoadSignedByte, mips::A0, mips::A1, -256);
+ __ LoadFromOffset(mips::kLoadSignedByte, mips::A0, mips::A1, 0xFFFF8000);
+ __ LoadFromOffset(mips::kLoadSignedByte, mips::A0, mips::A1, 0xABCDEF00);
+
+ __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A0, mips::A0, 0);
+ __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A0, mips::A1, 0);
+ __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A0, mips::A1, 256);
+ __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A0, mips::A1, 1000);
+ __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A0, mips::A1, 0x8000);
+ __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A0, mips::A1, 0x10000);
+ __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A0, mips::A1, 0x12345678);
+ __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A0, mips::A1, -256);
+ __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A0, mips::A1, 0xFFFF8000);
+ __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A0, mips::A1, 0xABCDEF00);
+
+ __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A0, mips::A0, 0);
+ __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A0, mips::A1, 0);
+ __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A0, mips::A1, 256);
+ __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A0, mips::A1, 1000);
+ __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A0, mips::A1, 0x8000);
+ __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A0, mips::A1, 0x10000);
+ __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A0, mips::A1, 0x12345678);
+ __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A0, mips::A1, -256);
+ __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A0, mips::A1, 0xFFFF8000);
+ __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A0, mips::A1, 0xABCDEF00);
+
+ __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A0, mips::A0, 0);
+ __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A0, mips::A1, 0);
+ __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A0, mips::A1, 256);
+ __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A0, mips::A1, 1000);
+ __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A0, mips::A1, 0x8000);
+ __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A0, mips::A1, 0x10000);
+ __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A0, mips::A1, 0x12345678);
+ __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A0, mips::A1, -256);
+ __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A0, mips::A1, 0xFFFF8000);
+ __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A0, mips::A1, 0xABCDEF00);
+
+ __ LoadFromOffset(mips::kLoadWord, mips::A0, mips::A0, 0);
+ __ LoadFromOffset(mips::kLoadWord, mips::A0, mips::A1, 0);
+ __ LoadFromOffset(mips::kLoadWord, mips::A0, mips::A1, 256);
+ __ LoadFromOffset(mips::kLoadWord, mips::A0, mips::A1, 1000);
+ __ LoadFromOffset(mips::kLoadWord, mips::A0, mips::A1, 0x8000);
+ __ LoadFromOffset(mips::kLoadWord, mips::A0, mips::A1, 0x10000);
+ __ LoadFromOffset(mips::kLoadWord, mips::A0, mips::A1, 0x12345678);
+ __ LoadFromOffset(mips::kLoadWord, mips::A0, mips::A1, -256);
+ __ LoadFromOffset(mips::kLoadWord, mips::A0, mips::A1, 0xFFFF8000);
+ __ LoadFromOffset(mips::kLoadWord, mips::A0, mips::A1, 0xABCDEF00);
+
+ __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A0, 0);
+ __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A1, 0);
+ __ LoadFromOffset(mips::kLoadDoubleword, mips::A1, mips::A0, 0);
+ __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, 0);
+ __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, 256);
+ __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, 1000);
+ __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, 0x8000);
+ __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, 0x10000);
+ __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, 0x12345678);
+ __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, -256);
+ __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, 0xFFFF8000);
+ __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, 0xABCDEF00);
+
+ const char* expected =
+ "lb $a0, 0($a0)\n"
+ "lb $a0, 0($a1)\n"
+ "lb $a0, 256($a1)\n"
+ "lb $a0, 1000($a1)\n"
+ "ori $at, $zero, 0x8000\n"
+ "addu $at, $at, $a1\n"
+ "lb $a0, 0($at)\n"
+ "lui $at, 1\n"
+ "addu $at, $at, $a1\n"
+ "lb $a0, 0($at)\n"
+ "lui $at, 0x1234\n"
+ "ori $at, 0x5678\n"
+ "addu $at, $at, $a1\n"
+ "lb $a0, 0($at)\n"
+ "lb $a0, -256($a1)\n"
+ "lb $a0, 0xFFFF8000($a1)\n"
+ "lui $at, 0xABCD\n"
+ "ori $at, 0xEF00\n"
+ "addu $at, $at, $a1\n"
+ "lb $a0, 0($at)\n"
+
+ "lbu $a0, 0($a0)\n"
+ "lbu $a0, 0($a1)\n"
+ "lbu $a0, 256($a1)\n"
+ "lbu $a0, 1000($a1)\n"
+ "ori $at, $zero, 0x8000\n"
+ "addu $at, $at, $a1\n"
+ "lbu $a0, 0($at)\n"
+ "lui $at, 1\n"
+ "addu $at, $at, $a1\n"
+ "lbu $a0, 0($at)\n"
+ "lui $at, 0x1234\n"
+ "ori $at, 0x5678\n"
+ "addu $at, $at, $a1\n"
+ "lbu $a0, 0($at)\n"
+ "lbu $a0, -256($a1)\n"
+ "lbu $a0, 0xFFFF8000($a1)\n"
+ "lui $at, 0xABCD\n"
+ "ori $at, 0xEF00\n"
+ "addu $at, $at, $a1\n"
+ "lbu $a0, 0($at)\n"
+
+ "lh $a0, 0($a0)\n"
+ "lh $a0, 0($a1)\n"
+ "lh $a0, 256($a1)\n"
+ "lh $a0, 1000($a1)\n"
+ "ori $at, $zero, 0x8000\n"
+ "addu $at, $at, $a1\n"
+ "lh $a0, 0($at)\n"
+ "lui $at, 1\n"
+ "addu $at, $at, $a1\n"
+ "lh $a0, 0($at)\n"
+ "lui $at, 0x1234\n"
+ "ori $at, 0x5678\n"
+ "addu $at, $at, $a1\n"
+ "lh $a0, 0($at)\n"
+ "lh $a0, -256($a1)\n"
+ "lh $a0, 0xFFFF8000($a1)\n"
+ "lui $at, 0xABCD\n"
+ "ori $at, 0xEF00\n"
+ "addu $at, $at, $a1\n"
+ "lh $a0, 0($at)\n"
+
+ "lhu $a0, 0($a0)\n"
+ "lhu $a0, 0($a1)\n"
+ "lhu $a0, 256($a1)\n"
+ "lhu $a0, 1000($a1)\n"
+ "ori $at, $zero, 0x8000\n"
+ "addu $at, $at, $a1\n"
+ "lhu $a0, 0($at)\n"
+ "lui $at, 1\n"
+ "addu $at, $at, $a1\n"
+ "lhu $a0, 0($at)\n"
+ "lui $at, 0x1234\n"
+ "ori $at, 0x5678\n"
+ "addu $at, $at, $a1\n"
+ "lhu $a0, 0($at)\n"
+ "lhu $a0, -256($a1)\n"
+ "lhu $a0, 0xFFFF8000($a1)\n"
+ "lui $at, 0xABCD\n"
+ "ori $at, 0xEF00\n"
+ "addu $at, $at, $a1\n"
+ "lhu $a0, 0($at)\n"
+
+ "lw $a0, 0($a0)\n"
+ "lw $a0, 0($a1)\n"
+ "lw $a0, 256($a1)\n"
+ "lw $a0, 1000($a1)\n"
+ "ori $at, $zero, 0x8000\n"
+ "addu $at, $at, $a1\n"
+ "lw $a0, 0($at)\n"
+ "lui $at, 1\n"
+ "addu $at, $at, $a1\n"
+ "lw $a0, 0($at)\n"
+ "lui $at, 0x1234\n"
+ "ori $at, 0x5678\n"
+ "addu $at, $at, $a1\n"
+ "lw $a0, 0($at)\n"
+ "lw $a0, -256($a1)\n"
+ "lw $a0, 0xFFFF8000($a1)\n"
+ "lui $at, 0xABCD\n"
+ "ori $at, 0xEF00\n"
+ "addu $at, $at, $a1\n"
+ "lw $a0, 0($at)\n"
+
+ "lw $a1, 4($a0)\n"
+ "lw $a0, 0($a0)\n"
+ "lw $a0, 0($a1)\n"
+ "lw $a1, 4($a1)\n"
+ "lw $a1, 0($a0)\n"
+ "lw $a2, 4($a0)\n"
+ "lw $a0, 0($a2)\n"
+ "lw $a1, 4($a2)\n"
+ "lw $a0, 256($a2)\n"
+ "lw $a1, 260($a2)\n"
+ "lw $a0, 1000($a2)\n"
+ "lw $a1, 1004($a2)\n"
+ "ori $at, $zero, 0x8000\n"
+ "addu $at, $at, $a2\n"
+ "lw $a0, 0($at)\n"
+ "lw $a1, 4($at)\n"
+ "lui $at, 1\n"
+ "addu $at, $at, $a2\n"
+ "lw $a0, 0($at)\n"
+ "lw $a1, 4($at)\n"
+ "lui $at, 0x1234\n"
+ "ori $at, 0x5678\n"
+ "addu $at, $at, $a2\n"
+ "lw $a0, 0($at)\n"
+ "lw $a1, 4($at)\n"
+ "lw $a0, -256($a2)\n"
+ "lw $a1, -252($a2)\n"
+ "lw $a0, 0xFFFF8000($a2)\n"
+ "lw $a1, 0xFFFF8004($a2)\n"
+ "lui $at, 0xABCD\n"
+ "ori $at, 0xEF00\n"
+ "addu $at, $at, $a2\n"
+ "lw $a0, 0($at)\n"
+ "lw $a1, 4($at)\n";
+ DriverStr(expected, "LoadFromOffset");
+}
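// The expected strings above encode a simple splitting rule for 32-bit loads: a signed 16-bit
// offset is used directly, anything larger is first materialized into $at and added to the
// base register. A self-contained sketch that reproduces the textual form (immediate
// formatting differs cosmetically from the strings above, e.g. "0x1" vs "1"):
#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

std::vector<std::string> LoadWordSketch(const std::string& dst,
                                        const std::string& base,
                                        int32_t offset) {
  char buf[64];
  std::vector<std::string> insns;
  if (offset >= -0x8000 && offset <= 0x7FFF) {        // fits the load's immediate field
    std::snprintf(buf, sizeof(buf), "lw %s, %d(%s)", dst.c_str(), offset, base.c_str());
    return {buf};
  }
  uint32_t uoff = static_cast<uint32_t>(offset);
  if ((uoff & 0xFFFF0000u) == 0u) {                   // fits in 16 unsigned bits: single ori
    std::snprintf(buf, sizeof(buf), "ori $at, $zero, 0x%X", uoff);
    insns.push_back(buf);
  } else if ((uoff & 0xFFFFu) == 0u) {                // low half zero: single lui
    std::snprintf(buf, sizeof(buf), "lui $at, 0x%X", uoff >> 16);
    insns.push_back(buf);
  } else {                                            // general case: lui + ori
    std::snprintf(buf, sizeof(buf), "lui $at, 0x%X", uoff >> 16);
    insns.push_back(buf);
    std::snprintf(buf, sizeof(buf), "ori $at, 0x%X", uoff & 0xFFFFu);
    insns.push_back(buf);
  }
  std::snprintf(buf, sizeof(buf), "addu $at, $at, %s", base.c_str());
  insns.push_back(buf);
  std::snprintf(buf, sizeof(buf), "lw %s, 0($at)", dst.c_str());
  insns.push_back(buf);
  return insns;
}
// LoadWordSketch("$a0", "$a1", 0x12345678) gives:
//   lui $at, 0x1234 / ori $at, 0x5678 / addu $at, $at, $a1 / lw $a0, 0($at)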
+
+TEST_F(AssemblerMIPSTest, LoadSFromOffset) {
+ __ LoadSFromOffset(mips::F0, mips::A0, 0);
+ __ LoadSFromOffset(mips::F0, mips::A0, 4);
+ __ LoadSFromOffset(mips::F0, mips::A0, 256);
+ __ LoadSFromOffset(mips::F0, mips::A0, 0x8000);
+ __ LoadSFromOffset(mips::F0, mips::A0, 0x10000);
+ __ LoadSFromOffset(mips::F0, mips::A0, 0x12345678);
+ __ LoadSFromOffset(mips::F0, mips::A0, -256);
+ __ LoadSFromOffset(mips::F0, mips::A0, 0xFFFF8000);
+ __ LoadSFromOffset(mips::F0, mips::A0, 0xABCDEF00);
+
+ const char* expected =
+ "lwc1 $f0, 0($a0)\n"
+ "lwc1 $f0, 4($a0)\n"
+ "lwc1 $f0, 256($a0)\n"
+ "ori $at, $zero, 0x8000\n"
+ "addu $at, $at, $a0\n"
+ "lwc1 $f0, 0($at)\n"
+ "lui $at, 1\n"
+ "addu $at, $at, $a0\n"
+ "lwc1 $f0, 0($at)\n"
+ "lui $at, 0x1234\n"
+ "ori $at, 0x5678\n"
+ "addu $at, $at, $a0\n"
+ "lwc1 $f0, 0($at)\n"
+ "lwc1 $f0, -256($a0)\n"
+ "lwc1 $f0, 0xFFFF8000($a0)\n"
+ "lui $at, 0xABCD\n"
+ "ori $at, 0xEF00\n"
+ "addu $at, $at, $a0\n"
+ "lwc1 $f0, 0($at)\n";
+ DriverStr(expected, "LoadSFromOffset");
+}
+
+
+TEST_F(AssemblerMIPSTest, LoadDFromOffset) {
+ __ LoadDFromOffset(mips::F0, mips::A0, 0);
+ __ LoadDFromOffset(mips::F0, mips::A0, 4);
+ __ LoadDFromOffset(mips::F0, mips::A0, 256);
+ __ LoadDFromOffset(mips::F0, mips::A0, 0x8000);
+ __ LoadDFromOffset(mips::F0, mips::A0, 0x10000);
+ __ LoadDFromOffset(mips::F0, mips::A0, 0x12345678);
+ __ LoadDFromOffset(mips::F0, mips::A0, -256);
+ __ LoadDFromOffset(mips::F0, mips::A0, 0xFFFF8000);
+ __ LoadDFromOffset(mips::F0, mips::A0, 0xABCDEF00);
+
+ const char* expected =
+ "ldc1 $f0, 0($a0)\n"
+ "lwc1 $f0, 4($a0)\n"
+ "lwc1 $f1, 8($a0)\n"
+ "ldc1 $f0, 256($a0)\n"
+ "ori $at, $zero, 0x8000\n"
+ "addu $at, $at, $a0\n"
+ "ldc1 $f0, 0($at)\n"
+ "lui $at, 1\n"
+ "addu $at, $at, $a0\n"
+ "ldc1 $f0, 0($at)\n"
+ "lui $at, 0x1234\n"
+ "ori $at, 0x5678\n"
+ "addu $at, $at, $a0\n"
+ "ldc1 $f0, 0($at)\n"
+ "ldc1 $f0, -256($a0)\n"
+ "ldc1 $f0, 0xFFFF8000($a0)\n"
+ "lui $at, 0xABCD\n"
+ "ori $at, 0xEF00\n"
+ "addu $at, $at, $a0\n"
+ "ldc1 $f0, 0($at)\n";
+ DriverStr(expected, "LoadDFromOffset");
+}
+
+TEST_F(AssemblerMIPSTest, StoreToOffset) {
+ __ StoreToOffset(mips::kStoreByte, mips::A0, mips::A0, 0);
+ __ StoreToOffset(mips::kStoreByte, mips::A0, mips::A1, 0);
+ __ StoreToOffset(mips::kStoreByte, mips::A0, mips::A1, 256);
+ __ StoreToOffset(mips::kStoreByte, mips::A0, mips::A1, 1000);
+ __ StoreToOffset(mips::kStoreByte, mips::A0, mips::A1, 0x8000);
+ __ StoreToOffset(mips::kStoreByte, mips::A0, mips::A1, 0x10000);
+ __ StoreToOffset(mips::kStoreByte, mips::A0, mips::A1, 0x12345678);
+ __ StoreToOffset(mips::kStoreByte, mips::A0, mips::A1, -256);
+ __ StoreToOffset(mips::kStoreByte, mips::A0, mips::A1, 0xFFFF8000);
+ __ StoreToOffset(mips::kStoreByte, mips::A0, mips::A1, 0xABCDEF00);
+
+ __ StoreToOffset(mips::kStoreHalfword, mips::A0, mips::A0, 0);
+ __ StoreToOffset(mips::kStoreHalfword, mips::A0, mips::A1, 0);
+ __ StoreToOffset(mips::kStoreHalfword, mips::A0, mips::A1, 256);
+ __ StoreToOffset(mips::kStoreHalfword, mips::A0, mips::A1, 1000);
+ __ StoreToOffset(mips::kStoreHalfword, mips::A0, mips::A1, 0x8000);
+ __ StoreToOffset(mips::kStoreHalfword, mips::A0, mips::A1, 0x10000);
+ __ StoreToOffset(mips::kStoreHalfword, mips::A0, mips::A1, 0x12345678);
+ __ StoreToOffset(mips::kStoreHalfword, mips::A0, mips::A1, -256);
+ __ StoreToOffset(mips::kStoreHalfword, mips::A0, mips::A1, 0xFFFF8000);
+ __ StoreToOffset(mips::kStoreHalfword, mips::A0, mips::A1, 0xABCDEF00);
+
+ __ StoreToOffset(mips::kStoreWord, mips::A0, mips::A0, 0);
+ __ StoreToOffset(mips::kStoreWord, mips::A0, mips::A1, 0);
+ __ StoreToOffset(mips::kStoreWord, mips::A0, mips::A1, 256);
+ __ StoreToOffset(mips::kStoreWord, mips::A0, mips::A1, 1000);
+ __ StoreToOffset(mips::kStoreWord, mips::A0, mips::A1, 0x8000);
+ __ StoreToOffset(mips::kStoreWord, mips::A0, mips::A1, 0x10000);
+ __ StoreToOffset(mips::kStoreWord, mips::A0, mips::A1, 0x12345678);
+ __ StoreToOffset(mips::kStoreWord, mips::A0, mips::A1, -256);
+ __ StoreToOffset(mips::kStoreWord, mips::A0, mips::A1, 0xFFFF8000);
+ __ StoreToOffset(mips::kStoreWord, mips::A0, mips::A1, 0xABCDEF00);
+
+ __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, 0);
+ __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, 256);
+ __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, 1000);
+ __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, 0x8000);
+ __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, 0x10000);
+ __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, 0x12345678);
+ __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, -256);
+ __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, 0xFFFF8000);
+ __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, 0xABCDEF00);
+
+ const char* expected =
+ "sb $a0, 0($a0)\n"
+ "sb $a0, 0($a1)\n"
+ "sb $a0, 256($a1)\n"
+ "sb $a0, 1000($a1)\n"
+ "ori $at, $zero, 0x8000\n"
+ "addu $at, $at, $a1\n"
+ "sb $a0, 0($at)\n"
+ "lui $at, 1\n"
+ "addu $at, $at, $a1\n"
+ "sb $a0, 0($at)\n"
+ "lui $at, 0x1234\n"
+ "ori $at, 0x5678\n"
+ "addu $at, $at, $a1\n"
+ "sb $a0, 0($at)\n"
+ "sb $a0, -256($a1)\n"
+ "sb $a0, 0xFFFF8000($a1)\n"
+ "lui $at, 0xABCD\n"
+ "ori $at, 0xEF00\n"
+ "addu $at, $at, $a1\n"
+ "sb $a0, 0($at)\n"
+
+ "sh $a0, 0($a0)\n"
+ "sh $a0, 0($a1)\n"
+ "sh $a0, 256($a1)\n"
+ "sh $a0, 1000($a1)\n"
+ "ori $at, $zero, 0x8000\n"
+ "addu $at, $at, $a1\n"
+ "sh $a0, 0($at)\n"
+ "lui $at, 1\n"
+ "addu $at, $at, $a1\n"
+ "sh $a0, 0($at)\n"
+ "lui $at, 0x1234\n"
+ "ori $at, 0x5678\n"
+ "addu $at, $at, $a1\n"
+ "sh $a0, 0($at)\n"
+ "sh $a0, -256($a1)\n"
+ "sh $a0, 0xFFFF8000($a1)\n"
+ "lui $at, 0xABCD\n"
+ "ori $at, 0xEF00\n"
+ "addu $at, $at, $a1\n"
+ "sh $a0, 0($at)\n"
+
+ "sw $a0, 0($a0)\n"
+ "sw $a0, 0($a1)\n"
+ "sw $a0, 256($a1)\n"
+ "sw $a0, 1000($a1)\n"
+ "ori $at, $zero, 0x8000\n"
+ "addu $at, $at, $a1\n"
+ "sw $a0, 0($at)\n"
+ "lui $at, 1\n"
+ "addu $at, $at, $a1\n"
+ "sw $a0, 0($at)\n"
+ "lui $at, 0x1234\n"
+ "ori $at, 0x5678\n"
+ "addu $at, $at, $a1\n"
+ "sw $a0, 0($at)\n"
+ "sw $a0, -256($a1)\n"
+ "sw $a0, 0xFFFF8000($a1)\n"
+ "lui $at, 0xABCD\n"
+ "ori $at, 0xEF00\n"
+ "addu $at, $at, $a1\n"
+ "sw $a0, 0($at)\n"
+
+ "sw $a0, 0($a2)\n"
+ "sw $a1, 4($a2)\n"
+ "sw $a0, 256($a2)\n"
+ "sw $a1, 260($a2)\n"
+ "sw $a0, 1000($a2)\n"
+ "sw $a1, 1004($a2)\n"
+ "ori $at, $zero, 0x8000\n"
+ "addu $at, $at, $a2\n"
+ "sw $a0, 0($at)\n"
+ "sw $a1, 4($at)\n"
+ "lui $at, 1\n"
+ "addu $at, $at, $a2\n"
+ "sw $a0, 0($at)\n"
+ "sw $a1, 4($at)\n"
+ "lui $at, 0x1234\n"
+ "ori $at, 0x5678\n"
+ "addu $at, $at, $a2\n"
+ "sw $a0, 0($at)\n"
+ "sw $a1, 4($at)\n"
+ "sw $a0, -256($a2)\n"
+ "sw $a1, -252($a2)\n"
+ "sw $a0, 0xFFFF8000($a2)\n"
+ "sw $a1, 0xFFFF8004($a2)\n"
+ "lui $at, 0xABCD\n"
+ "ori $at, 0xEF00\n"
+ "addu $at, $at, $a2\n"
+ "sw $a0, 0($at)\n"
+ "sw $a1, 4($at)\n";
+ DriverStr(expected, "StoreToOffset");
+}
+
+TEST_F(AssemblerMIPSTest, StoreSToOffset) {
+ __ StoreSToOffset(mips::F0, mips::A0, 0);
+ __ StoreSToOffset(mips::F0, mips::A0, 4);
+ __ StoreSToOffset(mips::F0, mips::A0, 256);
+ __ StoreSToOffset(mips::F0, mips::A0, 0x8000);
+ __ StoreSToOffset(mips::F0, mips::A0, 0x10000);
+ __ StoreSToOffset(mips::F0, mips::A0, 0x12345678);
+ __ StoreSToOffset(mips::F0, mips::A0, -256);
+ __ StoreSToOffset(mips::F0, mips::A0, 0xFFFF8000);
+ __ StoreSToOffset(mips::F0, mips::A0, 0xABCDEF00);
+
+ const char* expected =
+ "swc1 $f0, 0($a0)\n"
+ "swc1 $f0, 4($a0)\n"
+ "swc1 $f0, 256($a0)\n"
+ "ori $at, $zero, 0x8000\n"
+ "addu $at, $at, $a0\n"
+ "swc1 $f0, 0($at)\n"
+ "lui $at, 1\n"
+ "addu $at, $at, $a0\n"
+ "swc1 $f0, 0($at)\n"
+ "lui $at, 0x1234\n"
+ "ori $at, 0x5678\n"
+ "addu $at, $at, $a0\n"
+ "swc1 $f0, 0($at)\n"
+ "swc1 $f0, -256($a0)\n"
+ "swc1 $f0, 0xFFFF8000($a0)\n"
+ "lui $at, 0xABCD\n"
+ "ori $at, 0xEF00\n"
+ "addu $at, $at, $a0\n"
+ "swc1 $f0, 0($at)\n";
+ DriverStr(expected, "StoreSToOffset");
+}
+
+TEST_F(AssemblerMIPSTest, StoreDToOffset) {
+ __ StoreDToOffset(mips::F0, mips::A0, 0);
+ __ StoreDToOffset(mips::F0, mips::A0, 4);
+ __ StoreDToOffset(mips::F0, mips::A0, 256);
+ __ StoreDToOffset(mips::F0, mips::A0, 0x8000);
+ __ StoreDToOffset(mips::F0, mips::A0, 0x10000);
+ __ StoreDToOffset(mips::F0, mips::A0, 0x12345678);
+ __ StoreDToOffset(mips::F0, mips::A0, -256);
+ __ StoreDToOffset(mips::F0, mips::A0, 0xFFFF8000);
+ __ StoreDToOffset(mips::F0, mips::A0, 0xABCDEF00);
+
+ const char* expected =
+ "sdc1 $f0, 0($a0)\n"
+ "swc1 $f0, 4($a0)\n"
+ "swc1 $f1, 8($a0)\n"
+ "sdc1 $f0, 256($a0)\n"
+ "ori $at, $zero, 0x8000\n"
+ "addu $at, $at, $a0\n"
+ "sdc1 $f0, 0($at)\n"
+ "lui $at, 1\n"
+ "addu $at, $at, $a0\n"
+ "sdc1 $f0, 0($at)\n"
+ "lui $at, 0x1234\n"
+ "ori $at, 0x5678\n"
+ "addu $at, $at, $a0\n"
+ "sdc1 $f0, 0($at)\n"
+ "sdc1 $f0, -256($a0)\n"
+ "sdc1 $f0, 0xFFFF8000($a0)\n"
+ "lui $at, 0xABCD\n"
+ "ori $at, 0xEF00\n"
+ "addu $at, $at, $a0\n"
+ "sdc1 $f0, 0($at)\n";
+ DriverStr(expected, "StoreDToOffset");
+}
+
+TEST_F(AssemblerMIPSTest, B) {
+ mips::MipsLabel label1, label2;
+ __ B(&label1);
+ constexpr size_t kAdduCount1 = 63;
+ for (size_t i = 0; i != kAdduCount1; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Bind(&label1);
+ __ B(&label2);
+ constexpr size_t kAdduCount2 = 64;
+ for (size_t i = 0; i != kAdduCount2; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Bind(&label2);
+ __ B(&label1);
+
+ std::string expected =
+ ".set noreorder\n"
+ "b 1f\n"
+ "nop\n" +
+ RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") +
+ "1:\n"
+ "b 2f\n"
+ "nop\n" +
+ RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") +
+ "2:\n"
+ "b 1b\n"
+ "nop\n";
+ DriverStr(expected, "B");
+}
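// Every branch in the expected text is followed by an explicit nop because the assembler fills
// its own delay slots and ".set noreorder" stops GAS from reordering or adding nops of its own.
// The 63/64 padding addu instructions keep all targets well inside the signed 16-bit word
// offset of a short branch, so PromoteBranches() never has to rewrite them into long-branch
// sequences. A small sketch of that range check (not the real Branch bookkeeping):
#include <cstdint>

static bool FitsShortBranch(uint32_t branch_pc, uint32_t target_pc) {
  // The 16-bit field holds a signed word offset relative to the delay slot (branch_pc + 4).
  int32_t delta_bytes = static_cast<int32_t>(target_pc - (branch_pc + 4));
  int32_t delta_words = delta_bytes / 4;  // instructions are 4-byte aligned
  return delta_words >= -0x8000 && delta_words <= 0x7FFF;
}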
+
+TEST_F(AssemblerMIPSTest, Beq) {
+ mips::MipsLabel label;
+ __ Beq(mips::A0, mips::A1, &label);
+ constexpr size_t kAdduCount1 = 63;
+ for (size_t i = 0; i != kAdduCount1; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Bind(&label);
+ constexpr size_t kAdduCount2 = 64;
+ for (size_t i = 0; i != kAdduCount2; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Beq(mips::A2, mips::A3, &label);
+
+ std::string expected =
+ ".set noreorder\n"
+ "beq $a0, $a1, 1f\n"
+ "nop\n" +
+ RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") +
+ "1:\n" +
+ RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") +
+ "beq $a2, $a3, 1b\n"
+ "nop\n";
+ DriverStr(expected, "Beq");
+}
+
+TEST_F(AssemblerMIPSTest, Bne) {
+ mips::MipsLabel label;
+ __ Bne(mips::A0, mips::A1, &label);
+ constexpr size_t kAdduCount1 = 63;
+ for (size_t i = 0; i != kAdduCount1; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Bind(&label);
+ constexpr size_t kAdduCount2 = 64;
+ for (size_t i = 0; i != kAdduCount2; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Bne(mips::A2, mips::A3, &label);
+
+ std::string expected =
+ ".set noreorder\n"
+ "bne $a0, $a1, 1f\n"
+ "nop\n" +
+ RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") +
+ "1:\n" +
+ RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") +
+ "bne $a2, $a3, 1b\n"
+ "nop\n";
+ DriverStr(expected, "Bne");
+}
+
+TEST_F(AssemblerMIPSTest, Beqz) {
+ mips::MipsLabel label;
+ __ Beqz(mips::A0, &label);
+ constexpr size_t kAdduCount1 = 63;
+ for (size_t i = 0; i != kAdduCount1; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Bind(&label);
+ constexpr size_t kAdduCount2 = 64;
+ for (size_t i = 0; i != kAdduCount2; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Beqz(mips::A1, &label);
+
+ std::string expected =
+ ".set noreorder\n"
+ "beq $zero, $a0, 1f\n"
+ "nop\n" +
+ RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") +
+ "1:\n" +
+ RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") +
+ "beq $zero, $a1, 1b\n"
+ "nop\n";
+ DriverStr(expected, "Beqz");
+}
+
+TEST_F(AssemblerMIPSTest, Bnez) {
+ mips::MipsLabel label;
+ __ Bnez(mips::A0, &label);
+ constexpr size_t kAdduCount1 = 63;
+ for (size_t i = 0; i != kAdduCount1; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Bind(&label);
+ constexpr size_t kAdduCount2 = 64;
+ for (size_t i = 0; i != kAdduCount2; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Bnez(mips::A1, &label);
+
+ std::string expected =
+ ".set noreorder\n"
+ "bne $zero, $a0, 1f\n"
+ "nop\n" +
+ RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") +
+ "1:\n" +
+ RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") +
+ "bne $zero, $a1, 1b\n"
+ "nop\n";
+ DriverStr(expected, "Bnez");
+}
+
+TEST_F(AssemblerMIPSTest, Bltz) {
+ mips::MipsLabel label;
+ __ Bltz(mips::A0, &label);
+ constexpr size_t kAdduCount1 = 63;
+ for (size_t i = 0; i != kAdduCount1; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Bind(&label);
+ constexpr size_t kAdduCount2 = 64;
+ for (size_t i = 0; i != kAdduCount2; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Bltz(mips::A1, &label);
+
+ std::string expected =
+ ".set noreorder\n"
+ "bltz $a0, 1f\n"
+ "nop\n" +
+ RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") +
+ "1:\n" +
+ RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") +
+ "bltz $a1, 1b\n"
+ "nop\n";
+ DriverStr(expected, "Bltz");
+}
+
+TEST_F(AssemblerMIPSTest, Bgez) {
+ mips::MipsLabel label;
+ __ Bgez(mips::A0, &label);
+ constexpr size_t kAdduCount1 = 63;
+ for (size_t i = 0; i != kAdduCount1; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Bind(&label);
+ constexpr size_t kAdduCount2 = 64;
+ for (size_t i = 0; i != kAdduCount2; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Bgez(mips::A1, &label);
+
+ std::string expected =
+ ".set noreorder\n"
+ "bgez $a0, 1f\n"
+ "nop\n" +
+ RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") +
+ "1:\n" +
+ RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") +
+ "bgez $a1, 1b\n"
+ "nop\n";
+ DriverStr(expected, "Bgez");
+}
+
+TEST_F(AssemblerMIPSTest, Blez) {
+ mips::MipsLabel label;
+ __ Blez(mips::A0, &label);
+ constexpr size_t kAdduCount1 = 63;
+ for (size_t i = 0; i != kAdduCount1; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Bind(&label);
+ constexpr size_t kAdduCount2 = 64;
+ for (size_t i = 0; i != kAdduCount2; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Blez(mips::A1, &label);
+
+ std::string expected =
+ ".set noreorder\n"
+ "blez $a0, 1f\n"
+ "nop\n" +
+ RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") +
+ "1:\n" +
+ RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") +
+ "blez $a1, 1b\n"
+ "nop\n";
+ DriverStr(expected, "Blez");
+}
+
+TEST_F(AssemblerMIPSTest, Bgtz) {
+ mips::MipsLabel label;
+ __ Bgtz(mips::A0, &label);
+ constexpr size_t kAdduCount1 = 63;
+ for (size_t i = 0; i != kAdduCount1; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Bind(&label);
+ constexpr size_t kAdduCount2 = 64;
+ for (size_t i = 0; i != kAdduCount2; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Bgtz(mips::A1, &label);
+
+ std::string expected =
+ ".set noreorder\n"
+ "bgtz $a0, 1f\n"
+ "nop\n" +
+ RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") +
+ "1:\n" +
+ RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") +
+ "bgtz $a1, 1b\n"
+ "nop\n";
+ DriverStr(expected, "Bgtz");
+}
+
+TEST_F(AssemblerMIPSTest, Blt) {
+ mips::MipsLabel label;
+ __ Blt(mips::A0, mips::A1, &label);
+ constexpr size_t kAdduCount1 = 63;
+ for (size_t i = 0; i != kAdduCount1; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Bind(&label);
+ constexpr size_t kAdduCount2 = 64;
+ for (size_t i = 0; i != kAdduCount2; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Blt(mips::A2, mips::A3, &label);
+
+ std::string expected =
+ ".set noreorder\n"
+ "slt $at, $a0, $a1\n"
+ "bne $zero, $at, 1f\n"
+ "nop\n" +
+ RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") +
+ "1:\n" +
+ RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") +
+ "slt $at, $a2, $a3\n"
+ "bne $zero, $at, 1b\n"
+ "nop\n";
+ DriverStr(expected, "Blt");
+}
+
+TEST_F(AssemblerMIPSTest, Bge) {
+ mips::MipsLabel label;
+ __ Bge(mips::A0, mips::A1, &label);
+ constexpr size_t kAdduCount1 = 63;
+ for (size_t i = 0; i != kAdduCount1; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Bind(&label);
+ constexpr size_t kAdduCount2 = 64;
+ for (size_t i = 0; i != kAdduCount2; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Bge(mips::A2, mips::A3, &label);
+
+ std::string expected =
+ ".set noreorder\n"
+ "slt $at, $a0, $a1\n"
+ "beq $zero, $at, 1f\n"
+ "nop\n" +
+ RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") +
+ "1:\n" +
+ RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") +
+ "slt $at, $a2, $a3\n"
+ "beq $zero, $at, 1b\n"
+ "nop\n";
+ DriverStr(expected, "Bge");
+}
+
+TEST_F(AssemblerMIPSTest, Bltu) {
+ mips::MipsLabel label;
+ __ Bltu(mips::A0, mips::A1, &label);
+ constexpr size_t kAdduCount1 = 63;
+ for (size_t i = 0; i != kAdduCount1; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Bind(&label);
+ constexpr size_t kAdduCount2 = 64;
+ for (size_t i = 0; i != kAdduCount2; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Bltu(mips::A2, mips::A3, &label);
+
+ std::string expected =
+ ".set noreorder\n"
+ "sltu $at, $a0, $a1\n"
+ "bne $zero, $at, 1f\n"
+ "nop\n" +
+ RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") +
+ "1:\n" +
+ RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") +
+ "sltu $at, $a2, $a3\n"
+ "bne $zero, $at, 1b\n"
+ "nop\n";
+ DriverStr(expected, "Bltu");
+}
+
+TEST_F(AssemblerMIPSTest, Bgeu) {
+ mips::MipsLabel label;
+ __ Bgeu(mips::A0, mips::A1, &label);
+ constexpr size_t kAdduCount1 = 63;
+ for (size_t i = 0; i != kAdduCount1; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Bind(&label);
+ constexpr size_t kAdduCount2 = 64;
+ for (size_t i = 0; i != kAdduCount2; ++i) {
+ __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
+ }
+ __ Bgeu(mips::A2, mips::A3, &label);
+
+ std::string expected =
+ ".set noreorder\n"
+ "sltu $at, $a0, $a1\n"
+ "beq $zero, $at, 1f\n"
+ "nop\n" +
+ RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") +
+ "1:\n" +
+ RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") +
+ "sltu $at, $a2, $a3\n"
+ "beq $zero, $at, 1b\n"
+ "nop\n";
+ DriverStr(expected, "Bgeu");
+}
+
+#undef __
+
+} // namespace art
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index 04e815aa1d..5347bf0302 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -2369,44 +2369,48 @@ void X86Assembler::AddConstantArea() {
}
}
-int ConstantArea::AddInt32(int32_t v) {
+size_t ConstantArea::AppendInt32(int32_t v) {
+ size_t result = buffer_.size() * elem_size_;
+ buffer_.push_back(v);
+ return result;
+}
+
+size_t ConstantArea::AddInt32(int32_t v) {
for (size_t i = 0, e = buffer_.size(); i < e; i++) {
if (v == buffer_[i]) {
- return i * kEntrySize;
+ return i * elem_size_;
}
}
// Didn't match anything.
- int result = buffer_.size() * kEntrySize;
- buffer_.push_back(v);
- return result;
+ return AppendInt32(v);
}
-int ConstantArea::AddInt64(int64_t v) {
+size_t ConstantArea::AddInt64(int64_t v) {
int32_t v_low = Low32Bits(v);
int32_t v_high = High32Bits(v);
if (buffer_.size() > 1) {
// Ensure we don't pass the end of the buffer.
for (size_t i = 0, e = buffer_.size() - 1; i < e; i++) {
if (v_low == buffer_[i] && v_high == buffer_[i + 1]) {
- return i * kEntrySize;
+ return i * elem_size_;
}
}
}
// Didn't match anything.
- int result = buffer_.size() * kEntrySize;
+ size_t result = buffer_.size() * elem_size_;
buffer_.push_back(v_low);
buffer_.push_back(v_high);
return result;
}
-int ConstantArea::AddDouble(double v) {
+size_t ConstantArea::AddDouble(double v) {
// Treat the value as a 64-bit integer value.
return AddInt64(bit_cast<int64_t, double>(v));
}
-int ConstantArea::AddFloat(float v) {
+size_t ConstantArea::AddFloat(float v) {
// Treat the value as a 32-bit integer value.
return AddInt32(bit_cast<int32_t, float>(v));
}
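// Usage sketch for the size_t-returning constant-area API above (values illustrative, assuming
// an initially empty constant area): AddInt32() de-duplicates by scanning the buffer, while
// AppendInt32() always reserves a fresh 4-byte slot, e.g. for table-like data where equal
// values must keep distinct offsets.
#include "utils/x86/assembler_x86.h"  // in-tree path from compiler/

void ConstantAreaUsageSketch(art::x86::X86Assembler* assembler) {
  size_t a = assembler->AddInt32(42);     // appended at offset 0
  size_t b = assembler->AddInt32(42);     // duplicate found: b == a
  size_t c = assembler->AppendInt32(42);  // forced new slot: c == assembler->ConstantAreaSize() - 4
  (void)a; (void)b; (void)c;
}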
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index 93ecdf52fe..b50fda907a 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -166,6 +166,39 @@ class Address : public Operand {
Init(base_in, disp.Int32Value());
}
+ Address(Register index_in, ScaleFactor scale_in, int32_t disp) {
+ CHECK_NE(index_in, ESP); // Illegal addressing mode.
+ SetModRM(0, ESP);
+ SetSIB(scale_in, index_in, EBP);
+ SetDisp32(disp);
+ }
+
+ Address(Register base_in, Register index_in, ScaleFactor scale_in, int32_t disp) {
+ Init(base_in, index_in, scale_in, disp);
+ }
+
+ Address(Register base_in,
+ Register index_in,
+ ScaleFactor scale_in,
+ int32_t disp, AssemblerFixup* fixup) {
+ Init(base_in, index_in, scale_in, disp);
+ SetFixup(fixup);
+ }
+
+ static Address Absolute(uintptr_t addr) {
+ Address result;
+ result.SetModRM(0, EBP);
+ result.SetDisp32(addr);
+ return result;
+ }
+
+ static Address Absolute(ThreadOffset<4> addr) {
+ return Absolute(addr.Int32Value());
+ }
+
+ private:
+ Address() {}
+
void Init(Register base_in, int32_t disp) {
if (disp == 0 && base_in != EBP) {
SetModRM(0, base_in);
@@ -181,14 +214,7 @@ class Address : public Operand {
}
}
- Address(Register index_in, ScaleFactor scale_in, int32_t disp) {
- CHECK_NE(index_in, ESP); // Illegal addressing mode.
- SetModRM(0, ESP);
- SetSIB(scale_in, index_in, EBP);
- SetDisp32(disp);
- }
-
- Address(Register base_in, Register index_in, ScaleFactor scale_in, int32_t disp) {
+ void Init(Register base_in, Register index_in, ScaleFactor scale_in, int32_t disp) {
CHECK_NE(index_in, ESP); // Illegal addressing mode.
if (disp == 0 && base_in != EBP) {
SetModRM(0, ESP);
@@ -203,20 +229,6 @@ class Address : public Operand {
SetDisp32(disp);
}
}
-
- static Address Absolute(uintptr_t addr) {
- Address result;
- result.SetModRM(0, EBP);
- result.SetDisp32(addr);
- return result;
- }
-
- static Address Absolute(ThreadOffset<4> addr) {
- return Absolute(addr.Int32Value());
- }
-
- private:
- Address() {}
};
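// Illustrative uses of the x86 Address forms touched above (register and scale choices are
// arbitrary examples; the include path is the in-tree one from compiler/):
#include "utils/x86/assembler_x86.h"

void AddressFormsSketch() {
  using art::x86::Address;
  // index * scale + disp32 with no base register (SIB byte with EBP base and mod = 00):
  Address table_slot(art::x86::EAX, art::x86::TIMES_4, 0x1000);
  // base + index * scale + disp:
  Address field(art::x86::ESI, art::x86::EDI, art::x86::TIMES_1, 8);
  // absolute 32-bit address:
  Address abs = Address::Absolute(0x1000u);
  (void)table_slot; (void)field; (void)abs;
}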
@@ -252,40 +264,39 @@ class ConstantArea {
// Add a double to the constant area, returning the offset into
// the constant area where the literal resides.
- int AddDouble(double v);
+ size_t AddDouble(double v);
// Add a float to the constant area, returning the offset into
// the constant area where the literal resides.
- int AddFloat(float v);
+ size_t AddFloat(float v);
// Add an int32_t to the constant area, returning the offset into
// the constant area where the literal resides.
- int AddInt32(int32_t v);
+ size_t AddInt32(int32_t v);
+
+ // Add an int32_t to the end of the constant area, returning the offset into
+ // the constant area where the literal resides.
+ size_t AppendInt32(int32_t v);
// Add an int64_t to the constant area, returning the offset into
// the constant area where the literal resides.
- int AddInt64(int64_t v);
+ size_t AddInt64(int64_t v);
bool IsEmpty() const {
return buffer_.size() == 0;
}
- const std::vector<int32_t>& GetBuffer() const {
- return buffer_;
- }
-
- void AddFixup(AssemblerFixup* fixup) {
- fixups_.push_back(fixup);
+ size_t GetSize() const {
+ return buffer_.size() * elem_size_;
}
- const std::vector<AssemblerFixup*>& GetFixups() const {
- return fixups_;
+ const std::vector<int32_t>& GetBuffer() const {
+ return buffer_;
}
private:
- static constexpr size_t kEntrySize = sizeof(int32_t);
+ static constexpr size_t elem_size_ = sizeof(int32_t);
std::vector<int32_t> buffer_;
- std::vector<AssemblerFixup*> fixups_;
};
class X86Assembler FINAL : public Assembler {
@@ -740,26 +751,36 @@ class X86Assembler FINAL : public Assembler {
// Add a double to the constant area, returning the offset into
// the constant area where the literal resides.
- int AddDouble(double v) { return constant_area_.AddDouble(v); }
+ size_t AddDouble(double v) { return constant_area_.AddDouble(v); }
// Add a float to the constant area, returning the offset into
// the constant area where the literal resides.
- int AddFloat(float v) { return constant_area_.AddFloat(v); }
+ size_t AddFloat(float v) { return constant_area_.AddFloat(v); }
// Add an int32_t to the constant area, returning the offset into
// the constant area where the literal resides.
- int AddInt32(int32_t v) { return constant_area_.AddInt32(v); }
+ size_t AddInt32(int32_t v) {
+ return constant_area_.AddInt32(v);
+ }
+
+ // Add an int32_t to the end of the constant area, returning the offset into
+ // the constant area where the literal resides.
+ size_t AppendInt32(int32_t v) {
+ return constant_area_.AppendInt32(v);
+ }
// Add an int64_t to the constant area, returning the offset into
// the constant area where the literal resides.
- int AddInt64(int64_t v) { return constant_area_.AddInt64(v); }
+ size_t AddInt64(int64_t v) { return constant_area_.AddInt64(v); }
// Add the contents of the constant area to the assembler buffer.
void AddConstantArea();
// Is the constant area empty? Return true if there are no literals in the constant area.
bool IsConstantAreaEmpty() const { return constant_area_.IsEmpty(); }
- void AddConstantAreaFixup(AssemblerFixup* fixup) { constant_area_.AddFixup(fixup); }
+
+ // Return the current size of the constant area.
+ size_t ConstantAreaSize() const { return constant_area_.GetSize(); }
private:
inline void EmitUint8(uint8_t value);
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index 6e7d74d528..9eb5e67041 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -3122,7 +3122,14 @@ void X86_64Assembler::AddConstantArea() {
}
}
-int ConstantArea::AddInt32(int32_t v) {
+size_t ConstantArea::AppendInt32(int32_t v) {
+ size_t result = buffer_.size() * elem_size_;
+ buffer_.push_back(v);
+ return result;
+}
+
+size_t ConstantArea::AddInt32(int32_t v) {
+ // Look for an existing match.
for (size_t i = 0, e = buffer_.size(); i < e; i++) {
if (v == buffer_[i]) {
return i * elem_size_;
@@ -3130,12 +3137,10 @@ int ConstantArea::AddInt32(int32_t v) {
}
// Didn't match anything.
- int result = buffer_.size() * elem_size_;
- buffer_.push_back(v);
- return result;
+ return AppendInt32(v);
}
-int ConstantArea::AddInt64(int64_t v) {
+size_t ConstantArea::AddInt64(int64_t v) {
int32_t v_low = v;
int32_t v_high = v >> 32;
if (buffer_.size() > 1) {
@@ -3148,18 +3153,18 @@ int ConstantArea::AddInt64(int64_t v) {
}
// Didn't match anything.
- int result = buffer_.size() * elem_size_;
+ size_t result = buffer_.size() * elem_size_;
buffer_.push_back(v_low);
buffer_.push_back(v_high);
return result;
}
-int ConstantArea::AddDouble(double v) {
+size_t ConstantArea::AddDouble(double v) {
// Treat the value as a 64-bit integer value.
return AddInt64(bit_cast<int64_t, double>(v));
}
-int ConstantArea::AddFloat(float v) {
+size_t ConstantArea::AddFloat(float v) {
// Treat the value as a 32-bit integer value.
return AddInt32(bit_cast<int32_t, float>(v));
}
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 255f551675..01d28e305d 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -269,36 +269,40 @@ class Address : public Operand {
* Class to handle constant area values.
*/
class ConstantArea {
- public:
- ConstantArea() {}
+ public:
+ ConstantArea() {}
- // Add a double to the constant area, returning the offset into
- // the constant area where the literal resides.
- int AddDouble(double v);
+ // Add a double to the constant area, returning the offset into
+ // the constant area where the literal resides.
+ size_t AddDouble(double v);
- // Add a float to the constant area, returning the offset into
- // the constant area where the literal resides.
- int AddFloat(float v);
+ // Add a float to the constant area, returning the offset into
+ // the constant area where the literal resides.
+ size_t AddFloat(float v);
- // Add an int32_t to the constant area, returning the offset into
- // the constant area where the literal resides.
- int AddInt32(int32_t v);
+ // Add an int32_t to the constant area, returning the offset into
+ // the constant area where the literal resides.
+ size_t AddInt32(int32_t v);
- // Add an int64_t to the constant area, returning the offset into
- // the constant area where the literal resides.
- int AddInt64(int64_t v);
+ // Add an int32_t to the end of the constant area, returning the offset into
+ // the constant area where the literal resides.
+ size_t AppendInt32(int32_t v);
- int GetSize() const {
- return buffer_.size() * elem_size_;
- }
+ // Add an int64_t to the constant area, returning the offset into
+ // the constant area where the literal resides.
+ size_t AddInt64(int64_t v);
- const std::vector<int32_t>& GetBuffer() const {
- return buffer_;
- }
+ size_t GetSize() const {
+ return buffer_.size() * elem_size_;
+ }
- private:
- static constexpr size_t elem_size_ = sizeof(int32_t);
- std::vector<int32_t> buffer_;
+ const std::vector<int32_t>& GetBuffer() const {
+ return buffer_;
+ }
+
+ private:
+ static constexpr size_t elem_size_ = sizeof(int32_t);
+ std::vector<int32_t> buffer_;
};
@@ -806,19 +810,27 @@ class X86_64Assembler FINAL : public Assembler {
// Add a double to the constant area, returning the offset into
// the constant area where the literal resides.
- int AddDouble(double v) { return constant_area_.AddDouble(v); }
+ size_t AddDouble(double v) { return constant_area_.AddDouble(v); }
// Add a float to the constant area, returning the offset into
// the constant area where the literal resides.
- int AddFloat(float v) { return constant_area_.AddFloat(v); }
+ size_t AddFloat(float v) { return constant_area_.AddFloat(v); }
// Add an int32_t to the constant area, returning the offset into
// the constant area where the literal resides.
- int AddInt32(int32_t v) { return constant_area_.AddInt32(v); }
+ size_t AddInt32(int32_t v) {
+ return constant_area_.AddInt32(v);
+ }
+
+ // Add an int32_t to the end of the constant area, returning the offset into
+ // the constant area where the literal resides.
+ size_t AppendInt32(int32_t v) {
+ return constant_area_.AppendInt32(v);
+ }
// Add an int64_t to the constant area, returning the offset into
// the constant area where the literal resides.
- int AddInt64(int64_t v) { return constant_area_.AddInt64(v); }
+ size_t AddInt64(int64_t v) { return constant_area_.AddInt64(v); }
// Add the contents of the constant area to the assembler buffer.
void AddConstantArea();
@@ -826,6 +838,9 @@ class X86_64Assembler FINAL : public Assembler {
// Is the constant area empty? Return true if there are no literals in the constant area.
bool IsConstantAreaEmpty() const { return constant_area_.GetSize() == 0; }
+ // Return the current size of the constant area.
+ size_t ConstantAreaSize() const { return constant_area_.GetSize(); }
+
//
// Heap poisoning.
//
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 680e2d7b45..17c528209b 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -1401,7 +1401,7 @@ class Dex2Oat FINAL {
}
ScopedObjectAccess soa(self);
dex_caches_.push_back(soa.AddLocalReference<jobject>(
- class_linker->RegisterDexFile(*dex_file)));
+ class_linker->RegisterDexFile(*dex_file, Runtime::Current()->GetLinearAlloc())));
}
// If we use a swap file, ensure we are above the threshold to make it necessary.
diff --git a/disassembler/disassembler_mips.cc b/disassembler/disassembler_mips.cc
index c55d285f9f..faa2d2ded9 100644
--- a/disassembler/disassembler_mips.cc
+++ b/disassembler/disassembler_mips.cc
@@ -280,6 +280,7 @@ static const MipsInstruction gMipsInstructions[] = {
{ kITypeMask, 41u << kOpcodeShift, "sh", "TO", },
{ kITypeMask, 43u << kOpcodeShift, "sw", "TO", },
{ kITypeMask, 49u << kOpcodeShift, "lwc1", "tO", },
+ { kJTypeMask, 50u << kOpcodeShift, "bc", "P" },
{ kITypeMask, 53u << kOpcodeShift, "ldc1", "tO", },
{ kITypeMask | (0x1f << 21), 54u << kOpcodeShift, "jic", "Ti" },
{ kITypeMask | (1 << 21), (54u << kOpcodeShift) | (1 << 21), "beqzc", "Sb" }, // TODO: de-dup?
@@ -290,6 +291,7 @@ static const MipsInstruction gMipsInstructions[] = {
{ kITypeMask, 55u << kOpcodeShift, "ld", "TO", },
{ kITypeMask, 57u << kOpcodeShift, "swc1", "tO", },
{ kITypeMask | (0x1f << 16), (59u << kOpcodeShift) | (30 << 16), "auipc", "Si" },
+ { kITypeMask | (0x3 << 19), (59u << kOpcodeShift) | (0 << 19), "addiupc", "Sp" },
{ kITypeMask, 61u << kOpcodeShift, "sdc1", "tO", },
{ kITypeMask | (0x1f << 21), 62u << kOpcodeShift, "jialc", "Ti" },
{ kITypeMask | (1 << 21), (62u << kOpcodeShift) | (1 << 21), "bnezc", "Sb" }, // TODO: de-dup?
@@ -432,6 +434,22 @@ size_t DisassemblerMips::Dump(std::ostream& os, const uint8_t* instr_ptr) {
}
}
break;
+ case 'P': // 26-bit offset in bc.
+ {
+ int32_t offset = (instruction & 0x3ffffff) - ((instruction & 0x2000000) << 1);
+ offset <<= 2;
+ offset += 4;
+ args << FormatInstructionPointer(instr_ptr + offset);
+ args << StringPrintf(" ; %+d", offset);
+ }
+ break;
+ case 'p': // 19-bit offset in addiupc.
+ {
+ int32_t offset = (instruction & 0x7ffff) - ((instruction & 0x40000) << 1);
+ args << offset << " ; move r" << rs << ", ";
+ args << FormatInstructionPointer(instr_ptr + (offset << 2));
+ }
+ break;
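// A quick check of the sign-extension idiom used for the new 'bc' operand above: masking the
// 26-bit field and subtracting twice the weight of its top bit is equivalent to reading the
// field as a two's-complement value (the same trick, with bit 18, handles 'addiupc').
#include <cassert>
#include <cstdint>

static int32_t SignExtend26(uint32_t instruction) {
  return static_cast<int32_t>(instruction & 0x3ffffff) -
         static_cast<int32_t>((instruction & 0x2000000) << 1);
}

static void SignExtend26Check() {
  assert(SignExtend26(0x0000001u) == 1);          // positive offsets pass through
  assert(SignExtend26(0x3ffffffu) == -1);         // all ones decodes to -1
  assert(SignExtend26(0x2000000u) == -33554432);  // most negative value, -2^25
}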
case 'S': args << 'r' << rs; break;
case 's': args << 'f' << rs; break;
case 'T': args << 'r' << rt; break;
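The new 'P' and 'p' cases recover signed offsets from unsigned bit fields by subtracting twice the sign bit. A standalone check of the 26-bit bc case, illustrative only and not part of the patch:

#include <cassert>
#include <cstdint>

// Sign-extend the 26-bit bc offset field and convert it to a byte offset relative to
// the instruction address, mirroring the arithmetic in the 'P' case above.
int32_t BcByteOffset(uint32_t instruction) {
  int32_t offset = (instruction & 0x3ffffff) - ((instruction & 0x2000000) << 1);
  return (offset << 2) + 4;
}

int main() {
  assert(BcByteOffset(0x0000001u) == 8);              // Field +1: two words past the bc.
  assert(BcByteOffset(0x3ffffffu) == 0);              // Field -1: back to the bc itself.
  assert(BcByteOffset(0x2000000u) == 4 - 0x8000000);  // Most negative encodable field.
  return 0;
}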
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index e2486041af..dbf536575a 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -26,6 +26,7 @@
#include <vector>
#include "arch/instruction_set_features.h"
+#include "art_code.h"
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/unix_file/fd_file.h"
@@ -49,10 +50,12 @@
#include "mirror/object_array-inl.h"
#include "oat.h"
#include "oat_file-inl.h"
+#include "oat_file_manager.h"
#include "os.h"
#include "output_stream.h"
#include "safe_map.h"
#include "scoped_thread_state_change.h"
+#include "stack_map.h"
#include "ScopedLocalRef.h"
#include "thread_list.h"
#include "verifier/dex_gc_map.h"
@@ -1417,8 +1420,10 @@ class OatDumper {
uint32_t method_access_flags) {
if ((method_access_flags & kAccNative) == 0) {
ScopedObjectAccess soa(Thread::Current());
+ Runtime* const runtime = Runtime::Current();
Handle<mirror::DexCache> dex_cache(
- hs->NewHandle(Runtime::Current()->GetClassLinker()->RegisterDexFile(*dex_file)));
+ hs->NewHandle(runtime->GetClassLinker()->RegisterDexFile(*dex_file,
+ runtime->GetLinearAlloc())));
DCHECK(options_.class_loader_ != nullptr);
return verifier::MethodVerifier::VerifyMethodAndDump(
soa.Self(), vios, dex_method_idx, dex_file, dex_cache, *options_.class_loader_,
@@ -1563,13 +1568,15 @@ class ImageDumper {
}
os << "\n";
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ Runtime* const runtime = Runtime::Current();
+ ClassLinker* class_linker = runtime->GetClassLinker();
std::string image_filename = image_space_.GetImageFilename();
std::string oat_location = ImageHeader::GetOatLocationFromImageLocation(image_filename);
os << "OAT LOCATION: " << oat_location;
os << "\n";
std::string error_msg;
- const OatFile* oat_file = class_linker->FindOpenedOatFileFromOatLocation(oat_location);
+ const OatFile* oat_file = runtime->GetOatFileManager().FindOpenedOatFileFromOatLocation(
+ oat_location);
if (oat_file == nullptr) {
oat_file = OatFile::Open(oat_location, oat_location,
nullptr, nullptr, false, nullptr,
@@ -1594,7 +1601,7 @@ class ImageDumper {
os << "OBJECTS:\n" << std::flush;
// Loop through all the image spaces and dump their objects.
- gc::Heap* heap = Runtime::Current()->GetHeap();
+ gc::Heap* heap = runtime->GetHeap();
const std::vector<gc::space::ContinuousSpace*>& spaces = heap->GetContinuousSpaces();
Thread* self = Thread::Current();
{
@@ -1958,24 +1965,27 @@ class ImageDumper {
DCHECK(method != nullptr);
const auto image_pointer_size =
InstructionSetPointerSize(state->oat_dumper_->GetOatInstructionSet());
+ const void* quick_oat_code_begin = state->GetQuickOatCodeBegin(method);
+ const void* quick_oat_code_end = state->GetQuickOatCodeEnd(method);
+ ArtCode art_code(method);
if (method->IsNative()) {
- DCHECK(method->GetNativeGcMap(image_pointer_size) == nullptr) << PrettyMethod(method);
- DCHECK(method->GetMappingTable(image_pointer_size) == nullptr) << PrettyMethod(method);
+ DCHECK(art_code.GetNativeGcMap(image_pointer_size) == nullptr) << PrettyMethod(method);
+ DCHECK(art_code.GetMappingTable(image_pointer_size) == nullptr) << PrettyMethod(method);
bool first_occurrence;
- const void* quick_oat_code = state->GetQuickOatCodeBegin(method);
uint32_t quick_oat_code_size = state->GetQuickOatCodeSize(method);
- state->ComputeOatSize(quick_oat_code, &first_occurrence);
+ state->ComputeOatSize(quick_oat_code_begin, &first_occurrence);
if (first_occurrence) {
state->stats_.native_to_managed_code_bytes += quick_oat_code_size;
}
- if (quick_oat_code != method->GetEntryPointFromQuickCompiledCodePtrSize(image_pointer_size)) {
- indent_os << StringPrintf("OAT CODE: %p\n", quick_oat_code);
+ if (quick_oat_code_begin !=
+ method->GetEntryPointFromQuickCompiledCodePtrSize(image_pointer_size)) {
+ indent_os << StringPrintf("OAT CODE: %p\n", quick_oat_code_begin);
}
} else if (method->IsAbstract() || method->IsCalleeSaveMethod() ||
method->IsResolutionMethod() || method->IsImtConflictMethod() ||
method->IsImtUnimplementedMethod() || method->IsClassInitializer()) {
- DCHECK(method->GetNativeGcMap(image_pointer_size) == nullptr) << PrettyMethod(method);
- DCHECK(method->GetMappingTable(image_pointer_size) == nullptr) << PrettyMethod(method);
+ DCHECK(art_code.GetNativeGcMap(image_pointer_size) == nullptr) << PrettyMethod(method);
+ DCHECK(art_code.GetMappingTable(image_pointer_size) == nullptr) << PrettyMethod(method);
} else {
const DexFile::CodeItem* code_item = method->GetCodeItem();
size_t dex_instruction_bytes = code_item->insns_size_in_code_units_ * 2;
@@ -1983,29 +1993,27 @@ class ImageDumper {
bool first_occurrence;
size_t gc_map_bytes = state->ComputeOatSize(
- method->GetNativeGcMap(image_pointer_size), &first_occurrence);
+ art_code.GetNativeGcMap(image_pointer_size), &first_occurrence);
if (first_occurrence) {
state->stats_.gc_map_bytes += gc_map_bytes;
}
size_t pc_mapping_table_bytes = state->ComputeOatSize(
- method->GetMappingTable(image_pointer_size), &first_occurrence);
+ art_code.GetMappingTable(image_pointer_size), &first_occurrence);
if (first_occurrence) {
state->stats_.pc_mapping_table_bytes += pc_mapping_table_bytes;
}
size_t vmap_table_bytes = 0u;
- if (!method->IsOptimized(image_pointer_size)) {
+ if (!art_code.IsOptimized(image_pointer_size)) {
// Methods compiled with the optimizing compiler have no vmap table.
vmap_table_bytes = state->ComputeOatSize(
- method->GetVmapTable(image_pointer_size), &first_occurrence);
+ art_code.GetVmapTable(image_pointer_size), &first_occurrence);
if (first_occurrence) {
state->stats_.vmap_table_bytes += vmap_table_bytes;
}
}
- const void* quick_oat_code_begin = state->GetQuickOatCodeBegin(method);
- const void* quick_oat_code_end = state->GetQuickOatCodeEnd(method);
uint32_t quick_oat_code_size = state->GetQuickOatCodeSize(method);
state->ComputeOatSize(quick_oat_code_begin, &first_occurrence);
if (first_occurrence) {
@@ -2394,13 +2402,13 @@ static int DumpOatWithRuntime(Runtime* runtime, OatFile* oat_file, OatDumperOpti
// Need to register dex files to get a working dex cache.
ScopedObjectAccess soa(self);
ClassLinker* class_linker = runtime->GetClassLinker();
- class_linker->RegisterOatFile(oat_file);
+ runtime->GetOatFileManager().RegisterOatFile(std::unique_ptr<const OatFile>(oat_file));
std::vector<const DexFile*> class_path;
for (const OatFile::OatDexFile* odf : oat_file->GetOatDexFiles()) {
std::string error_msg;
const DexFile* const dex_file = OpenDexFile(odf, &error_msg);
CHECK(dex_file != nullptr) << error_msg;
- class_linker->RegisterDexFile(*dex_file);
+ class_linker->RegisterDexFile(*dex_file, runtime->GetLinearAlloc());
class_path.push_back(dex_file);
}
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 8d81f2a7f6..8fe3fa2df1 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -19,6 +19,7 @@ LOCAL_PATH := $(call my-dir)
include art/build/Android.common_build.mk
LIBART_COMMON_SRC_FILES := \
+ art_code.cc \
art_field.cc \
art_method.cc \
atomic.cc.arm \
@@ -154,6 +155,7 @@ LIBART_COMMON_SRC_FILES := \
oat.cc \
oat_file.cc \
oat_file_assistant.cc \
+ oat_file_manager.cc \
object_lock.cc \
offsets.cc \
os_linux.cc \
@@ -203,7 +205,6 @@ LIBART_COMMON_SRC_FILES += \
arch/x86/registers_x86.cc \
arch/x86_64/registers_x86_64.cc \
entrypoints/entrypoint_utils.cc \
- entrypoints/interpreter/interpreter_entrypoints.cc \
entrypoints/jni/jni_entrypoints.cc \
entrypoints/math_entrypoints.cc \
entrypoints/quick/quick_alloc_entrypoints.cc \
diff --git a/runtime/arch/arch_test.cc b/runtime/arch/arch_test.cc
index 4a45f4970b..e676a09733 100644
--- a/runtime/arch/arch_test.cc
+++ b/runtime/arch/arch_test.cc
@@ -39,7 +39,7 @@ class ArchTest : public CommonRuntimeTest {
runtime->SetInstructionSet(isa);
ArtMethod* save_method = runtime->CreateCalleeSaveMethod();
runtime->SetCalleeSaveMethod(save_method, type);
- QuickMethodFrameInfo frame_info = save_method->GetQuickFrameInfo();
+ QuickMethodFrameInfo frame_info = ArtCode(save_method).GetQuickFrameInfo();
EXPECT_EQ(frame_info.FrameSizeInBytes(), save_size) << "Expected and real size differs for "
<< type << " core spills=" << std::hex << frame_info.CoreSpillMask() << " fp spills="
<< frame_info.FpSpillMask() << std::dec;
diff --git a/runtime/arch/arm/context_arm.cc b/runtime/arch/arm/context_arm.cc
index 8f6b1ff0a5..d5c7846951 100644
--- a/runtime/arch/arm/context_arm.cc
+++ b/runtime/arch/arm/context_arm.cc
@@ -38,8 +38,8 @@ void ArmContext::Reset() {
}
void ArmContext::FillCalleeSaves(const StackVisitor& fr) {
- ArtMethod* method = fr.GetMethod();
- const QuickMethodFrameInfo frame_info = method->GetQuickFrameInfo();
+ ArtCode art_code = fr.GetCurrentCode();
+ const QuickMethodFrameInfo frame_info = art_code.GetQuickFrameInfo();
int spill_pos = 0;
// Core registers come first, from the highest down to the lowest.
diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc
index 1599025697..76c7c4f6f0 100644
--- a/runtime/arch/arm/entrypoints_init_arm.cc
+++ b/runtime/arch/arm/entrypoints_init_arm.cc
@@ -14,7 +14,6 @@
* limitations under the License.
*/
-#include "entrypoints/interpreter/interpreter_entrypoints.h"
#include "entrypoints/jni/jni_entrypoints.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "entrypoints/quick/quick_default_externs.h"
@@ -47,12 +46,7 @@ extern "C" int __aeabi_idivmod(int32_t, int32_t); // [DIV|REM]_INT[_2ADDR|_LIT8
// Long long arithmetics - REM_LONG[_2ADDR] and DIV_LONG[_2ADDR]
extern "C" int64_t __aeabi_ldivmod(int64_t, int64_t);
-void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
- QuickEntryPoints* qpoints) {
- // Interpreter
- ipoints->pInterpreterToInterpreterBridge = artInterpreterToInterpreterBridge;
- ipoints->pInterpreterToCompiledCodeBridge = artInterpreterToCompiledCodeBridge;
-
+void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
// JNI
jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub;
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index d09631bc71..631b784787 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -891,7 +891,110 @@ END art_quick_set64_instance
ONE_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
// Generate the allocation entrypoints for each allocator.
-GENERATE_ALL_ALLOC_ENTRYPOINTS
+GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB)
+// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc).
+ENTRY art_quick_alloc_object_rosalloc
+ // Fast path rosalloc allocation.
+ // r0: type_idx/return value, r1: ArtMethod*, r9: Thread::Current
+ // r2, r3, r12: free.
+ ldr r2, [r1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_32] // Load dex cache resolved types array
+ // Load the class (r2)
+ ldr r2, [r2, r0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
+ cbz r2, .Lart_quick_alloc_object_rosalloc_slow_path // Check null class
+ // Check class status.
+ ldr r3, [r2, #MIRROR_CLASS_STATUS_OFFSET]
+ cmp r3, #MIRROR_CLASS_STATUS_INITIALIZED
+ bne .Lart_quick_alloc_object_rosalloc_slow_path
+ // Add a fake dependence from the
+ // following access flag and size
+ // loads to the status load.
+ // This is to prevent those loads
+ // from being reordered above the
+ // status load and reading wrong
+ // values (an alternative is to use
+ // a load-acquire for the status).
+ eor r3, r3, r3
+ add r2, r2, r3
+ // Check if access flags have
+ // kAccClassIsFinalizable
+ ldr r3, [r2, #MIRROR_CLASS_ACCESS_FLAGS_OFFSET]
+ tst r3, #ACCESS_FLAGS_CLASS_IS_FINALIZABLE
+ bne .Lart_quick_alloc_object_rosalloc_slow_path
+
+ ldr r3, [r9, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET] // Check if the thread local
+ // allocation stack has room.
+ // TODO: consider using ldrd.
+ ldr r12, [r9, #THREAD_LOCAL_ALLOC_STACK_END_OFFSET]
+ cmp r3, r12
+ bhs .Lart_quick_alloc_object_rosalloc_slow_path
+
+ ldr r3, [r2, #MIRROR_CLASS_OBJECT_SIZE_OFFSET] // Load the object size (r3)
+ cmp r3, #ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE // Check if the size is for a thread
+ // local allocation
+ bhs .Lart_quick_alloc_object_rosalloc_slow_path
+ // Compute the rosalloc bracket index
+ // from the size.
+ // Align up the size by the rosalloc
+ // bracket quantum size and divide
+ // by the quantum size and subtract
+ // by 1. This code is a shorter but
+ // equivalent version.
+ sub r3, r3, #1
+ lsr r3, r3, #ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT
+ // Load the rosalloc run (r12)
+ add r12, r9, r3, lsl #POINTER_SIZE_SHIFT
+ ldr r12, [r12, #THREAD_ROSALLOC_RUNS_OFFSET]
+ // Load the free list head (r3). This
+ // will be the return val.
+ ldr r3, [r12, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)]
+ cbz r3, .Lart_quick_alloc_object_rosalloc_slow_path
+ // "Point of no slow path". Won't go to the slow path from here on. OK to clobber r0 and r1.
+ ldr r1, [r3, #ROSALLOC_SLOT_NEXT_OFFSET] // Load the next pointer of the head
+ // and update the list head with the
+ // next pointer.
+ str r1, [r12, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)]
+ // Store the class pointer in the
+ // header. This also overwrites the
+ // next pointer. The offsets are
+ // asserted to match.
+#if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET
+#error "Class pointer needs to overwrite next pointer."
+#endif
+ POISON_HEAP_REF r2
+ str r2, [r3, #MIRROR_OBJECT_CLASS_OFFSET]
+ // Push the new object onto the thread
+ // local allocation stack and
+ // increment the thread local
+ // allocation stack top.
+ ldr r1, [r9, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET]
+ str r3, [r1], #COMPRESSED_REFERENCE_SIZE // (Increment r1 as a side effect.)
+ str r1, [r9, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET]
+ // Decrement the size of the free list
+ ldr r1, [r12, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)]
+ sub r1, #1
+ // TODO: consider combining this store
+ // and the list head store above using
+ // strd.
+ str r1, [r12, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)]
+ // Fence. This is "ish" not "ishst" so
+ // that the code after this allocation
+ // site will see the right values in
+ // the fields of the class.
+ // (Alternatively we could use "ishst"
+ // if we use load-acquire for the
+ // class status load.)
+ dmb ish
+ mov r0, r3 // Set the return value and return.
+ bx lr
+
+.Lart_quick_alloc_object_rosalloc_slow_path:
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2, r3 @ save callee saves in case of GC
+ mov r2, r9 @ pass Thread::Current
+ bl artAllocObjectFromCodeRosAlloc @ (uint32_t type_idx, Method* method, Thread*)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+END art_quick_alloc_object_rosalloc
/*
* Called by managed code when the value in rSUSPEND has been decremented to 0.
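The rosalloc fast path above derives the bracket index with the shortened form (size - 1) >> quantum_shift. A small check that this matches the longer align-up-then-divide description in the comments, illustrative only and not part of the patch; the 16-byte quantum is an assumption for the example.

#include <cassert>
#include <cstddef>

constexpr size_t kQuantumShift = 4;                   // Assumed 16-byte bracket quantum.
constexpr size_t kQuantum = size_t(1) << kQuantumShift;

size_t BracketIndexLongForm(size_t size) {
  size_t aligned = (size + kQuantum - 1) & ~(kQuantum - 1);  // Align up to the quantum.
  return aligned / kQuantum - 1;                             // Divide, then subtract 1.
}

size_t BracketIndexFastPath(size_t size) {
  return (size - 1) >> kQuantumShift;                        // Form used in the assembly.
}

int main() {
  for (size_t size = 1; size <= 1024; ++size) {
    assert(BracketIndexLongForm(size) == BracketIndexFastPath(size));
  }
  return 0;
}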
diff --git a/runtime/arch/arm64/context_arm64.cc b/runtime/arch/arm64/context_arm64.cc
index 4477631c67..cdc03fe16f 100644
--- a/runtime/arch/arm64/context_arm64.cc
+++ b/runtime/arch/arm64/context_arm64.cc
@@ -40,8 +40,8 @@ void Arm64Context::Reset() {
}
void Arm64Context::FillCalleeSaves(const StackVisitor& fr) {
- ArtMethod* method = fr.GetMethod();
- const QuickMethodFrameInfo frame_info = method->GetQuickFrameInfo();
+ ArtCode code = fr.GetCurrentCode();
+ const QuickMethodFrameInfo frame_info = code.GetQuickFrameInfo();
int spill_pos = 0;
// Core registers come first, from the highest down to the lowest.
diff --git a/runtime/arch/arm64/entrypoints_init_arm64.cc b/runtime/arch/arm64/entrypoints_init_arm64.cc
index e9c816f260..371cbb2673 100644
--- a/runtime/arch/arm64/entrypoints_init_arm64.cc
+++ b/runtime/arch/arm64/entrypoints_init_arm64.cc
@@ -14,7 +14,6 @@
* limitations under the License.
*/
-#include "entrypoints/interpreter/interpreter_entrypoints.h"
#include "entrypoints/jni/jni_entrypoints.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "entrypoints/quick/quick_default_externs.h"
@@ -30,12 +29,7 @@ namespace art {
extern "C" uint32_t artIsAssignableFromCode(const mirror::Class* klass,
const mirror::Class* ref_class);
-void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
- QuickEntryPoints* qpoints) {
- // Interpreter
- ipoints->pInterpreterToInterpreterBridge = artInterpreterToInterpreterBridge;
- ipoints->pInterpreterToCompiledCodeBridge = artInterpreterToCompiledCodeBridge;
-
+void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
// JNI
jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub;
diff --git a/runtime/arch/arm64/instruction_set_features_arm64.cc b/runtime/arch/arm64/instruction_set_features_arm64.cc
index 1f2ce02852..395cee8f05 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64.cc
+++ b/runtime/arch/arm64/instruction_set_features_arm64.cc
@@ -30,7 +30,7 @@ const Arm64InstructionSetFeatures* Arm64InstructionSetFeatures::FromVariant(
// Look for variants that need a fix for a53 erratum 835769.
static const char* arm64_variants_with_a53_835769_bug[] = {
- "default", "generic" // Pessimistically assume all generic ARM64s are A53s.
+ "default", "generic", "cortex-a53" // Pessimistically assume all generic ARM64s are A53s.
};
bool needs_a53_835769_fix = FindVariantInArray(arm64_variants_with_a53_835769_bug,
arraysize(arm64_variants_with_a53_835769_bug),
diff --git a/runtime/arch/instruction_set.h b/runtime/arch/instruction_set.h
index 9cfd2eb2d6..ff9c0b320d 100644
--- a/runtime/arch/instruction_set.h
+++ b/runtime/arch/instruction_set.h
@@ -107,6 +107,22 @@ static inline size_t GetInstructionSetPointerSize(InstructionSet isa) {
}
}
+static inline bool IsValidInstructionSet(InstructionSet isa) {
+ switch (isa) {
+ case kArm:
+ case kThumb2:
+ case kArm64:
+ case kX86:
+ case kX86_64:
+ case kMips:
+ case kMips64:
+ return true;
+ case kNone:
+ default:
+ return false;
+ }
+}
+
size_t GetInstructionSetAlignment(InstructionSet isa);
static inline bool Is64BitInstructionSet(InstructionSet isa) {
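A hedged usage sketch for the new predicate, not part of the patch: rejecting an InstructionSet value that came from external data before it is trusted. ValidateIsa is a hypothetical helper, and the includes assume ART's own headers.

#include "arch/instruction_set.h"
#include "base/logging.h"

// Hypothetical helper, illustrative only.
static art::InstructionSet ValidateIsa(art::InstructionSet isa) {
  if (!art::IsValidInstructionSet(isa)) {
    LOG(ERROR) << "Rejecting invalid instruction set value " << static_cast<int>(isa);
    return art::kNone;
  }
  return isa;
}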
diff --git a/runtime/arch/mips/context_mips.cc b/runtime/arch/mips/context_mips.cc
index 08ab356855..dba62d9200 100644
--- a/runtime/arch/mips/context_mips.cc
+++ b/runtime/arch/mips/context_mips.cc
@@ -38,8 +38,8 @@ void MipsContext::Reset() {
}
void MipsContext::FillCalleeSaves(const StackVisitor& fr) {
- ArtMethod* method = fr.GetMethod();
- const QuickMethodFrameInfo frame_info = method->GetQuickFrameInfo();
+ ArtCode code = fr.GetCurrentCode();
+ const QuickMethodFrameInfo frame_info = code.GetQuickFrameInfo();
int spill_pos = 0;
// Core registers come first, from the highest down to the lowest.
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
index 6721e5452f..59421dd8b9 100644
--- a/runtime/arch/mips/entrypoints_init_mips.cc
+++ b/runtime/arch/mips/entrypoints_init_mips.cc
@@ -15,7 +15,6 @@
*/
#include "atomic.h"
-#include "entrypoints/interpreter/interpreter_entrypoints.h"
#include "entrypoints/jni/jni_entrypoints.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "entrypoints/quick/quick_default_externs.h"
@@ -59,12 +58,7 @@ extern "C" double fmod(double a, double b); // REM_DOUBLE[_2ADDR]
extern "C" int64_t __divdi3(int64_t, int64_t);
extern "C" int64_t __moddi3(int64_t, int64_t);
-void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
- QuickEntryPoints* qpoints) {
- // Interpreter
- ipoints->pInterpreterToInterpreterBridge = artInterpreterToInterpreterBridge;
- ipoints->pInterpreterToCompiledCodeBridge = artInterpreterToCompiledCodeBridge;
-
+void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
// JNI
jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub;
diff --git a/runtime/arch/mips64/context_mips64.cc b/runtime/arch/mips64/context_mips64.cc
index 2c17f1c118..d808c9e0dc 100644
--- a/runtime/arch/mips64/context_mips64.cc
+++ b/runtime/arch/mips64/context_mips64.cc
@@ -38,8 +38,8 @@ void Mips64Context::Reset() {
}
void Mips64Context::FillCalleeSaves(const StackVisitor& fr) {
- ArtMethod* method = fr.GetMethod();
- const QuickMethodFrameInfo frame_info = method->GetQuickFrameInfo();
+ ArtCode code = fr.GetCurrentCode();
+ const QuickMethodFrameInfo frame_info = code.GetQuickFrameInfo();
int spill_pos = 0;
// Core registers come first, from the highest down to the lowest.
diff --git a/runtime/arch/mips64/entrypoints_init_mips64.cc b/runtime/arch/mips64/entrypoints_init_mips64.cc
index 9f1f0e021c..417d5fc632 100644
--- a/runtime/arch/mips64/entrypoints_init_mips64.cc
+++ b/runtime/arch/mips64/entrypoints_init_mips64.cc
@@ -15,7 +15,6 @@
*/
#include "atomic.h"
-#include "entrypoints/interpreter/interpreter_entrypoints.h"
#include "entrypoints/jni/jni_entrypoints.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "entrypoints/quick/quick_default_externs.h"
@@ -57,12 +56,7 @@ extern "C" double fmod(double a, double b); // REM_DOUBLE[_2ADDR]
extern "C" int64_t __divdi3(int64_t, int64_t);
extern "C" int64_t __moddi3(int64_t, int64_t);
-void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
- QuickEntryPoints* qpoints) {
- // Interpreter
- ipoints->pInterpreterToInterpreterBridge = artInterpreterToInterpreterBridge;
- ipoints->pInterpreterToCompiledCodeBridge = artInterpreterToCompiledCodeBridge;
-
+void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
// JNI
jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub;
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index 68156ae7e3..66c8aadf33 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -1615,5 +1615,70 @@ ENTRY art_quick_deoptimize_from_compiled_code
move $a0, rSELF # pass Thread::current
END art_quick_deoptimize_from_compiled_code
-UNIMPLEMENTED art_quick_indexof
-UNIMPLEMENTED art_quick_string_compareto
+ .set push
+ .set noat
+/* java.lang.String.compareTo(String anotherString) */
+ENTRY_NO_GP art_quick_string_compareto
+/* $a0 holds address of "this" */
+/* $a1 holds address of "anotherString" */
+ beq $a0,$a1,9f # this and anotherString are the same object
+ move $v0,$zero
+
+ lw $a2,MIRROR_STRING_COUNT_OFFSET($a0) # this.length()
+ lw $a3,MIRROR_STRING_COUNT_OFFSET($a1) # anotherString.length()
+ sltu $at,$a2,$a3
+ seleqz $t2,$a3,$at
+ selnez $at,$a2,$at
+ or $t2,$t2,$at # $t2 now holds min(this.length(),anotherString.length())
+
+ beqz $t2,9f # while min(this.length(),anotherString.length())-i != 0
+ subu $v0,$a2,$a3 # if $t2==0 return
+ # (this.length() - anotherString.length())
+1:
+ lhu $t0,MIRROR_STRING_VALUE_OFFSET($a0) # while this.charAt(i) == anotherString.charAt(i)
+ lhu $t1,MIRROR_STRING_VALUE_OFFSET($a1)
+ bne $t0,$t1,9f # if this.charAt(i) != anotherString.charAt(i)
+ subu $v0,$t0,$t1 # return (this.charAt(i) - anotherString.charAt(i))
+ daddiu $a0,$a0,2 # point at this.charAt(i++)
+ subu $t2,$t2,1 # new value of
+ # min(this.length(),anotherString.length())-i
+ bnez $t2,1b
+ daddiu $a1,$a1,2 # point at anotherString.charAt(i++)
+ subu $v0,$a2,$a3
+
+9:
+ j $ra
+ nop
+END art_quick_string_compareto
+
+/* java.lang.String.indexOf(int ch, int fromIndex=0) */
+ENTRY_NO_GP art_quick_indexof
+/* $a0 holds address of "this" */
+/* $a1 holds "ch" */
+/* $a2 holds "fromIndex" */
+ lw $t0,MIRROR_STRING_COUNT_OFFSET($a0) # this.length()
+ subu $t0,$t0,$a2 # this.length() - offset
+ blez $t0,6f # if this.length()-offset <= 0
+ li $v0,-1 # return -1;
+
+ sll $v0,$a2,1 # $a0 += $a2 * 2
+ daddu $a0,$a0,$v0 # " " " " "
+ move $v0,$a2 # Set i to offset.
+
+1:
+ lhu $t3,MIRROR_STRING_VALUE_OFFSET($a0) # if this.charAt(i) == ch
+ beq $t3,$a1,6f # return i;
+ daddu $a0,$a0,2 # i++
+ subu $t0,$t0,1 # this.length() - i
+ bnez $t0,1b # while this.length() - i > 0
+ addu $v0,$v0,1 # i++
+
+ li $v0,-1 # if this.length() - i <= 0
+ # return -1;
+
+6:
+ j $ra
+ nop
+END art_quick_indexof
+
+ .set pop
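As a reference for what the hand-written art_quick_string_compareto above computes, here is a C++ sketch of the same comparison, illustrative only and not part of the patch (it assumes the 16-bit char arrays this era of String uses, and compares char pointers where the assembly compares String references):

#include <algorithm>
#include <cstdint>

int32_t StringCompareTo(const uint16_t* lhs_chars, int32_t lhs_len,
                        const uint16_t* rhs_chars, int32_t rhs_len) {
  if (lhs_chars == rhs_chars) {
    return 0;                              // Same backing data: the assembly's same-object exit.
  }
  int32_t count = std::min(lhs_len, rhs_len);
  for (int32_t i = 0; i < count; ++i) {
    if (lhs_chars[i] != rhs_chars[i]) {
      return lhs_chars[i] - rhs_chars[i];  // First differing character decides.
    }
  }
  return lhs_len - rhs_len;                // Otherwise the shorter string sorts first.
}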
diff --git a/runtime/arch/quick_alloc_entrypoints.S b/runtime/arch/quick_alloc_entrypoints.S
index fe04bf5a77..fbacdbc930 100644
--- a/runtime/arch/quick_alloc_entrypoints.S
+++ b/runtime/arch/quick_alloc_entrypoints.S
@@ -57,3 +57,191 @@ GENERATE_ALLOC_ENTRYPOINTS _region_instrumented, RegionInstrumented
GENERATE_ALLOC_ENTRYPOINTS _region_tlab, RegionTLAB
GENERATE_ALLOC_ENTRYPOINTS _region_tlab_instrumented, RegionTLABInstrumented
.endm
+
+// Generate the allocation entrypoints for each allocator. This is used as an alternative to
+// GENERATE_ALL_ALLOC_ENTRYPOINTS for selectively implementing allocation fast paths in
+// hand-written assembly.
+#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(c_suffix, cxx_suffix) \
+ TWO_ARG_DOWNCALL art_quick_alloc_object ## c_suffix, artAllocObjectFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(c_suffix, cxx_suffix) \
+ TWO_ARG_DOWNCALL art_quick_alloc_object_resolved ## c_suffix, artAllocObjectFromCodeResolved ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(c_suffix, cxx_suffix) \
+ TWO_ARG_DOWNCALL art_quick_alloc_object_initialized ## c_suffix, artAllocObjectFromCodeInitialized ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(c_suffix, cxx_suffix) \
+ TWO_ARG_DOWNCALL art_quick_alloc_object_with_access_check ## c_suffix, artAllocObjectFromCodeWithAccessCheck ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(c_suffix, cxx_suffix) \
+ THREE_ARG_DOWNCALL art_quick_alloc_array ## c_suffix, artAllocArrayFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(c_suffix, cxx_suffix) \
+ THREE_ARG_DOWNCALL art_quick_alloc_array_resolved ## c_suffix, artAllocArrayFromCodeResolved ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(c_suffix, cxx_suffix) \
+ THREE_ARG_DOWNCALL art_quick_alloc_array_with_access_check ## c_suffix, artAllocArrayFromCodeWithAccessCheck ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+#define GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(c_suffix, cxx_suffix) \
+ THREE_ARG_DOWNCALL art_quick_check_and_alloc_array ## c_suffix, artCheckAndAllocArrayFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+#define GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(c_suffix, cxx_suffix) \
+ THREE_ARG_DOWNCALL art_quick_check_and_alloc_array_with_access_check ## c_suffix, artCheckAndAllocArrayFromCodeWithAccessCheck ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(c_suffix, cxx_suffix) \
+ FOUR_ARG_DOWNCALL art_quick_alloc_string_from_bytes ## c_suffix, artAllocStringFromBytesFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(c_suffix, cxx_suffix) \
+ THREE_ARG_DOWNCALL art_quick_alloc_string_from_chars ## c_suffix, artAllocStringFromCharsFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(c_suffix, cxx_suffix) \
+ ONE_ARG_DOWNCALL art_quick_alloc_string_from_string ## c_suffix, artAllocStringFromStringFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+
+.macro GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_dlmalloc, DlMalloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_dlmalloc, DlMalloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_dlmalloc, DlMalloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_dlmalloc, DlMalloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_dlmalloc, DlMalloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_dlmalloc, DlMalloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_dlmalloc, DlMalloc)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_dlmalloc, DlMalloc)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_dlmalloc, DlMalloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_dlmalloc, DlMalloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_dlmalloc, DlMalloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_dlmalloc, DlMalloc)
+
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_dlmalloc_instrumented, DlMallocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_dlmalloc_instrumented, DlMallocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_dlmalloc_instrumented, DlMallocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_dlmalloc_instrumented, DlMallocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_dlmalloc_instrumented, DlMallocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_dlmalloc_instrumented, DlMallocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_dlmalloc_instrumented, DlMallocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_dlmalloc_instrumented, DlMallocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_dlmalloc_instrumented, DlMallocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_dlmalloc_instrumented, DlMallocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_dlmalloc_instrumented, DlMallocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_dlmalloc_instrumented, DlMallocInstrumented)
+
+// This is to be separately defined for each architecture to allow a hand-written assembly fast path.
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_rosalloc, RosAlloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_rosalloc, RosAlloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_rosalloc, RosAlloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_rosalloc, RosAlloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_rosalloc, RosAlloc)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_rosalloc, RosAlloc)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_rosalloc, RosAlloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_rosalloc, RosAlloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_rosalloc, RosAlloc)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_rosalloc, RosAlloc)
+
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc_instrumented, RosAllocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc_instrumented, RosAllocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_rosalloc_instrumented, RosAllocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_rosalloc_instrumented, RosAllocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_rosalloc_instrumented, RosAllocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_rosalloc_instrumented, RosAllocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_rosalloc_instrumented, RosAllocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_rosalloc_instrumented, RosAllocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_rosalloc_instrumented, RosAllocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_rosalloc_instrumented, RosAllocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_rosalloc_instrumented, RosAllocInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_rosalloc_instrumented, RosAllocInstrumented)
+
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_bump_pointer, BumpPointer)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_bump_pointer, BumpPointer)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_bump_pointer, BumpPointer)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_bump_pointer, BumpPointer)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_bump_pointer, BumpPointer)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_bump_pointer, BumpPointer)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_bump_pointer, BumpPointer)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_bump_pointer, BumpPointer)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_bump_pointer, BumpPointer)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_bump_pointer, BumpPointer)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_bump_pointer, BumpPointer)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_bump_pointer, BumpPointer)
+
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_bump_pointer_instrumented, BumpPointerInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_bump_pointer_instrumented, BumpPointerInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_bump_pointer_instrumented, BumpPointerInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_bump_pointer_instrumented, BumpPointerInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_bump_pointer_instrumented, BumpPointerInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_bump_pointer_instrumented, BumpPointerInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_bump_pointer_instrumented, BumpPointerInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_bump_pointer_instrumented, BumpPointerInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_bump_pointer_instrumented, BumpPointerInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_bump_pointer_instrumented, BumpPointerInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_bump_pointer_instrumented, BumpPointerInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_bump_pointer_instrumented, BumpPointerInstrumented)
+
+// This is to be separately defined for each architecture to allow a hand-written assembly fast path.
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_tlab, TLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_tlab, TLAB)
+
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab_instrumented, TLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab_instrumented, TLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_tlab_instrumented, TLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_tlab_instrumented, TLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_tlab_instrumented, TLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_tlab_instrumented, TLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_tlab_instrumented, TLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_tlab_instrumented, TLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_tlab_instrumented, TLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_tlab_instrumented, TLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_tlab_instrumented, TLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_tlab_instrumented, TLABInstrumented)
+
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region, Region)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region, Region)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region, Region)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region, Region)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_region, Region)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_region, Region)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region, Region)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_region, Region)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region, Region)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region, Region)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region, Region)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region, Region)
+
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_instrumented, RegionInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_instrumented, RegionInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_instrumented, RegionInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_instrumented, RegionInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_region_instrumented, RegionInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_region_instrumented, RegionInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_instrumented, RegionInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_region_instrumented, RegionInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_instrumented, RegionInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_instrumented, RegionInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_instrumented, RegionInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_instrumented, RegionInstrumented)
+
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_tlab, RegionTLAB)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab, RegionTLAB)
+
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab_instrumented, RegionTLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab_instrumented, RegionTLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab_instrumented, RegionTLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_tlab_instrumented, RegionTLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_region_tlab_instrumented, RegionTLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_region_tlab_instrumented, RegionTLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_tlab_instrumented, RegionTLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_region_tlab_instrumented, RegionTLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_tlab_instrumented, RegionTLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_tlab_instrumented, RegionTLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_tlab_instrumented, RegionTLABInstrumented)
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab_instrumented, RegionTLABInstrumented)
+.endm
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index c3a5ce3429..1d10e5db39 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -71,351 +71,7 @@ class StubTest : public CommonRuntimeTest {
// TODO: Set up a frame according to referrer's specs.
size_t Invoke3WithReferrer(size_t arg0, size_t arg1, size_t arg2, uintptr_t code, Thread* self,
ArtMethod* referrer) {
- // Push a transition back into managed code onto the linked list in thread.
- ManagedStack fragment;
- self->PushManagedStackFragment(&fragment);
-
- size_t result;
- size_t fpr_result = 0;
-#if defined(__i386__)
- // TODO: Set the thread?
- __asm__ __volatile__(
- "subl $12, %%esp\n\t" // Align stack.
- "pushl %[referrer]\n\t" // Store referrer.
- "call *%%edi\n\t" // Call the stub
- "addl $16, %%esp" // Pop referrer
- : "=a" (result)
- // Use the result from eax
- : "a"(arg0), "c"(arg1), "d"(arg2), "D"(code), [referrer]"r"(referrer)
- // This places code into edi, arg0 into eax, arg1 into ecx, and arg2 into edx
- : "memory"); // clobber.
- // TODO: Should we clobber the other registers? EBX gets clobbered by some of the stubs,
- // but compilation fails when declaring that.
-#elif defined(__arm__)
- __asm__ __volatile__(
- "push {r1-r12, lr}\n\t" // Save state, 13*4B = 52B
- ".cfi_adjust_cfa_offset 52\n\t"
- "push {r9}\n\t"
- ".cfi_adjust_cfa_offset 4\n\t"
- "mov r9, %[referrer]\n\n"
- "str r9, [sp, #-8]!\n\t" // Push referrer, +8B padding so 16B aligned
- ".cfi_adjust_cfa_offset 8\n\t"
- "ldr r9, [sp, #8]\n\t"
-
- // Push everything on the stack, so we don't rely on the order. What a mess. :-(
- "sub sp, sp, #20\n\t"
- "str %[arg0], [sp]\n\t"
- "str %[arg1], [sp, #4]\n\t"
- "str %[arg2], [sp, #8]\n\t"
- "str %[code], [sp, #12]\n\t"
- "str %[self], [sp, #16]\n\t"
- "ldr r0, [sp]\n\t"
- "ldr r1, [sp, #4]\n\t"
- "ldr r2, [sp, #8]\n\t"
- "ldr r3, [sp, #12]\n\t"
- "ldr r9, [sp, #16]\n\t"
- "add sp, sp, #20\n\t"
-
- "blx r3\n\t" // Call the stub
- "add sp, sp, #12\n\t" // Pop null and padding
- ".cfi_adjust_cfa_offset -12\n\t"
- "pop {r1-r12, lr}\n\t" // Restore state
- ".cfi_adjust_cfa_offset -52\n\t"
- "mov %[result], r0\n\t" // Save the result
- : [result] "=r" (result)
- // Use the result from r0
- : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
- [referrer] "r"(referrer)
- : "r0", "memory"); // clobber.
-#elif defined(__aarch64__)
- __asm__ __volatile__(
- // Spill x0-x7 which we say we don't clobber. May contain args.
- "sub sp, sp, #64\n\t"
- ".cfi_adjust_cfa_offset 64\n\t"
- "stp x0, x1, [sp]\n\t"
- "stp x2, x3, [sp, #16]\n\t"
- "stp x4, x5, [sp, #32]\n\t"
- "stp x6, x7, [sp, #48]\n\t"
-
- "sub sp, sp, #16\n\t" // Reserve stack space, 16B aligned
- ".cfi_adjust_cfa_offset 16\n\t"
- "str %[referrer], [sp]\n\t" // referrer
-
- // Push everything on the stack, so we don't rely on the order. What a mess. :-(
- "sub sp, sp, #48\n\t"
- ".cfi_adjust_cfa_offset 48\n\t"
- // All things are "r" constraints, so direct str/stp should work.
- "stp %[arg0], %[arg1], [sp]\n\t"
- "stp %[arg2], %[code], [sp, #16]\n\t"
- "str %[self], [sp, #32]\n\t"
-
- // Now we definitely have x0-x3 free, use it to garble d8 - d15
- "movk x0, #0xfad0\n\t"
- "movk x0, #0xebad, lsl #16\n\t"
- "movk x0, #0xfad0, lsl #32\n\t"
- "movk x0, #0xebad, lsl #48\n\t"
- "fmov d8, x0\n\t"
- "add x0, x0, 1\n\t"
- "fmov d9, x0\n\t"
- "add x0, x0, 1\n\t"
- "fmov d10, x0\n\t"
- "add x0, x0, 1\n\t"
- "fmov d11, x0\n\t"
- "add x0, x0, 1\n\t"
- "fmov d12, x0\n\t"
- "add x0, x0, 1\n\t"
- "fmov d13, x0\n\t"
- "add x0, x0, 1\n\t"
- "fmov d14, x0\n\t"
- "add x0, x0, 1\n\t"
- "fmov d15, x0\n\t"
-
- // Load call params into the right registers.
- "ldp x0, x1, [sp]\n\t"
- "ldp x2, x3, [sp, #16]\n\t"
- "ldr x19, [sp, #32]\n\t"
- "add sp, sp, #48\n\t"
- ".cfi_adjust_cfa_offset -48\n\t"
-
-
- "blr x3\n\t" // Call the stub
- "mov x8, x0\n\t" // Store result
- "add sp, sp, #16\n\t" // Drop the quick "frame"
- ".cfi_adjust_cfa_offset -16\n\t"
-
- // Test d8 - d15. We can use x1 and x2.
- "movk x1, #0xfad0\n\t"
- "movk x1, #0xebad, lsl #16\n\t"
- "movk x1, #0xfad0, lsl #32\n\t"
- "movk x1, #0xebad, lsl #48\n\t"
- "fmov x2, d8\n\t"
- "cmp x1, x2\n\t"
- "b.ne 1f\n\t"
- "add x1, x1, 1\n\t"
-
- "fmov x2, d9\n\t"
- "cmp x1, x2\n\t"
- "b.ne 1f\n\t"
- "add x1, x1, 1\n\t"
-
- "fmov x2, d10\n\t"
- "cmp x1, x2\n\t"
- "b.ne 1f\n\t"
- "add x1, x1, 1\n\t"
-
- "fmov x2, d11\n\t"
- "cmp x1, x2\n\t"
- "b.ne 1f\n\t"
- "add x1, x1, 1\n\t"
-
- "fmov x2, d12\n\t"
- "cmp x1, x2\n\t"
- "b.ne 1f\n\t"
- "add x1, x1, 1\n\t"
-
- "fmov x2, d13\n\t"
- "cmp x1, x2\n\t"
- "b.ne 1f\n\t"
- "add x1, x1, 1\n\t"
-
- "fmov x2, d14\n\t"
- "cmp x1, x2\n\t"
- "b.ne 1f\n\t"
- "add x1, x1, 1\n\t"
-
- "fmov x2, d15\n\t"
- "cmp x1, x2\n\t"
- "b.ne 1f\n\t"
-
- "mov x9, #0\n\t" // Use x9 as flag, in clobber list
-
- // Finish up.
- "2:\n\t"
- "ldp x0, x1, [sp]\n\t" // Restore stuff not named clobbered, may contain fpr_result
- "ldp x2, x3, [sp, #16]\n\t"
- "ldp x4, x5, [sp, #32]\n\t"
- "ldp x6, x7, [sp, #48]\n\t"
- "add sp, sp, #64\n\t" // Free stack space, now sp as on entry
- ".cfi_adjust_cfa_offset -64\n\t"
-
- "str x9, %[fpr_result]\n\t" // Store the FPR comparison result
- "mov %[result], x8\n\t" // Store the call result
-
- "b 3f\n\t" // Goto end
-
- // Failed fpr verification.
- "1:\n\t"
- "mov x9, #1\n\t"
- "b 2b\n\t" // Goto finish-up
-
- // End
- "3:\n\t"
- : [result] "=r" (result)
- // Use the result from r0
- : [arg0] "0"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
- [referrer] "r"(referrer), [fpr_result] "m" (fpr_result)
- : "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20",
- "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "x30",
- "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
- "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
- "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
- "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
- "memory"); // clobber.
-#elif defined(__mips__) && !defined(__LP64__)
- __asm__ __volatile__ (
- // Spill a0-a3 and t0-t7 which we say we don't clobber. May contain args.
- "addiu $sp, $sp, -64\n\t"
- "sw $a0, 0($sp)\n\t"
- "sw $a1, 4($sp)\n\t"
- "sw $a2, 8($sp)\n\t"
- "sw $a3, 12($sp)\n\t"
- "sw $t0, 16($sp)\n\t"
- "sw $t1, 20($sp)\n\t"
- "sw $t2, 24($sp)\n\t"
- "sw $t3, 28($sp)\n\t"
- "sw $t4, 32($sp)\n\t"
- "sw $t5, 36($sp)\n\t"
- "sw $t6, 40($sp)\n\t"
- "sw $t7, 44($sp)\n\t"
- // Spill gp register since it is caller save.
- "sw $gp, 52($sp)\n\t"
-
- "addiu $sp, $sp, -16\n\t" // Reserve stack space, 16B aligned.
- "sw %[referrer], 0($sp)\n\t"
-
- // Push everything on the stack, so we don't rely on the order.
- "addiu $sp, $sp, -20\n\t"
- "sw %[arg0], 0($sp)\n\t"
- "sw %[arg1], 4($sp)\n\t"
- "sw %[arg2], 8($sp)\n\t"
- "sw %[code], 12($sp)\n\t"
- "sw %[self], 16($sp)\n\t"
-
- // Load call params into the right registers.
- "lw $a0, 0($sp)\n\t"
- "lw $a1, 4($sp)\n\t"
- "lw $a2, 8($sp)\n\t"
- "lw $t9, 12($sp)\n\t"
- "lw $s1, 16($sp)\n\t"
- "addiu $sp, $sp, 20\n\t"
-
- "jalr $t9\n\t" // Call the stub.
- "nop\n\t"
- "addiu $sp, $sp, 16\n\t" // Drop the quick "frame".
-
- // Restore stuff not named clobbered.
- "lw $a0, 0($sp)\n\t"
- "lw $a1, 4($sp)\n\t"
- "lw $a2, 8($sp)\n\t"
- "lw $a3, 12($sp)\n\t"
- "lw $t0, 16($sp)\n\t"
- "lw $t1, 20($sp)\n\t"
- "lw $t2, 24($sp)\n\t"
- "lw $t3, 28($sp)\n\t"
- "lw $t4, 32($sp)\n\t"
- "lw $t5, 36($sp)\n\t"
- "lw $t6, 40($sp)\n\t"
- "lw $t7, 44($sp)\n\t"
- // Restore gp.
- "lw $gp, 52($sp)\n\t"
- "addiu $sp, $sp, 64\n\t" // Free stack space, now sp as on entry.
-
- "move %[result], $v0\n\t" // Store the call result.
- : [result] "=r" (result)
- : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
- [referrer] "r"(referrer)
- : "at", "v0", "v1", "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "t8", "t9", "k0", "k1",
- "fp", "ra",
- "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7", "$f8", "$f9", "$f10", "$f11",
- "$f12", "$f13", "$f14", "$f15", "$f16", "$f17", "$f18", "$f19", "$f20", "$f21", "$f22",
- "$f23", "$f24", "$f25", "$f26", "$f27", "$f28", "$f29", "$f30", "$f31",
- "memory"); // clobber.
-#elif defined(__mips__) && defined(__LP64__)
- __asm__ __volatile__ (
- // Spill a0-a7 which we say we don't clobber. May contain args.
- "daddiu $sp, $sp, -64\n\t"
- "sd $a0, 0($sp)\n\t"
- "sd $a1, 8($sp)\n\t"
- "sd $a2, 16($sp)\n\t"
- "sd $a3, 24($sp)\n\t"
- "sd $a4, 32($sp)\n\t"
- "sd $a5, 40($sp)\n\t"
- "sd $a6, 48($sp)\n\t"
- "sd $a7, 56($sp)\n\t"
-
- "daddiu $sp, $sp, -16\n\t" // Reserve stack space, 16B aligned.
- "sd %[referrer], 0($sp)\n\t"
-
- // Push everything on the stack, so we don't rely on the order.
- "daddiu $sp, $sp, -40\n\t"
- "sd %[arg0], 0($sp)\n\t"
- "sd %[arg1], 8($sp)\n\t"
- "sd %[arg2], 16($sp)\n\t"
- "sd %[code], 24($sp)\n\t"
- "sd %[self], 32($sp)\n\t"
-
- // Load call params into the right registers.
- "ld $a0, 0($sp)\n\t"
- "ld $a1, 8($sp)\n\t"
- "ld $a2, 16($sp)\n\t"
- "ld $t9, 24($sp)\n\t"
- "ld $s1, 32($sp)\n\t"
- "daddiu $sp, $sp, 40\n\t"
-
- "jalr $t9\n\t" // Call the stub.
- "nop\n\t"
- "daddiu $sp, $sp, 16\n\t" // Drop the quick "frame".
-
- // Restore stuff not named clobbered.
- "ld $a0, 0($sp)\n\t"
- "ld $a1, 8($sp)\n\t"
- "ld $a2, 16($sp)\n\t"
- "ld $a3, 24($sp)\n\t"
- "ld $a4, 32($sp)\n\t"
- "ld $a5, 40($sp)\n\t"
- "ld $a6, 48($sp)\n\t"
- "ld $a7, 56($sp)\n\t"
- "daddiu $sp, $sp, 64\n\t"
-
- "move %[result], $v0\n\t" // Store the call result.
- : [result] "=r" (result)
- : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
- [referrer] "r"(referrer)
- : "at", "v0", "v1", "t0", "t1", "t2", "t3", "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
- "t8", "t9", "k0", "k1", "fp", "ra",
- "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10", "f11", "f12", "f13",
- "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", "f24", "f25", "f26",
- "f27", "f28", "f29", "f30", "f31",
- "memory"); // clobber.
-#elif defined(__x86_64__) && !defined(__APPLE__) && defined(__clang__)
- // Note: Uses the native convention
- // TODO: Set the thread?
- __asm__ __volatile__(
- "pushq %[referrer]\n\t" // Push referrer
- "pushq (%%rsp)\n\t" // & 16B alignment padding
- ".cfi_adjust_cfa_offset 16\n\t"
- "call *%%rax\n\t" // Call the stub
- "addq $16, %%rsp\n\t" // Pop null and padding
- ".cfi_adjust_cfa_offset -16\n\t"
- : "=a" (result)
- // Use the result from rax
- : "D"(arg0), "S"(arg1), "d"(arg2), "a"(code), [referrer] "c"(referrer)
- // This places arg0 into rdi, arg1 into rsi, arg2 into rdx, and code into rax
- : "rbx", "rbp", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
- "memory"); // clobber all
- // TODO: Should we clobber the other registers?
-#else
- UNUSED(arg0, arg1, arg2, code, referrer);
- LOG(WARNING) << "Was asked to invoke for an architecture I do not understand.";
- result = 0;
-#endif
- // Pop transition.
- self->PopManagedStackFragment(fragment);
-
- fp_result = fpr_result;
- EXPECT_EQ(0U, fp_result);
-
- return result;
+ return Invoke3WithReferrerAndHidden(arg0, arg1, arg2, code, self, referrer, 0);
}
// TODO: Set up a frame according to referrer's specs.
@@ -429,19 +85,55 @@ class StubTest : public CommonRuntimeTest {
size_t fpr_result = 0;
#if defined(__i386__)
// TODO: Set the thread?
+#define PUSH(reg) "push " # reg "\n\t .cfi_adjust_cfa_offset 4\n\t"
+#define POP(reg) "pop " # reg "\n\t .cfi_adjust_cfa_offset -4\n\t"
__asm__ __volatile__(
- "movd %[hidden], %%xmm7\n\t"
- "subl $12, %%esp\n\t" // Align stack.
- "pushl %[referrer]\n\t" // Store referrer
+ "movd %[hidden], %%xmm7\n\t" // This is a memory op, so do this early. If it is off of
+ // esp, then we won't be able to access it after spilling.
+
+ // Spill 6 registers.
+ PUSH(%%ebx)
+ PUSH(%%ecx)
+ PUSH(%%edx)
+ PUSH(%%esi)
+ PUSH(%%edi)
+ PUSH(%%ebp)
+
+ // Store the inputs to the stack, but keep the referrer up top, less work.
+ PUSH(%[referrer]) // Align stack.
+ PUSH(%[referrer]) // Store referrer
+
+ PUSH(%[arg0])
+ PUSH(%[arg1])
+ PUSH(%[arg2])
+ PUSH(%[code])
+ // Now read them back into the required registers.
+ POP(%%edi)
+ POP(%%edx)
+ POP(%%ecx)
+ POP(%%eax)
+ // Call is prepared now.
+
"call *%%edi\n\t" // Call the stub
- "addl $16, %%esp" // Pop referrer
+ "addl $8, %%esp\n\t" // Pop referrer and padding.
+ ".cfi_adjust_cfa_offset -8\n\t"
+
+ // Restore 6 registers.
+ POP(%%ebp)
+ POP(%%edi)
+ POP(%%esi)
+ POP(%%edx)
+ POP(%%ecx)
+ POP(%%ebx)
+
: "=a" (result)
// Use the result from eax
- : "a"(arg0), "c"(arg1), "d"(arg2), "D"(code), [referrer]"r"(referrer), [hidden]"m"(hidden)
+ : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code),
+ [referrer]"r"(referrer), [hidden]"m"(hidden)
// This places code into edi, arg0 into eax, arg1 into ecx, and arg2 into edx
- : "memory"); // clobber.
- // TODO: Should we clobber the other registers? EBX gets clobbered by some of the stubs,
- // but compilation fails when declaring that.
+ : "memory", "xmm7"); // clobber.
+#undef PUSH
+#undef POP
#elif defined(__arm__)
__asm__ __volatile__(
"push {r1-r12, lr}\n\t" // Save state, 13*4B = 52B
@@ -743,23 +435,72 @@ class StubTest : public CommonRuntimeTest {
"f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", "f24", "f25", "f26",
"f27", "f28", "f29", "f30", "f31",
"memory"); // clobber.
-#elif defined(__x86_64__) && !defined(__APPLE__) && defined(__clang__)
- // Note: Uses the native convention
+#elif defined(__x86_64__) && !defined(__APPLE__)
+#define PUSH(reg) "pushq " # reg "\n\t .cfi_adjust_cfa_offset 8\n\t"
+#define POP(reg) "popq " # reg "\n\t .cfi_adjust_cfa_offset -8\n\t"
+ // Note: Uses the native convention. We do a callee-save regimen by manually spilling and
+ // restoring almost all registers.
// TODO: Set the thread?
__asm__ __volatile__(
- "pushq %[referrer]\n\t" // Push referrer
- "pushq (%%rsp)\n\t" // & 16B alignment padding
- ".cfi_adjust_cfa_offset 16\n\t"
- "call *%%rbx\n\t" // Call the stub
- "addq $16, %%rsp\n\t" // Pop null and padding
+ // Spill almost everything (except rax, rsp). 14 registers.
+ PUSH(%%rbx)
+ PUSH(%%rcx)
+ PUSH(%%rdx)
+ PUSH(%%rsi)
+ PUSH(%%rdi)
+ PUSH(%%rbp)
+ PUSH(%%r8)
+ PUSH(%%r9)
+ PUSH(%%r10)
+ PUSH(%%r11)
+ PUSH(%%r12)
+ PUSH(%%r13)
+ PUSH(%%r14)
+ PUSH(%%r15)
+
+ PUSH(%[referrer]) // Push referrer & 16B alignment padding
+ PUSH(%[referrer])
+
+ // Now juggle the input registers.
+ PUSH(%[arg0])
+ PUSH(%[arg1])
+ PUSH(%[arg2])
+ PUSH(%[hidden])
+ PUSH(%[code])
+ POP(%%r8)
+ POP(%%rax)
+ POP(%%rdx)
+ POP(%%rsi)
+ POP(%%rdi)
+
+ "call *%%r8\n\t" // Call the stub
+ "addq $16, %%rsp\n\t" // Pop null and padding
".cfi_adjust_cfa_offset -16\n\t"
+
+ POP(%%r15)
+ POP(%%r14)
+ POP(%%r13)
+ POP(%%r12)
+ POP(%%r11)
+ POP(%%r10)
+ POP(%%r9)
+ POP(%%r8)
+ POP(%%rbp)
+ POP(%%rdi)
+ POP(%%rsi)
+ POP(%%rdx)
+ POP(%%rcx)
+ POP(%%rbx)
+
: "=a" (result)
// Use the result from rax
- : "D"(arg0), "S"(arg1), "d"(arg2), "b"(code), [referrer] "c"(referrer), [hidden] "a"(hidden)
- // This places arg0 into rdi, arg1 into rsi, arg2 into rdx, and code into rax
- : "rbp", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
- "memory"); // clobber all
- // TODO: Should we clobber the other registers?
+ : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code),
+ [referrer] "r"(referrer), [hidden] "r"(hidden)
+ // This places arg0 into rdi, arg1 into rsi, arg2 into rdx, and code into some other
+ // register. We can't use "b" (rbx), as ASAN uses this for the frame pointer.
+ : "memory"); // We spill and restore (almost) all registers, so only mention memory here.
+#undef PUSH
+#undef POP
#else
UNUSED(arg0, arg1, arg2, code, referrer, hidden);
LOG(WARNING) << "Was asked to invoke for an architecture I do not understand.";
@@ -1970,11 +1711,11 @@ static void GetSet64Static(ArtField* f, Thread* self, ArtMethod* referrer,
for (size_t i = 0; i < arraysize(values); ++i) {
// 64 bit FieldSet stores the set value in the second register.
test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
- 0U,
- values[i],
- StubTest::GetEntrypoint(self, kQuickSet64Static),
- self,
- referrer);
+ 0U,
+ values[i],
+ StubTest::GetEntrypoint(self, kQuickSet64Static),
+ self,
+ referrer);
size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
0U, 0U,
diff --git a/runtime/arch/x86/context_x86.cc b/runtime/arch/x86/context_x86.cc
index 987ad60fd8..0d88dd0dc5 100644
--- a/runtime/arch/x86/context_x86.cc
+++ b/runtime/arch/x86/context_x86.cc
@@ -16,9 +16,10 @@
#include "context_x86.h"
-#include "art_method-inl.h"
+#include "art_code.h"
#include "base/bit_utils.h"
#include "quick/quick_method_frame_info.h"
+#include "stack.h"
namespace art {
namespace x86 {
@@ -37,8 +38,8 @@ void X86Context::Reset() {
}
void X86Context::FillCalleeSaves(const StackVisitor& fr) {
- ArtMethod* method = fr.GetMethod();
- const QuickMethodFrameInfo frame_info = method->GetQuickFrameInfo();
+ ArtCode code = fr.GetCurrentCode();
+ const QuickMethodFrameInfo frame_info = code.GetQuickFrameInfo();
int spill_pos = 0;
// Core registers come first, from the highest down to the lowest.
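The loop that follows this comment is untouched by the patch; as a hedged sketch of how such a loop typically consumes the frame info fetched above (helper names like HighToLowBits and CalleeSaveAddress are assumptions, not taken from this diff):

    int spill_pos = 0;
    // Record the stack address of each callee-saved core register, highest register first.
    for (uint32_t core_reg : HighToLowBits(frame_info.CoreSpillMask())) {  // assumed helper
      gprs_[core_reg] = fr.CalleeSaveAddress(spill_pos, frame_info.FrameSizeInBytes());
      ++spill_pos;
    }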
diff --git a/runtime/arch/x86/entrypoints_init_x86.cc b/runtime/arch/x86/entrypoints_init_x86.cc
index 10fc281e3d..019546f2ce 100644
--- a/runtime/arch/x86/entrypoints_init_x86.cc
+++ b/runtime/arch/x86/entrypoints_init_x86.cc
@@ -14,7 +14,6 @@
* limitations under the License.
*/
-#include "entrypoints/interpreter/interpreter_entrypoints.h"
#include "entrypoints/jni/jni_entrypoints.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "entrypoints/quick/quick_default_externs.h"
@@ -31,12 +30,7 @@ extern "C" uint32_t art_quick_is_assignable(const mirror::Class* klass,
// Read barrier entrypoints.
extern "C" mirror::Object* art_quick_read_barrier_slow(mirror::Object*, mirror::Object*, uint32_t);
-void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
- QuickEntryPoints* qpoints) {
- // Interpreter
- ipoints->pInterpreterToInterpreterBridge = artInterpreterToInterpreterBridge;
- ipoints->pInterpreterToCompiledCodeBridge = artInterpreterToCompiledCodeBridge;
-
+void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
// JNI
jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub;
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 3afc4d545f..2f485ae644 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -16,6 +16,8 @@
#include "asm_support_x86.S"
+#include "arch/quick_alloc_entrypoints.S"
+
// For x86, the CFA is esp+4, the address above the pushed return address on the stack.
/*
@@ -760,7 +762,7 @@ MACRO3(THREE_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
END_FUNCTION VAR(c_name)
END_MACRO
-MACRO0(RETURN_IF_RESULT_IS_NON_ZERO)
+MACRO0(RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER)
testl %eax, %eax // eax == 0 ?
jz 1f // if eax == 0 goto 1
ret // return
@@ -785,195 +787,14 @@ MACRO0(RETURN_OR_DELIVER_PENDING_EXCEPTION)
END_MACRO
// Generate the allocation entrypoints for each allocator.
-// TODO: use arch/quick_alloc_entrypoints.S. Currently we don't as we need to use concatenation
-// macros to work around differences between OS/X's as and binutils as (OS/X lacks named arguments
-// to macros and the VAR macro won't concatenate arguments properly), this also breaks having
-// multi-line macros that use each other (hence using 1 macro per newline below).
-#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(c_suffix, cxx_suffix) \
- TWO_ARG_DOWNCALL art_quick_alloc_object ## c_suffix, artAllocObjectFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
-#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(c_suffix, cxx_suffix) \
- TWO_ARG_DOWNCALL art_quick_alloc_object_resolved ## c_suffix, artAllocObjectFromCodeResolved ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
-#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(c_suffix, cxx_suffix) \
- TWO_ARG_DOWNCALL art_quick_alloc_object_initialized ## c_suffix, artAllocObjectFromCodeInitialized ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
-#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(c_suffix, cxx_suffix) \
- TWO_ARG_DOWNCALL art_quick_alloc_object_with_access_check ## c_suffix, artAllocObjectFromCodeWithAccessCheck ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
-#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(c_suffix, cxx_suffix) \
- THREE_ARG_DOWNCALL art_quick_alloc_array ## c_suffix, artAllocArrayFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
-#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(c_suffix, cxx_suffix) \
- THREE_ARG_DOWNCALL art_quick_alloc_array_resolved ## c_suffix, artAllocArrayFromCodeResolved ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
-#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(c_suffix, cxx_suffix) \
- THREE_ARG_DOWNCALL art_quick_alloc_array_with_access_check ## c_suffix, artAllocArrayFromCodeWithAccessCheck ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
-#define GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(c_suffix, cxx_suffix) \
- THREE_ARG_DOWNCALL art_quick_check_and_alloc_array ## c_suffix, artCheckAndAllocArrayFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
-#define GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(c_suffix, cxx_suffix) \
- THREE_ARG_DOWNCALL art_quick_check_and_alloc_array_with_access_check ## c_suffix, artCheckAndAllocArrayFromCodeWithAccessCheck ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
-#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(c_suffix, cxx_suffix) \
- FOUR_ARG_DOWNCALL art_quick_alloc_string_from_bytes ## c_suffix, artAllocStringFromBytesFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
-#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(c_suffix, cxx_suffix) \
- THREE_ARG_DOWNCALL art_quick_alloc_string_from_chars ## c_suffix, artAllocStringFromCharsFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
-#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(c_suffix, cxx_suffix) \
- ONE_ARG_DOWNCALL art_quick_alloc_string_from_string ## c_suffix, artAllocStringFromStringFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
-
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_dlmalloc, DlMalloc)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_dlmalloc, DlMalloc)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_dlmalloc, DlMalloc)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_dlmalloc, DlMalloc)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_dlmalloc, DlMalloc)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_dlmalloc, DlMalloc)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_dlmalloc, DlMalloc)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_dlmalloc, DlMalloc)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_dlmalloc, DlMalloc)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_dlmalloc, DlMalloc)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_dlmalloc, DlMalloc)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_dlmalloc, DlMalloc)
-
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_dlmalloc_instrumented, DlMallocInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_dlmalloc_instrumented, DlMallocInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_dlmalloc_instrumented, DlMallocInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_dlmalloc_instrumented, DlMallocInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_dlmalloc_instrumented, DlMallocInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_dlmalloc_instrumented, DlMallocInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_dlmalloc_instrumented, DlMallocInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_dlmalloc_instrumented, DlMallocInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_dlmalloc_instrumented, DlMallocInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_dlmalloc_instrumented, DlMallocInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_dlmalloc_instrumented, DlMallocInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_dlmalloc_instrumented, DlMallocInstrumented)
-
+GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_rosalloc, RosAlloc)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_rosalloc, RosAlloc)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_rosalloc, RosAlloc)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_rosalloc, RosAlloc)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_rosalloc, RosAlloc)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_rosalloc, RosAlloc)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_rosalloc, RosAlloc)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_rosalloc, RosAlloc)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_rosalloc, RosAlloc)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_rosalloc, RosAlloc)
-
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc_instrumented, RosAllocInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc_instrumented, RosAllocInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_rosalloc_instrumented, RosAllocInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_rosalloc_instrumented, RosAllocInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_rosalloc_instrumented, RosAllocInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_rosalloc_instrumented, RosAllocInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_rosalloc_instrumented, RosAllocInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_rosalloc_instrumented, RosAllocInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_rosalloc_instrumented, RosAllocInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_rosalloc_instrumented, RosAllocInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_rosalloc_instrumented, RosAllocInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_rosalloc_instrumented, RosAllocInstrumented)
-
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_bump_pointer, BumpPointer)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_bump_pointer, BumpPointer)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_bump_pointer, BumpPointer)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_bump_pointer, BumpPointer)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_bump_pointer, BumpPointer)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_bump_pointer, BumpPointer)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_bump_pointer, BumpPointer)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_bump_pointer, BumpPointer)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_bump_pointer, BumpPointer)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_bump_pointer, BumpPointer)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_bump_pointer, BumpPointer)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_bump_pointer, BumpPointer)
-
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_bump_pointer_instrumented, BumpPointerInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_bump_pointer_instrumented, BumpPointerInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_bump_pointer_instrumented, BumpPointerInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_bump_pointer_instrumented, BumpPointerInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_bump_pointer_instrumented, BumpPointerInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_bump_pointer_instrumented, BumpPointerInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_bump_pointer_instrumented, BumpPointerInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_bump_pointer_instrumented, BumpPointerInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_bump_pointer_instrumented, BumpPointerInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_bump_pointer_instrumented, BumpPointerInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_bump_pointer_instrumented, BumpPointerInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_bump_pointer_instrumented, BumpPointerInstrumented)
-
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_tlab, TLAB)
-
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab_instrumented, TLABInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab_instrumented, TLABInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_tlab_instrumented, TLABInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_tlab_instrumented, TLABInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_tlab_instrumented, TLABInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_tlab_instrumented, TLABInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_tlab_instrumented, TLABInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_tlab_instrumented, TLABInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_tlab_instrumented, TLABInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_tlab_instrumented, TLABInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_tlab_instrumented, TLABInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_tlab_instrumented, TLABInstrumented)
-
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region, Region)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region, Region)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region, Region)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region, Region)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_region, Region)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_region, Region)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region, Region)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_region, Region)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region, Region)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region, Region)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region, Region)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region, Region)
-
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_instrumented, RegionInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_instrumented, RegionInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_instrumented, RegionInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_instrumented, RegionInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_region_instrumented, RegionInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_region_instrumented, RegionInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_instrumented, RegionInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_region_instrumented, RegionInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_instrumented, RegionInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_instrumented, RegionInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_instrumented, RegionInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_instrumented, RegionInstrumented)
-
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab, RegionTLAB)
-
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab_instrumented, RegionTLABInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab_instrumented, RegionTLABInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab_instrumented, RegionTLABInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_tlab_instrumented, RegionTLABInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_region_tlab_instrumented, RegionTLABInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_region_tlab_instrumented, RegionTLABInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_tlab_instrumented, RegionTLABInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_region_tlab_instrumented, RegionTLABInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_tlab_instrumented, RegionTLABInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_tlab_instrumented, RegionTLABInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_tlab_instrumented, RegionTLABInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab_instrumented, RegionTLABInstrumented)
-
-ONE_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_RESULT_IS_NON_ZERO
-ONE_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_RESULT_IS_NON_ZERO
-ONE_ARG_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode, RETURN_IF_RESULT_IS_NON_ZERO
-ONE_ARG_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_RESULT_IS_NON_ZERO
+
+ONE_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ONE_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ONE_ARG_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ONE_ARG_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
TWO_ARG_REF_DOWNCALL art_quick_handle_fill_data, artHandleFillArrayDataFromCode, RETURN_IF_EAX_ZERO
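For reference, the GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR line above stands in for the per-allocator invocations deleted here; mechanically expanding one of the deleted lines with the old local macro definitions shows what each such line used to emit:

    GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc)
    // expanded (with the old definitions) to:
    TWO_ARG_DOWNCALL art_quick_alloc_object_rosalloc, artAllocObjectFromCodeRosAlloc, RETURN_IF_RESULT_IS_NON_ZERO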
diff --git a/runtime/arch/x86_64/context_x86_64.cc b/runtime/arch/x86_64/context_x86_64.cc
index 3dc7d71df4..12c94bc598 100644
--- a/runtime/arch/x86_64/context_x86_64.cc
+++ b/runtime/arch/x86_64/context_x86_64.cc
@@ -16,9 +16,10 @@
#include "context_x86_64.h"
-#include "art_method-inl.h"
+#include "art_code.h"
#include "base/bit_utils.h"
#include "quick/quick_method_frame_info.h"
+#include "stack.h"
namespace art {
namespace x86_64 {
@@ -37,8 +38,8 @@ void X86_64Context::Reset() {
}
void X86_64Context::FillCalleeSaves(const StackVisitor& fr) {
- ArtMethod* method = fr.GetMethod();
- const QuickMethodFrameInfo frame_info = method->GetQuickFrameInfo();
+ ArtCode code = fr.GetCurrentCode();
+ const QuickMethodFrameInfo frame_info = code.GetQuickFrameInfo();
int spill_pos = 0;
// Core registers come first, from the highest down to the lowest.
diff --git a/runtime/arch/x86_64/entrypoints_init_x86_64.cc b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
index 5cc72e3c4b..eae09ee1f4 100644
--- a/runtime/arch/x86_64/entrypoints_init_x86_64.cc
+++ b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
@@ -14,7 +14,6 @@
* limitations under the License.
*/
-#include "entrypoints/interpreter/interpreter_entrypoints.h"
#include "entrypoints/jni/jni_entrypoints.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "entrypoints/quick/quick_default_externs.h"
@@ -32,16 +31,11 @@ extern "C" uint32_t art_quick_assignable_from_code(const mirror::Class* klass,
// Read barrier entrypoints.
extern "C" mirror::Object* art_quick_read_barrier_slow(mirror::Object*, mirror::Object*, uint32_t);
-void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
- QuickEntryPoints* qpoints) {
+void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
#if defined(__APPLE__)
- UNUSED(ipoints, jpoints, qpoints);
+ UNUSED(jpoints, qpoints);
UNIMPLEMENTED(FATAL);
#else
- // Interpreter
- ipoints->pInterpreterToInterpreterBridge = artInterpreterToInterpreterBridge;
- ipoints->pInterpreterToCompiledCodeBridge = artInterpreterToCompiledCodeBridge;
-
// JNI
jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub;
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 1133203e31..95f0ccb419 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -16,6 +16,8 @@
#include "asm_support_x86_64.S"
+#include "arch/quick_alloc_entrypoints.S"
+
MACRO0(SETUP_FP_CALLEE_SAVE_FRAME)
// Create space for ART FP callee-saved registers
subq MACRO_LITERAL(4 * 8), %rsp
@@ -780,7 +782,7 @@ MACRO3(THREE_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
END_FUNCTION VAR(c_name)
END_MACRO
-MACRO0(RETURN_IF_RESULT_IS_NON_ZERO)
+MACRO0(RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER)
testq %rax, %rax // rax == 0 ?
jz 1f // if rax == 0 goto 1
ret // return
@@ -806,113 +808,9 @@ MACRO0(RETURN_OR_DELIVER_PENDING_EXCEPTION)
END_MACRO
// Generate the allocation entrypoints for each allocator.
-// TODO: use arch/quick_alloc_entrypoints.S. Currently we don't as we need to use concatenation
-// macros to work around differences between OS/X's as and binutils as (OS/X lacks named arguments
-// to macros and the VAR macro won't concatenate arguments properly), this also breaks having
-// multi-line macros that use each other (hence using 1 macro per newline below).
-#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(c_suffix, cxx_suffix) \
- TWO_ARG_DOWNCALL art_quick_alloc_object ## c_suffix, artAllocObjectFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
-#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(c_suffix, cxx_suffix) \
- TWO_ARG_DOWNCALL art_quick_alloc_object_resolved ## c_suffix, artAllocObjectFromCodeResolved ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
-#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(c_suffix, cxx_suffix) \
- TWO_ARG_DOWNCALL art_quick_alloc_object_initialized ## c_suffix, artAllocObjectFromCodeInitialized ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
-#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(c_suffix, cxx_suffix) \
- TWO_ARG_DOWNCALL art_quick_alloc_object_with_access_check ## c_suffix, artAllocObjectFromCodeWithAccessCheck ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
-#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(c_suffix, cxx_suffix) \
- THREE_ARG_DOWNCALL art_quick_alloc_array ## c_suffix, artAllocArrayFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
-#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(c_suffix, cxx_suffix) \
- THREE_ARG_DOWNCALL art_quick_alloc_array_resolved ## c_suffix, artAllocArrayFromCodeResolved ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
-#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(c_suffix, cxx_suffix) \
- THREE_ARG_DOWNCALL art_quick_alloc_array_with_access_check ## c_suffix, artAllocArrayFromCodeWithAccessCheck ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
-#define GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(c_suffix, cxx_suffix) \
- THREE_ARG_DOWNCALL art_quick_check_and_alloc_array ## c_suffix, artCheckAndAllocArrayFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
-#define GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(c_suffix, cxx_suffix) \
- THREE_ARG_DOWNCALL art_quick_check_and_alloc_array_with_access_check ## c_suffix, artCheckAndAllocArrayFromCodeWithAccessCheck ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
-#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(c_suffix, cxx_suffix) \
- FOUR_ARG_DOWNCALL art_quick_alloc_string_from_bytes ## c_suffix, artAllocStringFromBytesFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
-#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(c_suffix, cxx_suffix) \
- THREE_ARG_DOWNCALL art_quick_alloc_string_from_chars ## c_suffix, artAllocStringFromCharsFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
-#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(c_suffix, cxx_suffix) \
- ONE_ARG_DOWNCALL art_quick_alloc_string_from_string ## c_suffix, artAllocStringFromStringFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO
-
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_dlmalloc, DlMalloc)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_dlmalloc, DlMalloc)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_dlmalloc, DlMalloc)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_dlmalloc, DlMalloc)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_dlmalloc, DlMalloc)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_dlmalloc, DlMalloc)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_dlmalloc, DlMalloc)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_dlmalloc, DlMalloc)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_dlmalloc, DlMalloc)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_dlmalloc, DlMalloc)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_dlmalloc, DlMalloc)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_dlmalloc, DlMalloc)
-
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_dlmalloc_instrumented, DlMallocInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_dlmalloc_instrumented, DlMallocInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_dlmalloc_instrumented, DlMallocInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_dlmalloc_instrumented, DlMallocInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_dlmalloc_instrumented, DlMallocInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_dlmalloc_instrumented, DlMallocInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_dlmalloc_instrumented, DlMallocInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_dlmalloc_instrumented, DlMallocInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_dlmalloc_instrumented, DlMallocInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_dlmalloc_instrumented, DlMallocInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_dlmalloc_instrumented, DlMallocInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_dlmalloc_instrumented, DlMallocInstrumented)
-
+GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_rosalloc, RosAlloc)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_rosalloc, RosAlloc)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_rosalloc, RosAlloc)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_rosalloc, RosAlloc)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_rosalloc, RosAlloc)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_rosalloc, RosAlloc)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_rosalloc, RosAlloc)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_rosalloc, RosAlloc)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_rosalloc, RosAlloc)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_rosalloc, RosAlloc)
-
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc_instrumented, RosAllocInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc_instrumented, RosAllocInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_rosalloc_instrumented, RosAllocInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_rosalloc_instrumented, RosAllocInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_rosalloc_instrumented, RosAllocInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_rosalloc_instrumented, RosAllocInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_rosalloc_instrumented, RosAllocInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_rosalloc_instrumented, RosAllocInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_rosalloc_instrumented, RosAllocInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_rosalloc_instrumented, RosAllocInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_rosalloc_instrumented, RosAllocInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_rosalloc_instrumented, RosAllocInstrumented)
-
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_bump_pointer, BumpPointer)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_bump_pointer, BumpPointer)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_bump_pointer, BumpPointer)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_bump_pointer, BumpPointer)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_bump_pointer, BumpPointer)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_bump_pointer, BumpPointer)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_bump_pointer, BumpPointer)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_bump_pointer, BumpPointer)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_bump_pointer, BumpPointer)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_bump_pointer, BumpPointer)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_bump_pointer, BumpPointer)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_bump_pointer, BumpPointer)
-
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_bump_pointer_instrumented, BumpPointerInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_bump_pointer_instrumented, BumpPointerInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_bump_pointer_instrumented, BumpPointerInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_bump_pointer_instrumented, BumpPointerInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_bump_pointer_instrumented, BumpPointerInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_bump_pointer_instrumented, BumpPointerInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_bump_pointer_instrumented, BumpPointerInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_bump_pointer_instrumented, BumpPointerInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_bump_pointer_instrumented, BumpPointerInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_bump_pointer_instrumented, BumpPointerInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_bump_pointer_instrumented, BumpPointerInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_bump_pointer_instrumented, BumpPointerInstrumented)
-
+// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB).
DEFINE_FUNCTION art_quick_alloc_object_tlab
// Fast path tlab allocation.
// RDI: uint32_t type_idx, RSI: ArtMethod*
@@ -954,95 +852,18 @@ DEFINE_FUNCTION art_quick_alloc_object_tlab
movl %edx, MIRROR_OBJECT_CLASS_OFFSET(%rax)
ret // Fast path succeeded.
.Lart_quick_alloc_object_tlab_slow_path:
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC
// Outgoing argument set up
- movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
- call SYMBOL(artAllocObjectFromCodeTLAB) // cxx_name(arg0, arg1, Thread*)
- RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
- RETURN_IF_RESULT_IS_NON_ZERO // return or deliver exception
+ movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
+ call SYMBOL(artAllocObjectFromCodeTLAB) // cxx_name(arg0, arg1, Thread*)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
END_FUNCTION art_quick_alloc_object_tlab
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_tlab, TLAB)
-
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab_instrumented, TLABInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab_instrumented, TLABInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_tlab_instrumented, TLABInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_tlab_instrumented, TLABInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_tlab_instrumented, TLABInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_tlab_instrumented, TLABInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_tlab_instrumented, TLABInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_tlab_instrumented, TLABInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_tlab_instrumented, TLABInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_tlab_instrumented, TLABInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_tlab_instrumented, TLABInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_tlab_instrumented, TLABInstrumented)
-
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region, Region)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region, Region)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region, Region)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region, Region)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_region, Region)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_region, Region)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region, Region)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_region, Region)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region, Region)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region, Region)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region, Region)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region, Region)
-
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_instrumented, RegionInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_instrumented, RegionInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_instrumented, RegionInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_instrumented, RegionInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_region_instrumented, RegionInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_region_instrumented, RegionInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_instrumented, RegionInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_region_instrumented, RegionInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_instrumented, RegionInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_instrumented, RegionInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_instrumented, RegionInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_instrumented, RegionInstrumented)
-
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab, RegionTLAB)
-
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_region_tlab_instrumented, RegionTLABInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab_instrumented, RegionTLABInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab_instrumented, RegionTLABInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_tlab_instrumented, RegionTLABInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_region_tlab_instrumented, RegionTLABInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_region_tlab_instrumented, RegionTLABInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_tlab_instrumented, RegionTLABInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_region_tlab_instrumented, RegionTLABInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_region_tlab_instrumented, RegionTLABInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_tlab_instrumented, RegionTLABInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_tlab_instrumented, RegionTLABInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab_instrumented, RegionTLABInstrumented)
-
-ONE_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_RESULT_IS_NON_ZERO
-ONE_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_RESULT_IS_NON_ZERO
-ONE_ARG_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode, RETURN_IF_RESULT_IS_NON_ZERO
-ONE_ARG_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_RESULT_IS_NON_ZERO
+ONE_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ONE_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ONE_ARG_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ONE_ARG_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
TWO_ARG_REF_DOWNCALL art_quick_handle_fill_data, artHandleFillArrayDataFromCode, RETURN_IF_EAX_ZERO
diff --git a/runtime/art_code.cc b/runtime/art_code.cc
new file mode 100644
index 0000000000..ad0b170079
--- /dev/null
+++ b/runtime/art_code.cc
@@ -0,0 +1,333 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "art_code.h"
+
+#include "art_method.h"
+#include "art_method-inl.h"
+#include "class_linker.h"
+#include "entrypoints/runtime_asm_entrypoints.h"
+#include "handle_scope.h"
+#include "jit/jit.h"
+#include "jit/jit_code_cache.h"
+#include "mapping_table.h"
+#include "oat.h"
+#include "runtime.h"
+#include "utils.h"
+
+namespace art {
+
+// Converts a dex PC to a native PC.
+uintptr_t ArtCode::ToNativeQuickPc(const uint32_t dex_pc,
+ bool is_for_catch_handler,
+ bool abort_on_failure)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ const void* entry_point = GetQuickOatEntryPoint(sizeof(void*));
+ if (IsOptimized(sizeof(void*))) {
+ // Optimized code does not have a mapping table. Search for the dex-to-pc
+ // mapping in stack maps.
+ CodeInfo code_info = GetOptimizedCodeInfo();
+ StackMapEncoding encoding = code_info.ExtractEncoding();
+
+ // All stack maps are stored in the same CodeItem section, safepoint stack
+ // maps first, then catch stack maps. We use `is_for_catch_handler` to select
+ // the order of iteration.
+ StackMap stack_map =
+ LIKELY(is_for_catch_handler) ? code_info.GetCatchStackMapForDexPc(dex_pc, encoding)
+ : code_info.GetStackMapForDexPc(dex_pc, encoding);
+ if (stack_map.IsValid()) {
+ return reinterpret_cast<uintptr_t>(entry_point) + stack_map.GetNativePcOffset(encoding);
+ }
+ } else {
+ MappingTable table((entry_point != nullptr) ? GetMappingTable(sizeof(void*)) : nullptr);
+ if (table.TotalSize() == 0) {
+ DCHECK_EQ(dex_pc, 0U);
+ return 0; // Special no mapping/pc == 0 case
+ }
+ // Assume the caller wants a dex-to-pc mapping so check here first.
+ typedef MappingTable::DexToPcIterator It;
+ for (It cur = table.DexToPcBegin(), end = table.DexToPcEnd(); cur != end; ++cur) {
+ if (cur.DexPc() == dex_pc) {
+ return reinterpret_cast<uintptr_t>(entry_point) + cur.NativePcOffset();
+ }
+ }
+ // Now check pc-to-dex mappings.
+ typedef MappingTable::PcToDexIterator It2;
+ for (It2 cur = table.PcToDexBegin(), end = table.PcToDexEnd(); cur != end; ++cur) {
+ if (cur.DexPc() == dex_pc) {
+ return reinterpret_cast<uintptr_t>(entry_point) + cur.NativePcOffset();
+ }
+ }
+ }
+
+ if (abort_on_failure) {
+ LOG(FATAL) << "Failed to find native offset for dex pc 0x" << std::hex << dex_pc
+ << " in " << PrettyMethod(method_);
+ }
+ return UINTPTR_MAX;
+}
+
+bool ArtCode::IsOptimized(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_) {
+ // Temporary solution for detecting if a method has been optimized: the compiler
+ // does not create a GC map. Instead, the vmap table contains the stack map
+ // (as in stack_map.h).
+ return !method_->IsNative()
+ && method_->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size) != nullptr
+ && GetQuickOatEntryPoint(pointer_size) != nullptr
+ && GetNativeGcMap(pointer_size) == nullptr;
+}
+
+CodeInfo ArtCode::GetOptimizedCodeInfo() {
+ DCHECK(IsOptimized(sizeof(void*)));
+ const void* code_pointer = EntryPointToCodePointer(GetQuickOatEntryPoint(sizeof(void*)));
+ DCHECK(code_pointer != nullptr);
+ uint32_t offset =
+ reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].vmap_table_offset_;
+ const void* data =
+ reinterpret_cast<const void*>(reinterpret_cast<const uint8_t*>(code_pointer) - offset);
+ return CodeInfo(data);
+}
+
+uintptr_t ArtCode::NativeQuickPcOffset(const uintptr_t pc) {
+ const void* quick_entry_point = GetQuickOatEntryPoint(sizeof(void*));
+ CHECK_NE(quick_entry_point, GetQuickToInterpreterBridge());
+ CHECK_EQ(quick_entry_point,
+ Runtime::Current()->GetInstrumentation()->GetQuickCodeFor(method_, sizeof(void*)));
+ return pc - reinterpret_cast<uintptr_t>(quick_entry_point);
+}
+
+uint32_t ArtCode::ToDexPc(const uintptr_t pc, bool abort_on_failure) {
+ const void* entry_point = GetQuickOatEntryPoint(sizeof(void*));
+ uint32_t sought_offset = pc - reinterpret_cast<uintptr_t>(entry_point);
+ if (IsOptimized(sizeof(void*))) {
+ CodeInfo code_info = GetOptimizedCodeInfo();
+ StackMapEncoding encoding = code_info.ExtractEncoding();
+ StackMap stack_map = code_info.GetStackMapForNativePcOffset(sought_offset, encoding);
+ if (stack_map.IsValid()) {
+ return stack_map.GetDexPc(encoding);
+ }
+ } else {
+ MappingTable table(entry_point != nullptr ? GetMappingTable(sizeof(void*)) : nullptr);
+ if (table.TotalSize() == 0) {
+ // NOTE: Special methods (see Mir2Lir::GenSpecialCase()) have an empty mapping
+ // but they have no suspend checks and, consequently, we never call ToDexPc() for them.
+ DCHECK(method_->IsNative() || method_->IsCalleeSaveMethod() || method_->IsProxyMethod())
+ << PrettyMethod(method_);
+ return DexFile::kDexNoIndex; // Special no mapping case
+ }
+ // Assume the caller wants a pc-to-dex mapping so check here first.
+ typedef MappingTable::PcToDexIterator It;
+ for (It cur = table.PcToDexBegin(), end = table.PcToDexEnd(); cur != end; ++cur) {
+ if (cur.NativePcOffset() == sought_offset) {
+ return cur.DexPc();
+ }
+ }
+ // Now check dex-to-pc mappings.
+ typedef MappingTable::DexToPcIterator It2;
+ for (It2 cur = table.DexToPcBegin(), end = table.DexToPcEnd(); cur != end; ++cur) {
+ if (cur.NativePcOffset() == sought_offset) {
+ return cur.DexPc();
+ }
+ }
+ }
+ if (abort_on_failure) {
+ LOG(FATAL) << "Failed to find Dex offset for PC offset " << reinterpret_cast<void*>(sought_offset)
+ << "(PC " << reinterpret_cast<void*>(pc) << ", entry_point=" << entry_point
+ << " current entry_point=" << GetQuickOatEntryPoint(sizeof(void*))
+ << ") in " << PrettyMethod(method_);
+ }
+ return DexFile::kDexNoIndex;
+}
+
+const uint8_t* ArtCode::GetNativeGcMap(size_t pointer_size) {
+ const void* code_pointer = EntryPointToCodePointer(GetQuickOatEntryPoint(pointer_size));
+ if (code_pointer == nullptr) {
+ return nullptr;
+ }
+ uint32_t offset =
+ reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].gc_map_offset_;
+ if (UNLIKELY(offset == 0u)) {
+ return nullptr;
+ }
+ return reinterpret_cast<const uint8_t*>(code_pointer) - offset;
+}
+
+const uint8_t* ArtCode::GetVmapTable(size_t pointer_size) {
+ CHECK(!IsOptimized(pointer_size)) << "Unimplemented vmap table for optimized compiler";
+ const void* code_pointer = EntryPointToCodePointer(GetQuickOatEntryPoint(pointer_size));
+ if (code_pointer == nullptr) {
+ return nullptr;
+ }
+ uint32_t offset =
+ reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].vmap_table_offset_;
+ if (UNLIKELY(offset == 0u)) {
+ return nullptr;
+ }
+ return reinterpret_cast<const uint8_t*>(code_pointer) - offset;
+}
+
+const uint8_t* ArtCode::GetMappingTable(size_t pointer_size) {
+ const void* code_pointer = EntryPointToCodePointer(GetQuickOatEntryPoint(pointer_size));
+ if (code_pointer == nullptr) {
+ return nullptr;
+ }
+ uint32_t offset =
+ reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].mapping_table_offset_;
+ if (UNLIKELY(offset == 0u)) {
+ return nullptr;
+ }
+ return reinterpret_cast<const uint8_t*>(code_pointer) - offset;
+}
+
+// Counts the number of references in the parameter list of the corresponding method.
+// Note: This does _not_ include "this" for non-static methods.
+static uint32_t GetNumberOfReferenceArgsWithoutReceiver(ArtMethod* method)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ uint32_t shorty_len;
+ const char* shorty = method->GetShorty(&shorty_len);
+ uint32_t refs = 0;
+ for (uint32_t i = 1; i < shorty_len ; ++i) {
+ if (shorty[i] == 'L') {
+ refs++;
+ }
+ }
+ return refs;
+}
+
+QuickMethodFrameInfo ArtCode::GetQuickFrameInfo() {
+ Runtime* runtime = Runtime::Current();
+
+ if (UNLIKELY(method_->IsAbstract())) {
+ return runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
+ }
+
+ // This goes before IsProxyMethod since runtime methods have a null declaring class.
+ if (UNLIKELY(method_->IsRuntimeMethod())) {
+ return runtime->GetRuntimeMethodFrameInfo(method_);
+ }
+
+ // For Proxy method we add special handling for the direct method case (there is only one
+ // direct method - constructor). Direct method is cloned from original
+ // java.lang.reflect.Proxy class together with code and as a result it is executed as usual
+ // quick compiled method without any stubs. So the frame info should be returned as it is a
+ // quick method not a stub. However, if instrumentation stubs are installed, the
+ // instrumentation->GetQuickCodeFor() returns the artQuickProxyInvokeHandler instead of an
+ // oat code pointer, thus we have to add a special case here.
+ if (UNLIKELY(method_->IsProxyMethod())) {
+ if (method_->IsDirect()) {
+ CHECK(method_->IsConstructor());
+ const void* code_pointer =
+ EntryPointToCodePointer(method_->GetEntryPointFromQuickCompiledCode());
+ return reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].frame_info_;
+ } else {
+ return runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
+ }
+ }
+
+ const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(method_, sizeof(void*));
+ ClassLinker* class_linker = runtime->GetClassLinker();
+ // On failure, instead of null we get the quick-generic-jni-trampoline for native method
+ // indicating the generic JNI, or the quick-to-interpreter-bridge (but not the trampoline)
+ // for non-native methods. And we really shouldn't see a failure for non-native methods here.
+ DCHECK(!class_linker->IsQuickToInterpreterBridge(entry_point));
+
+ if (class_linker->IsQuickGenericJniStub(entry_point)) {
+ // Generic JNI frame.
+ DCHECK(method_->IsNative());
+ uint32_t handle_refs = GetNumberOfReferenceArgsWithoutReceiver(method_) + 1;
+ size_t scope_size = HandleScope::SizeOf(handle_refs);
+ QuickMethodFrameInfo callee_info = runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
+
+ // Callee saves + handle scope + method ref + alignment
+ // Note: -sizeof(void*) since callee-save frame stores a whole method pointer.
+ size_t frame_size = RoundUp(callee_info.FrameSizeInBytes() - sizeof(void*) +
+ sizeof(ArtMethod*) + scope_size, kStackAlignment);
+ return QuickMethodFrameInfo(frame_size, callee_info.CoreSpillMask(), callee_info.FpSpillMask());
+ }
+
+ const void* code_pointer = EntryPointToCodePointer(entry_point);
+ return reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].frame_info_;
+}
+
+void ArtCode::AssertPcIsWithinQuickCode(uintptr_t pc) {
+ if (method_->IsNative() || method_->IsRuntimeMethod() || method_->IsProxyMethod()) {
+ return;
+ }
+ if (pc == reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc())) {
+ return;
+ }
+ const void* code = method_->GetEntryPointFromQuickCompiledCode();
+ if (code == GetQuickInstrumentationEntryPoint()) {
+ return;
+ }
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ if (class_linker->IsQuickToInterpreterBridge(code) ||
+ class_linker->IsQuickResolutionStub(code)) {
+ return;
+ }
+ // If we are the JIT then we may have just compiled the method after the
+ // IsQuickToInterpreterBridge check.
+ jit::Jit* const jit = Runtime::Current()->GetJit();
+ if (jit != nullptr &&
+ jit->GetCodeCache()->ContainsCodePtr(reinterpret_cast<const void*>(code))) {
+ return;
+ }
+
+ uint32_t code_size = reinterpret_cast<const OatQuickMethodHeader*>(
+ EntryPointToCodePointer(code))[-1].code_size_;
+ uintptr_t code_start = reinterpret_cast<uintptr_t>(code);
+ CHECK(code_start <= pc && pc <= (code_start + code_size))
+ << PrettyMethod(method_)
+ << " pc=" << std::hex << pc
+ << " code=" << code
+ << " size=" << code_size;
+}
+
+bool ArtCode::PcIsWithinQuickCode(uintptr_t pc) {
+ /*
+ * During a stack walk, a return PC may point past-the-end of the code
+ * in the case that the last instruction is a call that isn't expected to
+ * return. Thus, we check <= code + GetCodeSize().
+ *
+ * NOTE: For Thumb both pc and code are offset by 1 indicating the Thumb state.
+ */
+ uintptr_t code = reinterpret_cast<uintptr_t>(EntryPointToCodePointer(
+ method_->GetEntryPointFromQuickCompiledCode()));
+ if (code == 0) {
+ return pc == 0;
+ }
+ uintptr_t code_size = reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].code_size_;
+ return code <= pc && pc <= (code + code_size);
+}
+
+const void* ArtCode::GetQuickOatEntryPoint(size_t pointer_size) {
+ if (method_->IsAbstract() || method_->IsRuntimeMethod() || method_->IsProxyMethod()) {
+ return nullptr;
+ }
+ Runtime* runtime = Runtime::Current();
+ ClassLinker* class_linker = runtime->GetClassLinker();
+ const void* code = runtime->GetInstrumentation()->GetQuickCodeFor(method_, pointer_size);
+ // On failure, instead of null we get the quick-generic-jni-trampoline for native method
+ // indicating the generic JNI, or the quick-to-interpreter-bridge (but not the trampoline)
+ // for non-native methods.
+ if (class_linker->IsQuickToInterpreterBridge(code) ||
+ class_linker->IsQuickGenericJniStub(code)) {
+ return nullptr;
+ }
+ return code;
+}
+
+} // namespace art
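A short usage sketch of the new class, mirroring the context_x86 and context_x86_64 changes earlier in this patch (variable names are illustrative only):

    // Inside a stack walk: wrap the code the visitor is currently at and query
    // its frame layout, instead of going through ArtMethod directly.
    ArtCode code = fr.GetCurrentCode();                        // fr is a StackVisitor
    const QuickMethodFrameInfo frame_info = code.GetQuickFrameInfo();
    size_t frame_size = frame_info.FrameSizeInBytes();         // drives spill-slot lookup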
diff --git a/runtime/art_code.h b/runtime/art_code.h
new file mode 100644
index 0000000000..1d2d898ed6
--- /dev/null
+++ b/runtime/art_code.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ART_CODE_H_
+#define ART_RUNTIME_ART_CODE_H_
+
+#include "base/mutex.h"
+#include "offsets.h"
+#include "quick/quick_method_frame_info.h"
+#include "stack_map.h"
+
+namespace art {
+
+class ArtMethod;
+
+class ArtCode FINAL {
+ public:
+ explicit ArtCode(ArtMethod** method) : method_(*method) {}
+ explicit ArtCode(ArtMethod* method) : method_(method) {}
+ ArtCode() : method_(nullptr) {}
+
+ // Converts a dex PC to a native PC.
+ uintptr_t ToNativeQuickPc(const uint32_t dex_pc,
+ bool is_for_catch_handler,
+ bool abort_on_failure = true)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
+ bool IsOptimized(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
+
+ CodeInfo GetOptimizedCodeInfo() SHARED_REQUIRES(Locks::mutator_lock_);
+
+ uintptr_t NativeQuickPcOffset(const uintptr_t pc) SHARED_REQUIRES(Locks::mutator_lock_);
+
+ // Converts a native PC to a dex PC.
+ uint32_t ToDexPc(const uintptr_t pc, bool abort_on_failure = true)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
+ // Callers should wrap the uint8_t* in a GcMap instance for convenient access.
+ const uint8_t* GetNativeGcMap(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
+
+ const uint8_t* GetVmapTable(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
+
+ const uint8_t* GetMappingTable(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
+
+ QuickMethodFrameInfo GetQuickFrameInfo() SHARED_REQUIRES(Locks::mutator_lock_);
+
+ FrameOffset GetReturnPcOffset() SHARED_REQUIRES(Locks::mutator_lock_) {
+ return FrameOffset(GetFrameSizeInBytes() - sizeof(void*));
+ }
+
+ template <bool kCheckFrameSize = true>
+ uint32_t GetFrameSizeInBytes() SHARED_REQUIRES(Locks::mutator_lock_) {
+ uint32_t result = GetQuickFrameInfo().FrameSizeInBytes();
+ if (kCheckFrameSize) {
+ DCHECK_LE(static_cast<size_t>(kStackAlignment), result);
+ }
+ return result;
+ }
+
+ const void* GetQuickOatEntryPoint(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
+
+ void AssertPcIsWithinQuickCode(uintptr_t pc) SHARED_REQUIRES(Locks::mutator_lock_);
+
+ bool PcIsWithinQuickCode(uintptr_t pc) SHARED_REQUIRES(Locks::mutator_lock_);
+
+ FrameOffset GetHandleScopeOffset() SHARED_REQUIRES(Locks::mutator_lock_) {
+ constexpr size_t handle_scope_offset = sizeof(ArtMethod*);
+ DCHECK_LT(handle_scope_offset, GetFrameSizeInBytes());
+ return FrameOffset(handle_scope_offset);
+ }
+
+ ArtMethod* GetMethod() const { return method_; }
+
+ private:
+ ArtMethod* method_;
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_ART_CODE_H_
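A small worked example of the offset helpers in this header, assuming 8-byte pointers and a 64-byte quick frame (both values chosen purely for illustration):

    ArtCode code(method);                                    // any suitable ArtMethod*
    uint32_t size = code.GetFrameSizeInBytes();              // 64 in this example
    FrameOffset return_pc = code.GetReturnPcOffset();        // 64 - sizeof(void*) = 56
    FrameOffset handle_scope = code.GetHandleScopeOffset();  // sizeof(ArtMethod*) = 8

So the return PC occupies the top slot of the frame, and the handle scope (when one exists) begins just above the ArtMethod* stored at the base.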
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index 632a50f15c..f741732046 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -212,18 +212,6 @@ inline mirror::Class* ArtMethod::GetClassFromTypeIndex(uint16_t type_idx,
return type;
}
-inline uint32_t ArtMethod::GetCodeSize() {
- DCHECK(!IsRuntimeMethod() && !IsProxyMethod()) << PrettyMethod(this);
- return GetCodeSize(EntryPointToCodePointer(GetEntryPointFromQuickCompiledCode()));
-}
-
-inline uint32_t ArtMethod::GetCodeSize(const void* code) {
- if (code == nullptr) {
- return 0u;
- }
- return reinterpret_cast<const OatQuickMethodHeader*>(code)[-1].code_size_;
-}
-
inline bool ArtMethod::CheckIncompatibleClassChange(InvokeType type) {
switch (type) {
case kStatic:
@@ -231,8 +219,9 @@ inline bool ArtMethod::CheckIncompatibleClassChange(InvokeType type) {
case kDirect:
return !IsDirect() || IsStatic();
case kVirtual: {
+ // We have an error if we are direct or a non-default, non-miranda interface method.
mirror::Class* methods_class = GetDeclaringClass();
- return IsDirect() || (methods_class->IsInterface() && !IsMiranda());
+ return IsDirect() || (methods_class->IsInterface() && !IsDefault() && !IsMiranda());
}
case kSuper:
// Constructors and static methods are called with invoke-direct.
@@ -248,85 +237,6 @@ inline bool ArtMethod::CheckIncompatibleClassChange(InvokeType type) {
}
}
-inline uint32_t ArtMethod::GetQuickOatCodeOffset() {
- DCHECK(!Runtime::Current()->IsStarted());
- return PointerToLowMemUInt32(GetEntryPointFromQuickCompiledCode());
-}
-
-inline void ArtMethod::SetQuickOatCodeOffset(uint32_t code_offset) {
- DCHECK(!Runtime::Current()->IsStarted());
- SetEntryPointFromQuickCompiledCode(reinterpret_cast<void*>(code_offset));
-}
-
-inline const uint8_t* ArtMethod::GetMappingTable(size_t pointer_size) {
- const void* code_pointer = GetQuickOatCodePointer(pointer_size);
- if (code_pointer == nullptr) {
- return nullptr;
- }
- return GetMappingTable(code_pointer, pointer_size);
-}
-
-inline const uint8_t* ArtMethod::GetMappingTable(const void* code_pointer, size_t pointer_size) {
- DCHECK(code_pointer != nullptr);
- DCHECK_EQ(code_pointer, GetQuickOatCodePointer(pointer_size));
- uint32_t offset =
- reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].mapping_table_offset_;
- if (UNLIKELY(offset == 0u)) {
- return nullptr;
- }
- return reinterpret_cast<const uint8_t*>(code_pointer) - offset;
-}
-
-inline const uint8_t* ArtMethod::GetVmapTable(size_t pointer_size) {
- const void* code_pointer = GetQuickOatCodePointer(pointer_size);
- if (code_pointer == nullptr) {
- return nullptr;
- }
- return GetVmapTable(code_pointer, pointer_size);
-}
-
-inline const uint8_t* ArtMethod::GetVmapTable(const void* code_pointer, size_t pointer_size) {
- CHECK(!IsOptimized(pointer_size)) << "Unimplemented vmap table for optimized compiler";
- DCHECK(code_pointer != nullptr);
- DCHECK_EQ(code_pointer, GetQuickOatCodePointer(pointer_size));
- uint32_t offset =
- reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].vmap_table_offset_;
- if (UNLIKELY(offset == 0u)) {
- return nullptr;
- }
- return reinterpret_cast<const uint8_t*>(code_pointer) - offset;
-}
-
-inline CodeInfo ArtMethod::GetOptimizedCodeInfo() {
- DCHECK(IsOptimized(sizeof(void*)));
- const void* code_pointer = GetQuickOatCodePointer(sizeof(void*));
- DCHECK(code_pointer != nullptr);
- uint32_t offset =
- reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].vmap_table_offset_;
- const void* data =
- reinterpret_cast<const void*>(reinterpret_cast<const uint8_t*>(code_pointer) - offset);
- return CodeInfo(data);
-}
-
-inline const uint8_t* ArtMethod::GetNativeGcMap(size_t pointer_size) {
- const void* code_pointer = GetQuickOatCodePointer(pointer_size);
- if (code_pointer == nullptr) {
- return nullptr;
- }
- return GetNativeGcMap(code_pointer, pointer_size);
-}
-
-inline const uint8_t* ArtMethod::GetNativeGcMap(const void* code_pointer, size_t pointer_size) {
- DCHECK(code_pointer != nullptr);
- DCHECK_EQ(code_pointer, GetQuickOatCodePointer(pointer_size));
- uint32_t offset =
- reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].gc_map_offset_;
- if (UNLIKELY(offset == 0u)) {
- return nullptr;
- }
- return reinterpret_cast<const uint8_t*>(code_pointer) - offset;
-}
-
inline bool ArtMethod::IsRuntimeMethod() {
return dex_method_index_ == DexFile::kDexNoIndex;
}
@@ -367,20 +277,6 @@ inline bool ArtMethod::IsImtUnimplementedMethod() {
return result;
}
-inline uintptr_t ArtMethod::NativeQuickPcOffset(const uintptr_t pc) {
- const void* code = Runtime::Current()->GetInstrumentation()->GetQuickCodeFor(
- this, sizeof(void*));
- return pc - reinterpret_cast<uintptr_t>(code);
-}
-
-inline QuickMethodFrameInfo ArtMethod::GetQuickFrameInfo(const void* code_pointer) {
- DCHECK(code_pointer != nullptr);
- if (kIsDebugBuild && !IsProxyMethod()) {
- CHECK_EQ(code_pointer, GetQuickOatCodePointer(sizeof(void*)));
- }
- return reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].frame_info_;
-}
-
inline const DexFile* ArtMethod::GetDexFile() {
return GetDexCache()->GetDexFile();
}
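The removed accessors all depend on the same layout trick: the method header is emitted immediately before the compiled code, so each table is found by subtracting a stored offset from the code pointer. A simplified sketch of that lookup; SketchMethodHeader is a hypothetical stand-in whose fields merely echo the offsets used above, not the real OatQuickMethodHeader layout:

#include <cstdint>

// Hypothetical, simplified stand-in for the per-method code header.
struct SketchMethodHeader {
  uint32_t mapping_table_offset_;
  uint32_t vmap_table_offset_;
  uint32_t gc_map_offset_;
  uint32_t code_size_;
};

// The header sits directly below the first instruction, so indexing [-1] from the
// code pointer reads it; a zero offset means "no table emitted for this method".
inline const uint8_t* VmapTableForSketch(const void* code_pointer) {
  const SketchMethodHeader* header =
      reinterpret_cast<const SketchMethodHeader*>(code_pointer) - 1;
  uint32_t offset = header->vmap_table_offset_;
  if (offset == 0u) {
    return nullptr;
  }
  return reinterpret_cast<const uint8_t*>(code_pointer) - offset;
}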
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index 92648b9b1b..f9d9077261 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -180,98 +180,6 @@ uint32_t ArtMethod::FindDexMethodIndexInOtherDexFile(const DexFile& other_dexfil
return DexFile::kDexNoIndex;
}
-uint32_t ArtMethod::ToDexPc(const uintptr_t pc, bool abort_on_failure) {
- const void* entry_point = GetQuickOatEntryPoint(sizeof(void*));
- uint32_t sought_offset = pc - reinterpret_cast<uintptr_t>(entry_point);
- if (IsOptimized(sizeof(void*))) {
- CodeInfo code_info = GetOptimizedCodeInfo();
- StackMapEncoding encoding = code_info.ExtractEncoding();
- StackMap stack_map = code_info.GetStackMapForNativePcOffset(sought_offset, encoding);
- if (stack_map.IsValid()) {
- return stack_map.GetDexPc(encoding);
- }
- } else {
- MappingTable table(entry_point != nullptr ?
- GetMappingTable(EntryPointToCodePointer(entry_point), sizeof(void*)) : nullptr);
- if (table.TotalSize() == 0) {
- // NOTE: Special methods (see Mir2Lir::GenSpecialCase()) have an empty mapping
- // but they have no suspend checks and, consequently, we never call ToDexPc() for them.
- DCHECK(IsNative() || IsCalleeSaveMethod() || IsProxyMethod()) << PrettyMethod(this);
- return DexFile::kDexNoIndex; // Special no mapping case
- }
- // Assume the caller wants a pc-to-dex mapping so check here first.
- typedef MappingTable::PcToDexIterator It;
- for (It cur = table.PcToDexBegin(), end = table.PcToDexEnd(); cur != end; ++cur) {
- if (cur.NativePcOffset() == sought_offset) {
- return cur.DexPc();
- }
- }
- // Now check dex-to-pc mappings.
- typedef MappingTable::DexToPcIterator It2;
- for (It2 cur = table.DexToPcBegin(), end = table.DexToPcEnd(); cur != end; ++cur) {
- if (cur.NativePcOffset() == sought_offset) {
- return cur.DexPc();
- }
- }
- }
- if (abort_on_failure) {
- LOG(FATAL) << "Failed to find Dex offset for PC offset " << reinterpret_cast<void*>(sought_offset)
- << "(PC " << reinterpret_cast<void*>(pc) << ", entry_point=" << entry_point
- << " current entry_point=" << GetQuickOatEntryPoint(sizeof(void*))
- << ") in " << PrettyMethod(this);
- }
- return DexFile::kDexNoIndex;
-}
-
-uintptr_t ArtMethod::ToNativeQuickPc(const uint32_t dex_pc,
- bool is_for_catch_handler,
- bool abort_on_failure) {
- const void* entry_point = GetQuickOatEntryPoint(sizeof(void*));
- if (IsOptimized(sizeof(void*))) {
- // Optimized code does not have a mapping table. Search for the dex-to-pc
- // mapping in stack maps.
- CodeInfo code_info = GetOptimizedCodeInfo();
- StackMapEncoding encoding = code_info.ExtractEncoding();
-
- // All stack maps are stored in the same CodeItem section, safepoint stack
- // maps first, then catch stack maps. We use `is_for_catch_handler` to select
- // the order of iteration.
- StackMap stack_map =
- LIKELY(is_for_catch_handler) ? code_info.GetCatchStackMapForDexPc(dex_pc, encoding)
- : code_info.GetStackMapForDexPc(dex_pc, encoding);
- if (stack_map.IsValid()) {
- return reinterpret_cast<uintptr_t>(entry_point) + stack_map.GetNativePcOffset(encoding);
- }
- } else {
- MappingTable table(entry_point != nullptr ?
- GetMappingTable(EntryPointToCodePointer(entry_point), sizeof(void*)) : nullptr);
- if (table.TotalSize() == 0) {
- DCHECK_EQ(dex_pc, 0U);
- return 0; // Special no mapping/pc == 0 case
- }
- // Assume the caller wants a dex-to-pc mapping so check here first.
- typedef MappingTable::DexToPcIterator It;
- for (It cur = table.DexToPcBegin(), end = table.DexToPcEnd(); cur != end; ++cur) {
- if (cur.DexPc() == dex_pc) {
- return reinterpret_cast<uintptr_t>(entry_point) + cur.NativePcOffset();
- }
- }
- // Now check pc-to-dex mappings.
- typedef MappingTable::PcToDexIterator It2;
- for (It2 cur = table.PcToDexBegin(), end = table.PcToDexEnd(); cur != end; ++cur) {
- if (cur.DexPc() == dex_pc) {
- return reinterpret_cast<uintptr_t>(entry_point) + cur.NativePcOffset();
- }
- }
- }
-
- if (abort_on_failure) {
- LOG(FATAL) << "Failed to find native offset for dex pc 0x" << std::hex << dex_pc
- << " in " << PrettyMethod(this);
- }
- return UINTPTR_MAX;
-}
-
uint32_t ArtMethod::FindCatchBlock(Handle<mirror::Class> exception_type,
uint32_t dex_pc, bool* has_no_move_exception) {
const DexFile::CodeItem* code_item = GetCodeItem();
@@ -322,76 +230,6 @@ uint32_t ArtMethod::FindCatchBlock(Handle<mirror::Class> exception_type,
return found_dex_pc;
}
-void ArtMethod::AssertPcIsWithinQuickCode(uintptr_t pc) {
- if (IsNative() || IsRuntimeMethod() || IsProxyMethod()) {
- return;
- }
- if (pc == reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc())) {
- return;
- }
- const void* code = GetEntryPointFromQuickCompiledCode();
- if (code == GetQuickInstrumentationEntryPoint()) {
- return;
- }
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- if (class_linker->IsQuickToInterpreterBridge(code) ||
- class_linker->IsQuickResolutionStub(code)) {
- return;
- }
- // If we are the JIT then we may have just compiled the method after the
- // IsQuickToInterpreterBridge check.
- jit::Jit* const jit = Runtime::Current()->GetJit();
- if (jit != nullptr &&
- jit->GetCodeCache()->ContainsCodePtr(reinterpret_cast<const void*>(code))) {
- return;
- }
- /*
- * During a stack walk, a return PC may point past-the-end of the code
- * in the case that the last instruction is a call that isn't expected to
- * return. Thus, we check <= code + GetCodeSize().
- *
- * NOTE: For Thumb both pc and code are offset by 1 indicating the Thumb state.
- */
- CHECK(PcIsWithinQuickCode(reinterpret_cast<uintptr_t>(code), pc))
- << PrettyMethod(this)
- << " pc=" << std::hex << pc
- << " code=" << code
- << " size=" << GetCodeSize(
- EntryPointToCodePointer(reinterpret_cast<const void*>(code)));
-}
-
-bool ArtMethod::IsEntrypointInterpreter() {
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- const void* oat_quick_code = class_linker->GetOatMethodQuickCodeFor(this);
- return oat_quick_code == nullptr || oat_quick_code != GetEntryPointFromQuickCompiledCode();
-}
-
-const void* ArtMethod::GetQuickOatEntryPoint(size_t pointer_size) {
- if (IsAbstract() || IsRuntimeMethod() || IsProxyMethod()) {
- return nullptr;
- }
- Runtime* runtime = Runtime::Current();
- ClassLinker* class_linker = runtime->GetClassLinker();
- const void* code = runtime->GetInstrumentation()->GetQuickCodeFor(this, pointer_size);
- // On failure, instead of null we get the quick-generic-jni-trampoline for native method
- // indicating the generic JNI, or the quick-to-interpreter-bridge (but not the trampoline)
- // for non-native methods.
- if (class_linker->IsQuickToInterpreterBridge(code) ||
- class_linker->IsQuickGenericJniStub(code)) {
- return nullptr;
- }
- return code;
-}
-
-#ifndef NDEBUG
-uintptr_t ArtMethod::NativeQuickPcOffset(const uintptr_t pc, const void* quick_entry_point) {
- CHECK_NE(quick_entry_point, GetQuickToInterpreterBridge());
- CHECK_EQ(quick_entry_point,
- Runtime::Current()->GetInstrumentation()->GetQuickCodeFor(this, sizeof(void*)));
- return pc - reinterpret_cast<uintptr_t>(quick_entry_point);
-}
-#endif
-
void ArtMethod::Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue* result,
const char* shorty) {
if (UNLIKELY(__builtin_frame_address(0) < self->GetStackEnd())) {
@@ -435,8 +273,9 @@ void ArtMethod::Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue*
// Ensure that we won't be accidentally calling quick compiled code when -Xint.
if (kIsDebugBuild && runtime->GetInstrumentation()->IsForcedInterpretOnly()) {
- DCHECK(!runtime->UseJit());
- CHECK(IsEntrypointInterpreter())
+ CHECK(!runtime->UseJit());
+ const void* oat_quick_code = runtime->GetClassLinker()->GetOatMethodQuickCodeFor(this);
+ CHECK(oat_quick_code == nullptr || oat_quick_code != GetEntryPointFromQuickCompiledCode())
<< "Don't call compiled code when -Xint " << PrettyMethod(this);
}
@@ -480,74 +319,6 @@ void ArtMethod::Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue*
self->PopManagedStackFragment(fragment);
}
-// Counts the number of references in the parameter list of the corresponding method.
-// Note: Thus does _not_ include "this" for non-static methods.
-static uint32_t GetNumberOfReferenceArgsWithoutReceiver(ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_) {
- uint32_t shorty_len;
- const char* shorty = method->GetShorty(&shorty_len);
- uint32_t refs = 0;
- for (uint32_t i = 1; i < shorty_len ; ++i) {
- if (shorty[i] == 'L') {
- refs++;
- }
- }
- return refs;
-}
-
-QuickMethodFrameInfo ArtMethod::GetQuickFrameInfo() {
- Runtime* runtime = Runtime::Current();
-
- if (UNLIKELY(IsAbstract())) {
- return runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
- }
-
- // This goes before IsProxyMethod since runtime methods have a null declaring class.
- if (UNLIKELY(IsRuntimeMethod())) {
- return runtime->GetRuntimeMethodFrameInfo(this);
- }
-
- // For Proxy method we add special handling for the direct method case (there is only one
- // direct method - constructor). Direct method is cloned from original
- // java.lang.reflect.Proxy class together with code and as a result it is executed as usual
- // quick compiled method without any stubs. So the frame info should be returned as it is a
- // quick method not a stub. However, if instrumentation stubs are installed, the
- // instrumentation->GetQuickCodeFor() returns the artQuickProxyInvokeHandler instead of an
- // oat code pointer, thus we have to add a special case here.
- if (UNLIKELY(IsProxyMethod())) {
- if (IsDirect()) {
- CHECK(IsConstructor());
- return GetQuickFrameInfo(EntryPointToCodePointer(GetEntryPointFromQuickCompiledCode()));
- } else {
- return runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
- }
- }
-
- const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(this, sizeof(void*));
- ClassLinker* class_linker = runtime->GetClassLinker();
- // On failure, instead of null we get the quick-generic-jni-trampoline for native method
- // indicating the generic JNI, or the quick-to-interpreter-bridge (but not the trampoline)
- // for non-native methods. And we really shouldn't see a failure for non-native methods here.
- DCHECK(!class_linker->IsQuickToInterpreterBridge(entry_point));
-
- if (class_linker->IsQuickGenericJniStub(entry_point)) {
- // Generic JNI frame.
- DCHECK(IsNative());
- uint32_t handle_refs = GetNumberOfReferenceArgsWithoutReceiver(this) + 1;
- size_t scope_size = HandleScope::SizeOf(handle_refs);
- QuickMethodFrameInfo callee_info = runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
-
- // Callee saves + handle scope + method ref + alignment
- // Note: -sizeof(void*) since callee-save frame stores a whole method pointer.
- size_t frame_size = RoundUp(callee_info.FrameSizeInBytes() - sizeof(void*) +
- sizeof(ArtMethod*) + scope_size, kStackAlignment);
- return QuickMethodFrameInfo(frame_size, callee_info.CoreSpillMask(), callee_info.FpSpillMask());
- }
-
- const void* code_pointer = EntryPointToCodePointer(entry_point);
- return GetQuickFrameInfo(code_pointer);
-}
-
void ArtMethod::RegisterNative(const void* native_method, bool is_fast) {
CHECK(IsNative()) << PrettyMethod(this);
CHECK(!IsFastNative()) << PrettyMethod(this);
@@ -590,16 +361,6 @@ bool ArtMethod::EqualParameters(Handle<mirror::ObjectArray<mirror::Class>> param
return true;
}
-const uint8_t* ArtMethod::GetQuickenedInfo() {
- bool found = false;
- OatFile::OatMethod oat_method =
- Runtime::Current()->GetClassLinker()->FindOatMethodFor(this, &found);
- if (!found || (oat_method.GetQuickCode() != nullptr)) {
- return nullptr;
- }
- return oat_method.GetVmapTable();
-}
-
ProfilingInfo* ArtMethod::CreateProfilingInfo() {
DCHECK(!Runtime::Current()->IsAotCompiler());
ProfilingInfo* info = ProfilingInfo::Create(this);
@@ -613,4 +374,14 @@ ProfilingInfo* ArtMethod::CreateProfilingInfo() {
}
}
+const uint8_t* ArtMethod::GetQuickenedInfo() {
+ bool found = false;
+ OatFile::OatMethod oat_method =
+ Runtime::Current()->GetClassLinker()->FindOatMethodFor(this, &found);
+ if (!found || (oat_method.GetQuickCode() != nullptr)) {
+ return nullptr;
+ }
+ return oat_method.GetVmapTable();
+}
+
} // namespace art
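The removed GetQuickFrameInfo() computed the generic JNI frame size in a single RoundUp expression. A self-contained restatement of that arithmetic with illustrative sizes (a 96-byte callee-save frame and a 40-byte handle scope) rather than values from any real target:

#include <cstddef>

constexpr size_t kJniSketchStackAlignment = 16;

constexpr size_t RoundUpSketch(size_t x, size_t n) {
  return (x + n - 1) & ~(n - 1);
}

// Callee saves + handle scope + method ref + alignment; -sizeof(void*) because
// the callee-save frame already contains a method pointer slot.
constexpr size_t GenericJniFrameSizeSketch(size_t callee_save_frame_size,
                                           size_t handle_scope_size) {
  return RoundUpSketch(callee_save_frame_size - sizeof(void*) +
                           sizeof(void*) /* ArtMethod* slot */ + handle_scope_size,
                       kJniSketchStackAlignment);
}

// E.g. 96 - 8 + 8 + 40 = 136 bytes, rounded up to the next 16-byte boundary.
static_assert(GenericJniFrameSizeSketch(96, 40) == 144, "illustrative value");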
diff --git a/runtime/art_method.h b/runtime/art_method.h
index 0315c3a953..9743250cc0 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -17,6 +17,7 @@
#ifndef ART_RUNTIME_ART_METHOD_H_
#define ART_RUNTIME_ART_METHOD_H_
+#include "base/bit_utils.h"
#include "base/casts.h"
#include "dex_file.h"
#include "gc_root.h"
@@ -24,10 +25,8 @@
#include "method_reference.h"
#include "modifiers.h"
#include "mirror/object.h"
-#include "quick/quick_method_frame_info.h"
#include "read_barrier_option.h"
#include "stack.h"
-#include "stack_map.h"
#include "utils.h"
namespace art {
@@ -136,6 +135,11 @@ class ArtMethod FINAL {
return (GetAccessFlags() & kAccMiranda) != 0;
}
+ // This is set by the class linker.
+ bool IsDefault() {
+ return (GetAccessFlags() & kAccDefault) != 0;
+ }
+
bool IsNative() {
return (GetAccessFlags() & kAccNative) != 0;
}
@@ -164,14 +168,9 @@ class ArtMethod FINAL {
SetAccessFlags(GetAccessFlags() | kAccPreverified);
}
- bool IsOptimized(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_) {
- // Temporary solution for detecting if a method has been optimized: the compiler
- // does not create a GC map. Instead, the vmap table contains the stack map
- // (as in stack_map.h).
- return !IsNative()
- && GetEntryPointFromQuickCompiledCodePtrSize(pointer_size) != nullptr
- && GetQuickOatCodePointer(pointer_size) != nullptr
- && GetNativeGcMap(pointer_size) == nullptr;
+ // Returns true if this method could be overridden by a default method.
+ bool IsOverridableByDefaultMethod() {
+ return IsDefault() || IsAbstract();
}
bool CheckIncompatibleClassChange(InvokeType type) SHARED_REQUIRES(Locks::mutator_lock_);
@@ -280,94 +279,6 @@ class ArtMethod FINAL {
entry_point_from_quick_compiled_code, pointer_size);
}
- uint32_t GetCodeSize() SHARED_REQUIRES(Locks::mutator_lock_);
-
- // Check whether the given PC is within the quick compiled code associated with this method's
- // quick entrypoint. This code isn't robust for instrumentation, etc. and is only used for
- // debug purposes.
- bool PcIsWithinQuickCode(uintptr_t pc) {
- return PcIsWithinQuickCode(
- reinterpret_cast<uintptr_t>(GetEntryPointFromQuickCompiledCode()), pc);
- }
-
- void AssertPcIsWithinQuickCode(uintptr_t pc) SHARED_REQUIRES(Locks::mutator_lock_);
-
- // Returns true if the entrypoint points to the interpreter, as
- // opposed to the compiled code, that is, this method will be
- // interpretered on invocation.
- bool IsEntrypointInterpreter() SHARED_REQUIRES(Locks::mutator_lock_);
-
- uint32_t GetQuickOatCodeOffset();
- void SetQuickOatCodeOffset(uint32_t code_offset);
-
- ALWAYS_INLINE static const void* EntryPointToCodePointer(const void* entry_point) {
- uintptr_t code = reinterpret_cast<uintptr_t>(entry_point);
- // TODO: Make this Thumb2 specific. It is benign on other architectures as code is always at
- // least 2 byte aligned.
- code &= ~0x1;
- return reinterpret_cast<const void*>(code);
- }
-
- // Actual entry point pointer to compiled oat code or null.
- const void* GetQuickOatEntryPoint(size_t pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
- // Actual pointer to compiled oat code or null.
- const void* GetQuickOatCodePointer(size_t pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_) {
- return EntryPointToCodePointer(GetQuickOatEntryPoint(pointer_size));
- }
-
- // Callers should wrap the uint8_t* in a MappingTable instance for convenient access.
- const uint8_t* GetMappingTable(size_t pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
- const uint8_t* GetMappingTable(const void* code_pointer, size_t pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
-
- // Callers should wrap the uint8_t* in a VmapTable instance for convenient access.
- const uint8_t* GetVmapTable(size_t pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
- const uint8_t* GetVmapTable(const void* code_pointer, size_t pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
-
- const uint8_t* GetQuickenedInfo() SHARED_REQUIRES(Locks::mutator_lock_);
-
- CodeInfo GetOptimizedCodeInfo() SHARED_REQUIRES(Locks::mutator_lock_);
-
- // Callers should wrap the uint8_t* in a GcMap instance for convenient access.
- const uint8_t* GetNativeGcMap(size_t pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
- const uint8_t* GetNativeGcMap(const void* code_pointer, size_t pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
-
- template <bool kCheckFrameSize = true>
- uint32_t GetFrameSizeInBytes() SHARED_REQUIRES(Locks::mutator_lock_) {
- uint32_t result = GetQuickFrameInfo().FrameSizeInBytes();
- if (kCheckFrameSize) {
- DCHECK_LE(static_cast<size_t>(kStackAlignment), result);
- }
- return result;
- }
-
- QuickMethodFrameInfo GetQuickFrameInfo() SHARED_REQUIRES(Locks::mutator_lock_);
- QuickMethodFrameInfo GetQuickFrameInfo(const void* code_pointer)
- SHARED_REQUIRES(Locks::mutator_lock_);
-
- FrameOffset GetReturnPcOffset() SHARED_REQUIRES(Locks::mutator_lock_) {
- return GetReturnPcOffset(GetFrameSizeInBytes());
- }
-
- FrameOffset GetReturnPcOffset(uint32_t frame_size_in_bytes)
- SHARED_REQUIRES(Locks::mutator_lock_) {
- DCHECK_EQ(frame_size_in_bytes, GetFrameSizeInBytes());
- return FrameOffset(frame_size_in_bytes - sizeof(void*));
- }
-
- FrameOffset GetHandleScopeOffset() SHARED_REQUIRES(Locks::mutator_lock_) {
- constexpr size_t handle_scope_offset = sizeof(ArtMethod*);
- DCHECK_LT(handle_scope_offset, GetFrameSizeInBytes());
- return FrameOffset(handle_scope_offset);
- }
-
void RegisterNative(const void* native_method, bool is_fast)
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -429,27 +340,6 @@ class ArtMethod FINAL {
bool IsImtUnimplementedMethod() SHARED_REQUIRES(Locks::mutator_lock_);
- uintptr_t NativeQuickPcOffset(const uintptr_t pc) SHARED_REQUIRES(Locks::mutator_lock_);
-#ifdef NDEBUG
- uintptr_t NativeQuickPcOffset(const uintptr_t pc, const void* quick_entry_point)
- SHARED_REQUIRES(Locks::mutator_lock_) {
- return pc - reinterpret_cast<uintptr_t>(quick_entry_point);
- }
-#else
- uintptr_t NativeQuickPcOffset(const uintptr_t pc, const void* quick_entry_point)
- SHARED_REQUIRES(Locks::mutator_lock_);
-#endif
-
- // Converts a native PC to a dex PC.
- uint32_t ToDexPc(const uintptr_t pc, bool abort_on_failure = true)
- SHARED_REQUIRES(Locks::mutator_lock_);
-
- // Converts a dex PC to a native PC.
- uintptr_t ToNativeQuickPc(const uint32_t dex_pc,
- bool is_for_catch_handler,
- bool abort_on_failure = true)
- SHARED_REQUIRES(Locks::mutator_lock_);
-
MethodReference ToMethodReference() SHARED_REQUIRES(Locks::mutator_lock_) {
return MethodReference(GetDexFile(), GetDexMethodIndex());
}
@@ -542,6 +432,8 @@ class ArtMethod FINAL {
return ++hotness_count_;
}
+ const uint8_t* GetQuickenedInfo() SHARED_REQUIRES(Locks::mutator_lock_);
+
protected:
// Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
// The class we are a part of.
@@ -622,24 +514,6 @@ class ArtMethod FINAL {
}
}
- // Code points to the start of the quick code.
- static uint32_t GetCodeSize(const void* code);
-
- static bool PcIsWithinQuickCode(uintptr_t code, uintptr_t pc) {
- if (code == 0) {
- return pc == 0;
- }
- /*
- * During a stack walk, a return PC may point past-the-end of the code
- * in the case that the last instruction is a call that isn't expected to
- * return. Thus, we check <= code + GetCodeSize().
- *
- * NOTE: For Thumb both pc and code are offset by 1 indicating the Thumb state.
- */
- return code <= pc && pc <= code + GetCodeSize(
- EntryPointToCodePointer(reinterpret_cast<const void*>(code)));
- }
-
DISALLOW_COPY_AND_ASSIGN(ArtMethod); // Need to use CopyFrom to deal with 32 vs 64 bits.
};
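Two of the removed helpers reappear on ArtCode, and their logic is compact enough to restate. A self-contained sketch of the Thumb-bit masking and the inclusive PC range check, under the same assumptions spelled out in the original comments:

#include <cstdint>

// On Thumb2 the quick entry point has bit 0 set to indicate the Thumb state;
// clearing it yields the raw code pointer. Benign elsewhere, since code is at
// least 2-byte aligned.
inline const void* EntryPointToCodePointerSketch(const void* entry_point) {
  uintptr_t code = reinterpret_cast<uintptr_t>(entry_point);
  code &= ~static_cast<uintptr_t>(0x1);
  return reinterpret_cast<const void*>(code);
}

// A return PC may point one past the end of the code (a call that never
// returns), hence the inclusive upper bound.
inline bool PcIsWithinQuickCodeSketch(uintptr_t code, uintptr_t pc, uint32_t code_size) {
  if (code == 0) {
    return pc == 0;
  }
  return code <= pc && pc <= code + code_size;
}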
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index 1b569fe74a..69f6fe96ff 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -19,6 +19,7 @@
#if defined(__cplusplus)
#include "art_method.h"
+#include "gc/allocator/rosalloc.h"
#include "lock_word.h"
#include "mirror/class.h"
#include "mirror/string.h"
@@ -53,6 +54,14 @@ static inline void CheckAsmSupportOffsetsAndSizes() {
#define ADD_TEST_EQ(x, y)
#endif
+#if defined(__LP64__)
+#define POINTER_SIZE_SHIFT 3
+#else
+#define POINTER_SIZE_SHIFT 2
+#endif
+ADD_TEST_EQ(static_cast<size_t>(1U << POINTER_SIZE_SHIFT),
+ static_cast<size_t>(__SIZEOF_POINTER__))
+
// Size of references to the heap on the stack.
#define STACK_REFERENCE_SIZE 4
ADD_TEST_EQ(static_cast<size_t>(STACK_REFERENCE_SIZE), sizeof(art::StackReference<art::mirror::Object>))
@@ -62,6 +71,10 @@ ADD_TEST_EQ(static_cast<size_t>(STACK_REFERENCE_SIZE), sizeof(art::StackReferenc
ADD_TEST_EQ(static_cast<size_t>(COMPRESSED_REFERENCE_SIZE),
sizeof(art::mirror::CompressedReference<art::mirror::Object>))
+#define COMPRESSED_REFERENCE_SIZE_SHIFT 2
+ADD_TEST_EQ(static_cast<size_t>(1U << COMPRESSED_REFERENCE_SIZE_SHIFT),
+ static_cast<size_t>(COMPRESSED_REFERENCE_SIZE))
+
// Note: these callee save methods loads require read barriers.
// Offset of field Runtime::callee_save_methods_[kSaveAll]
#define RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET 0
@@ -109,7 +122,7 @@ ADD_TEST_EQ(THREAD_SELF_OFFSET,
art::Thread::SelfOffset<__SIZEOF_POINTER__>().Int32Value())
// Offset of field Thread::tlsPtr_.thread_local_pos.
-#define THREAD_LOCAL_POS_OFFSET (THREAD_CARD_TABLE_OFFSET + 152 * __SIZEOF_POINTER__)
+#define THREAD_LOCAL_POS_OFFSET (THREAD_CARD_TABLE_OFFSET + 150 * __SIZEOF_POINTER__)
ADD_TEST_EQ(THREAD_LOCAL_POS_OFFSET,
art::Thread::ThreadLocalPosOffset<__SIZEOF_POINTER__>().Int32Value())
// Offset of field Thread::tlsPtr_.thread_local_end.
@@ -120,6 +133,18 @@ ADD_TEST_EQ(THREAD_LOCAL_END_OFFSET,
#define THREAD_LOCAL_OBJECTS_OFFSET (THREAD_LOCAL_POS_OFFSET + 2 * __SIZEOF_POINTER__)
ADD_TEST_EQ(THREAD_LOCAL_OBJECTS_OFFSET,
art::Thread::ThreadLocalObjectsOffset<__SIZEOF_POINTER__>().Int32Value())
+// Offset of field Thread::tlsPtr_.rosalloc_runs.
+#define THREAD_ROSALLOC_RUNS_OFFSET (THREAD_LOCAL_POS_OFFSET + 3 * __SIZEOF_POINTER__)
+ADD_TEST_EQ(THREAD_ROSALLOC_RUNS_OFFSET,
+ art::Thread::RosAllocRunsOffset<__SIZEOF_POINTER__>().Int32Value())
+// Offset of field Thread::tlsPtr_.thread_local_alloc_stack_top.
+#define THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET (THREAD_ROSALLOC_RUNS_OFFSET + 34 * __SIZEOF_POINTER__)
+ADD_TEST_EQ(THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET,
+ art::Thread::ThreadLocalAllocStackTopOffset<__SIZEOF_POINTER__>().Int32Value())
+// Offset of field Thread::tlsPtr_.thread_local_alloc_stack_end.
+#define THREAD_LOCAL_ALLOC_STACK_END_OFFSET (THREAD_ROSALLOC_RUNS_OFFSET + 35 * __SIZEOF_POINTER__)
+ADD_TEST_EQ(THREAD_LOCAL_ALLOC_STACK_END_OFFSET,
+ art::Thread::ThreadLocalAllocStackEndOffset<__SIZEOF_POINTER__>().Int32Value())
// Offsets within java.lang.Object.
#define MIRROR_OBJECT_CLASS_OFFSET 0
@@ -236,6 +261,44 @@ ADD_TEST_EQ(static_cast<size_t>(OBJECT_ALIGNMENT_MASK), art::kObjectAlignment -
ADD_TEST_EQ(static_cast<uint32_t>(OBJECT_ALIGNMENT_MASK_TOGGLED),
~static_cast<uint32_t>(art::kObjectAlignment - 1))
+#define ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE 128
+ADD_TEST_EQ(ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE,
+ static_cast<int32_t>(art::gc::allocator::RosAlloc::kMaxThreadLocalBracketSize))
+
+#define ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT 4
+ADD_TEST_EQ(ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT,
+ static_cast<int32_t>(art::gc::allocator::RosAlloc::kBracketQuantumSizeShift))
+
+#define ROSALLOC_BRACKET_QUANTUM_SIZE_MASK 15
+ADD_TEST_EQ(ROSALLOC_BRACKET_QUANTUM_SIZE_MASK,
+ static_cast<int32_t>(art::gc::allocator::RosAlloc::kBracketQuantumSize - 1))
+
+#define ROSALLOC_BRACKET_QUANTUM_SIZE_MASK_TOGGLED32 0xfffffff0
+ADD_TEST_EQ(static_cast<uint32_t>(ROSALLOC_BRACKET_QUANTUM_SIZE_MASK_TOGGLED32),
+ ~static_cast<uint32_t>(art::gc::allocator::RosAlloc::kBracketQuantumSize - 1))
+
+#define ROSALLOC_BRACKET_QUANTUM_SIZE_MASK_TOGGLED64 0xfffffffffffffff0
+ADD_TEST_EQ(static_cast<uint64_t>(ROSALLOC_BRACKET_QUANTUM_SIZE_MASK_TOGGLED64),
+ ~static_cast<uint64_t>(art::gc::allocator::RosAlloc::kBracketQuantumSize - 1))
+
+#define ROSALLOC_RUN_FREE_LIST_OFFSET 8
+ADD_TEST_EQ(ROSALLOC_RUN_FREE_LIST_OFFSET,
+ static_cast<int32_t>(art::gc::allocator::RosAlloc::RunFreeListOffset()))
+
+#define ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET 0
+ADD_TEST_EQ(ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET,
+ static_cast<int32_t>(art::gc::allocator::RosAlloc::RunFreeListHeadOffset()))
+
+#define ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET 16
+ADD_TEST_EQ(ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET,
+ static_cast<int32_t>(art::gc::allocator::RosAlloc::RunFreeListSizeOffset()))
+
+#define ROSALLOC_SLOT_NEXT_OFFSET 0
+ADD_TEST_EQ(ROSALLOC_SLOT_NEXT_OFFSET,
+ static_cast<int32_t>(art::gc::allocator::RosAlloc::RunSlotNextOffset()))
+// Assert this so that we can avoid zeroing the next field by installing the class pointer.
+ADD_TEST_EQ(ROSALLOC_SLOT_NEXT_OFFSET, MIRROR_OBJECT_CLASS_OFFSET)
+
#if defined(__cplusplus)
} // End of CheckAsmSupportOffsets.
#endif
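The new constants are assembler-visible mirrors of C++ values, kept honest by ADD_TEST_EQ. A rough host-side sketch of the same cross-checks; the rosalloc_runs layout below is an assumption inferred from the +34/+35 multipliers above, and the base offset is only a placeholder:

#include <cstddef>

static_assert((static_cast<size_t>(1) << (sizeof(void*) == 8 ? 3 : 2)) == sizeof(void*),
              "POINTER_SIZE_SHIFT must match __SIZEOF_POINTER__");

// Assumed layout: 34 thread-local run pointers, then the alloc-stack top/end slots.
constexpr size_t kSketchRosAllocRunsOffset = 0;  // placeholder base offset
constexpr size_t kSketchAllocStackTop = kSketchRosAllocRunsOffset + 34 * sizeof(void*);
constexpr size_t kSketchAllocStackEnd = kSketchRosAllocRunsOffset + 35 * sizeof(void*);
static_assert(kSketchAllocStackEnd - kSketchAllocStackTop == sizeof(void*),
              "top and end are adjacent pointer-sized fields");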
diff --git a/runtime/base/allocator.cc b/runtime/base/allocator.cc
index 4f2fc074fb..f1d0a5fbff 100644
--- a/runtime/base/allocator.cc
+++ b/runtime/base/allocator.cc
@@ -49,15 +49,13 @@ class NoopAllocator FINAL : public Allocator {
explicit NoopAllocator() {}
~NoopAllocator() {}
- void* Alloc(size_t size) {
- UNUSED(size);
+ void* Alloc(size_t size ATTRIBUTE_UNUSED) {
LOG(FATAL) << "NoopAllocator::Alloc should not be called";
UNREACHABLE();
}
- void Free(void* p) {
+ void Free(void* p ATTRIBUTE_UNUSED) {
// Noop.
- UNUSED(p);
}
private:
diff --git a/runtime/base/allocator.h b/runtime/base/allocator.h
index 3422625282..f9960acfb8 100644
--- a/runtime/base/allocator.h
+++ b/runtime/base/allocator.h
@@ -115,9 +115,7 @@ class TrackingAllocatorImpl : public std::allocator<T> {
// Used internally by STL data structures.
template <class U>
- TrackingAllocatorImpl(const TrackingAllocatorImpl<U, kTag>& alloc) noexcept {
- UNUSED(alloc);
- }
+ TrackingAllocatorImpl(const TrackingAllocatorImpl<U, kTag>& alloc ATTRIBUTE_UNUSED) noexcept {}
// Used internally by STL data structures.
TrackingAllocatorImpl() noexcept {
@@ -131,8 +129,7 @@ class TrackingAllocatorImpl : public std::allocator<T> {
typedef TrackingAllocatorImpl<U, kTag> other;
};
- pointer allocate(size_type n, const_pointer hint = 0) {
- UNUSED(hint);
+ pointer allocate(size_type n, const_pointer hint ATTRIBUTE_UNUSED = 0) {
const size_t size = n * sizeof(T);
TrackedAllocators::RegisterAllocation(GetTag(), size);
return reinterpret_cast<pointer>(malloc(size));
diff --git a/runtime/base/arena_allocator.cc b/runtime/base/arena_allocator.cc
index 345428c2a6..1704688565 100644
--- a/runtime/base/arena_allocator.cc
+++ b/runtime/base/arena_allocator.cc
@@ -23,7 +23,6 @@
#include "mem_map.h"
#include "mutex.h"
#include "thread-inl.h"
-#include "base/memory_tool.h"
namespace art {
@@ -94,6 +93,7 @@ const char* const ArenaAllocatorStatsImpl<kCount>::kAllocNames[] = {
"StackMapStm ",
"CodeGen ",
"ParallelMove ",
+ "GraphChecker ",
};
template <bool kCount>
@@ -157,6 +157,18 @@ void ArenaAllocatorStatsImpl<kCount>::Dump(std::ostream& os, const Arena* first,
// Explicitly instantiate the used implementation.
template class ArenaAllocatorStatsImpl<kArenaAllocatorCountAllocations>;
+void ArenaAllocatorMemoryTool::DoMakeDefined(void* ptr, size_t size) {
+ MEMORY_TOOL_MAKE_DEFINED(ptr, size);
+}
+
+void ArenaAllocatorMemoryTool::DoMakeUndefined(void* ptr, size_t size) {
+ MEMORY_TOOL_MAKE_UNDEFINED(ptr, size);
+}
+
+void ArenaAllocatorMemoryTool::DoMakeInaccessible(void* ptr, size_t size) {
+ MEMORY_TOOL_MAKE_NOACCESS(ptr, size);
+}
+
Arena::Arena() : bytes_allocated_(0), next_(nullptr) {
}
@@ -290,8 +302,7 @@ ArenaAllocator::ArenaAllocator(ArenaPool* pool)
begin_(nullptr),
end_(nullptr),
ptr_(nullptr),
- arena_head_(nullptr),
- is_running_on_memory_tool_(RUNNING_ON_MEMORY_TOOL) {
+ arena_head_(nullptr) {
}
void ArenaAllocator::UpdateBytesAllocated() {
@@ -302,14 +313,13 @@ void ArenaAllocator::UpdateBytesAllocated() {
}
}
-void* ArenaAllocator::AllocValgrind(size_t bytes, ArenaAllocKind kind) {
+void* ArenaAllocator::AllocWithMemoryTool(size_t bytes, ArenaAllocKind kind) {
size_t rounded_bytes = RoundUp(bytes + kMemoryToolRedZoneBytes, 8);
if (UNLIKELY(ptr_ + rounded_bytes > end_)) {
// Obtain a new block.
ObtainNewArenaForAllocation(rounded_bytes);
- if (UNLIKELY(ptr_ == nullptr)) {
- return nullptr;
- }
+ CHECK(ptr_ != nullptr);
+ MEMORY_TOOL_MAKE_UNDEFINED(ptr_, end_ - ptr_);
}
ArenaAllocatorStats::RecordAlloc(rounded_bytes, kind);
uint8_t* ret = ptr_;
@@ -318,6 +328,7 @@ void* ArenaAllocator::AllocValgrind(size_t bytes, ArenaAllocKind kind) {
for (uint8_t* ptr = ret; ptr < ptr_; ++ptr) {
CHECK_EQ(*ptr, 0U);
}
+ MEMORY_TOOL_MAKE_DEFINED(ret, bytes);
MEMORY_TOOL_MAKE_NOACCESS(ret + bytes, rounded_bytes - bytes);
return ret;
}
diff --git a/runtime/base/arena_allocator.h b/runtime/base/arena_allocator.h
index b4f19ee8da..4e9282f9a1 100644
--- a/runtime/base/arena_allocator.h
+++ b/runtime/base/arena_allocator.h
@@ -21,6 +21,7 @@
#include <stddef.h>
#include "base/bit_utils.h"
+#include "base/memory_tool.h"
#include "debug_stack.h"
#include "macros.h"
#include "mutex.h"
@@ -104,6 +105,7 @@ enum ArenaAllocKind {
kArenaAllocStackMapStream,
kArenaAllocCodeGenerator,
kArenaAllocParallelMoveResolver,
+ kArenaAllocGraphChecker,
kNumArenaAllocKinds
};
@@ -117,13 +119,13 @@ class ArenaAllocatorStatsImpl<false> {
ArenaAllocatorStatsImpl(const ArenaAllocatorStatsImpl& other) = default;
ArenaAllocatorStatsImpl& operator = (const ArenaAllocatorStatsImpl& other) = delete;
- void Copy(const ArenaAllocatorStatsImpl& other) { UNUSED(other); }
- void RecordAlloc(size_t bytes, ArenaAllocKind kind) { UNUSED(bytes, kind); }
+ void Copy(const ArenaAllocatorStatsImpl& other ATTRIBUTE_UNUSED) {}
+ void RecordAlloc(size_t bytes ATTRIBUTE_UNUSED, ArenaAllocKind kind ATTRIBUTE_UNUSED) {}
size_t NumAllocations() const { return 0u; }
size_t BytesAllocated() const { return 0u; }
- void Dump(std::ostream& os, const Arena* first, ssize_t lost_bytes_adjustment) const {
- UNUSED(os); UNUSED(first); UNUSED(lost_bytes_adjustment);
- }
+ void Dump(std::ostream& os ATTRIBUTE_UNUSED,
+ const Arena* first ATTRIBUTE_UNUSED,
+ ssize_t lost_bytes_adjustment ATTRIBUTE_UNUSED) const {}
};
template <bool kCount>
@@ -149,6 +151,57 @@ class ArenaAllocatorStatsImpl {
typedef ArenaAllocatorStatsImpl<kArenaAllocatorCountAllocations> ArenaAllocatorStats;
+template <bool kAvailable, bool kValgrind>
+class ArenaAllocatorMemoryToolCheckImpl {
+ // This is the generic template but since there is a partial specialization
+ // for kValgrind == false, this can be instantiated only for kValgrind == true.
+ static_assert(kValgrind, "This template can be instantiated only for Valgrind.");
+ static_assert(kAvailable, "Valgrind implies memory tool availability.");
+
+ public:
+ ArenaAllocatorMemoryToolCheckImpl() : is_running_on_valgrind_(RUNNING_ON_MEMORY_TOOL) { }
+ bool IsRunningOnMemoryTool() { return is_running_on_valgrind_; }
+
+ private:
+ const bool is_running_on_valgrind_;
+};
+
+template <bool kAvailable>
+class ArenaAllocatorMemoryToolCheckImpl<kAvailable, false> {
+ public:
+ ArenaAllocatorMemoryToolCheckImpl() { }
+ bool IsRunningOnMemoryTool() { return kAvailable; }
+};
+
+typedef ArenaAllocatorMemoryToolCheckImpl<kMemoryToolIsAvailable, kMemoryToolIsValgrind>
+ ArenaAllocatorMemoryToolCheck;
+
+class ArenaAllocatorMemoryTool : private ArenaAllocatorMemoryToolCheck {
+ public:
+ using ArenaAllocatorMemoryToolCheck::IsRunningOnMemoryTool;
+
+ void MakeDefined(void* ptr, size_t size) {
+ if (UNLIKELY(IsRunningOnMemoryTool())) {
+ DoMakeDefined(ptr, size);
+ }
+ }
+ void MakeUndefined(void* ptr, size_t size) {
+ if (UNLIKELY(IsRunningOnMemoryTool())) {
+ DoMakeUndefined(ptr, size);
+ }
+ }
+ void MakeInaccessible(void* ptr, size_t size) {
+ if (UNLIKELY(IsRunningOnMemoryTool())) {
+ DoMakeInaccessible(ptr, size);
+ }
+ }
+
+ private:
+ void DoMakeDefined(void* ptr, size_t size);
+ void DoMakeUndefined(void* ptr, size_t size);
+ void DoMakeInaccessible(void* ptr, size_t size);
+};
+
class Arena {
public:
static constexpr size_t kDefaultSize = 128 * KB;
@@ -233,18 +286,24 @@ class ArenaPool {
DISALLOW_COPY_AND_ASSIGN(ArenaPool);
};
-class ArenaAllocator : private DebugStackRefCounter, private ArenaAllocatorStats {
+class ArenaAllocator
+ : private DebugStackRefCounter, private ArenaAllocatorStats, private ArenaAllocatorMemoryTool {
public:
explicit ArenaAllocator(ArenaPool* pool);
~ArenaAllocator();
+ using ArenaAllocatorMemoryTool::IsRunningOnMemoryTool;
+ using ArenaAllocatorMemoryTool::MakeDefined;
+ using ArenaAllocatorMemoryTool::MakeUndefined;
+ using ArenaAllocatorMemoryTool::MakeInaccessible;
+
// Get adapter for use in STL containers. See arena_containers.h .
ArenaAllocatorAdapter<void> Adapter(ArenaAllocKind kind = kArenaAllocSTL);
// Returns zeroed memory.
void* Alloc(size_t bytes, ArenaAllocKind kind = kArenaAllocMisc) ALWAYS_INLINE {
- if (UNLIKELY(is_running_on_memory_tool_)) {
- return AllocValgrind(bytes, kind);
+ if (UNLIKELY(IsRunningOnMemoryTool())) {
+ return AllocWithMemoryTool(bytes, kind);
}
bytes = RoundUp(bytes, kAlignment);
if (UNLIKELY(ptr_ + bytes > end_)) {
@@ -268,6 +327,7 @@ class ArenaAllocator : private DebugStackRefCounter, private ArenaAllocatorStats
auto* end = reinterpret_cast<uint8_t*>(ptr) + ptr_size;
// If we haven't allocated anything else, we can safely extend.
if (end == ptr_) {
+ DCHECK(!IsRunningOnMemoryTool()); // Red zone prevents end == ptr_.
const size_t size_delta = new_size - ptr_size;
// Check remain space.
const size_t remain = end_ - ptr_;
@@ -288,7 +348,7 @@ class ArenaAllocator : private DebugStackRefCounter, private ArenaAllocatorStats
return static_cast<T*>(Alloc(length * sizeof(T), kind));
}
- void* AllocValgrind(size_t bytes, ArenaAllocKind kind);
+ void* AllocWithMemoryTool(size_t bytes, ArenaAllocKind kind);
void ObtainNewArenaForAllocation(size_t allocation_size);
@@ -316,7 +376,6 @@ class ArenaAllocator : private DebugStackRefCounter, private ArenaAllocatorStats
uint8_t* end_;
uint8_t* ptr_;
Arena* arena_head_;
- bool is_running_on_memory_tool_;
template <typename U>
friend class ArenaAllocatorAdapter;
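AllocWithMemoryTool() pairs every allocation with a trailing red zone and the Make*() calls above. A minimal sketch of that pattern with the MEMORY_TOOL_* macros replaced by no-op stand-ins:

#include <cstddef>
#include <cstdint>

// No-op stand-ins for MEMORY_TOOL_MAKE_DEFINED / MEMORY_TOOL_MAKE_NOACCESS.
#define SKETCH_MAKE_DEFINED(p, s)  ((void)(p), (void)(s))
#define SKETCH_MAKE_NOACCESS(p, s) ((void)(p), (void)(s))

constexpr size_t kSketchRedZoneBytes = 8;

// Bump-pointer allocation with a trailing red zone that the memory tool marks
// inaccessible, so any write past `bytes` is reported.
inline uint8_t* AllocWithRedZoneSketch(uint8_t*& ptr, uint8_t* end, size_t bytes) {
  size_t rounded = (bytes + kSketchRedZoneBytes + 7) & ~static_cast<size_t>(7);
  if (ptr + rounded > end) {
    return nullptr;  // The real allocator obtains a new arena here.
  }
  uint8_t* ret = ptr;
  ptr += rounded;
  SKETCH_MAKE_DEFINED(ret, bytes);                     // usable region
  SKETCH_MAKE_NOACCESS(ret + bytes, rounded - bytes);  // red zone
  return ret;
}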
diff --git a/runtime/base/arena_containers.h b/runtime/base/arena_containers.h
index 810c1c4b66..e2d4c2411e 100644
--- a/runtime/base/arena_containers.h
+++ b/runtime/base/arena_containers.h
@@ -20,9 +20,12 @@
#include <deque>
#include <queue>
#include <set>
-#include <vector>
+#include <utility>
#include "arena_allocator.h"
+#include "base/dchecked_vector.h"
+#include "hash_map.h"
+#include "hash_set.h"
#include "safe_map.h"
namespace art {
@@ -48,7 +51,7 @@ template <typename T>
using ArenaQueue = std::queue<T, ArenaDeque<T>>;
template <typename T>
-using ArenaVector = std::vector<T, ArenaAllocatorAdapter<T>>;
+using ArenaVector = dchecked_vector<T, ArenaAllocatorAdapter<T>>;
template <typename T, typename Comparator = std::less<T>>
using ArenaSet = std::set<T, Comparator, ArenaAllocatorAdapter<T>>;
@@ -57,6 +60,24 @@ template <typename K, typename V, typename Comparator = std::less<K>>
using ArenaSafeMap =
SafeMap<K, V, Comparator, ArenaAllocatorAdapter<std::pair<const K, V>>>;
+template <typename T,
+ typename EmptyFn = DefaultEmptyFn<T>,
+ typename HashFn = std::hash<T>,
+ typename Pred = std::equal_to<T>>
+using ArenaHashSet = HashSet<T, EmptyFn, HashFn, Pred, ArenaAllocatorAdapter<T>>;
+
+template <typename Key,
+ typename Value,
+ typename EmptyFn = DefaultEmptyFn<std::pair<Key, Value>>,
+ typename HashFn = std::hash<Key>,
+ typename Pred = std::equal_to<Key>>
+using ArenaHashMap = HashMap<Key,
+ Value,
+ EmptyFn,
+ HashFn,
+ Pred,
+ ArenaAllocatorAdapter<std::pair<Key, Value>>>;
+
// Implementation details below.
template <bool kCount>
@@ -155,20 +176,22 @@ class ArenaAllocatorAdapter : private ArenaAllocatorAdapterKind {
pointer address(reference x) const { return &x; }
const_pointer address(const_reference x) const { return &x; }
- pointer allocate(size_type n, ArenaAllocatorAdapter<void>::pointer hint = nullptr) {
- UNUSED(hint);
+ pointer allocate(size_type n,
+ ArenaAllocatorAdapter<void>::pointer hint ATTRIBUTE_UNUSED = nullptr) {
DCHECK_LE(n, max_size());
return arena_allocator_->AllocArray<T>(n, ArenaAllocatorAdapterKind::Kind());
}
void deallocate(pointer p, size_type n) {
- UNUSED(p, n);
+ arena_allocator_->MakeInaccessible(p, sizeof(T) * n);
}
- void construct(pointer p, const_reference val) {
- new (static_cast<void*>(p)) value_type(val);
+ template <typename U, typename... Args>
+ void construct(U* p, Args&&... args) {
+ ::new (static_cast<void*>(p)) U(std::forward<Args>(args)...);
}
- void destroy(pointer p) {
- p->~value_type();
+ template <typename U>
+ void destroy(U* p) {
+ p->~U();
}
private:
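The adapter's construct()/destroy() become variadic templates because C++11 containers may construct objects other than value_type and may emplace with arbitrary argument lists. A self-contained illustration of the same forwarding pattern on top of std::allocator:

#include <memory>
#include <utility>
#include <vector>

template <typename T>
struct ForwardingAllocator : std::allocator<T> {
  // Construct any type U in place, forwarding the argument list unchanged.
  template <typename U, typename... Args>
  void construct(U* p, Args&&... args) {
    ::new (static_cast<void*>(p)) U(std::forward<Args>(args)...);
  }
  template <typename U>
  void destroy(U* p) { p->~U(); }
};

void ForwardingAllocatorSketch() {
  std::vector<std::pair<int, int>, ForwardingAllocator<std::pair<int, int>>> v;
  v.emplace_back(1, 2);  // arguments reach construct(U*, Args&&...) unchanged
}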
diff --git a/runtime/base/bit_vector.cc b/runtime/base/bit_vector.cc
index cfd3d24aad..5e97a63c0d 100644
--- a/runtime/base/bit_vector.cc
+++ b/runtime/base/bit_vector.cc
@@ -312,10 +312,6 @@ void BitVector::Copy(const BitVector *src) {
}
}
-#if defined(__clang__) && defined(__ARM_64BIT_STATE)
-// b/19180814 When POPCOUNT is inlined, boot up failed on arm64 devices.
-__attribute__((optnone))
-#endif
uint32_t BitVector::NumSetBits(const uint32_t* storage, uint32_t end) {
uint32_t word_end = WordIndex(end);
uint32_t partial_word_bits = end & 0x1f;
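The removed __attribute__((optnone)) was only a workaround for b/19180814; NumSetBits() itself is a plain word-walking population count. A sketch of that logic using the compiler builtin that ART's POPCOUNT wraps:

#include <cstdint>

// Counts the bits set in indices [0, end) of a 32-bit-word bit vector.
inline uint32_t NumSetBitsSketch(const uint32_t* storage, uint32_t end) {
  uint32_t word_end = end >> 5;        // number of whole words before bit `end`
  uint32_t partial_bits = end & 0x1f;  // bits of the final word still to count
  uint32_t count = 0;
  for (uint32_t word = 0; word < word_end; ++word) {
    count += __builtin_popcount(storage[word]);
  }
  if (partial_bits != 0) {
    count += __builtin_popcount(storage[word_end] & ~(0xffffffffu << partial_bits));
  }
  return count;
}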
diff --git a/runtime/base/dchecked_vector.h b/runtime/base/dchecked_vector.h
new file mode 100644
index 0000000000..6ec573a5fb
--- /dev/null
+++ b/runtime/base/dchecked_vector.h
@@ -0,0 +1,228 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_BASE_DCHECKED_VECTOR_H_
+#define ART_RUNTIME_BASE_DCHECKED_VECTOR_H_
+
+#include <algorithm>
+#include <type_traits>
+#include <vector>
+
+#include "base/logging.h"
+
+namespace art {
+
+// Template class serving as a replacement for std::vector<> but adding
+// DCHECK()s for the subscript operator, front(), back(), pop_back(),
+// and for insert()/emplace()/erase() positions.
+//
+// Note: The element accessor at() is specified as throwing std::out_of_range
+// but we do not use exceptions, so this accessor is deliberately hidden.
+// Note: The common pattern &v[0] used to retrieve pointer to the data is not
+// valid for an empty dchecked_vector<>. Use data() to avoid checking empty().
+template <typename T, typename Alloc = std::allocator<T>>
+class dchecked_vector : private std::vector<T, Alloc> {
+ private:
+ // std::vector<> has a slightly different specialization for bool. We don't provide that.
+ static_assert(!std::is_same<T, bool>::value, "Not implemented for bool.");
+ using Base = std::vector<T, Alloc>;
+
+ public:
+ using typename Base::value_type;
+ using typename Base::allocator_type;
+ using typename Base::reference;
+ using typename Base::const_reference;
+ using typename Base::pointer;
+ using typename Base::const_pointer;
+ using typename Base::iterator;
+ using typename Base::const_iterator;
+ using typename Base::reverse_iterator;
+ using typename Base::const_reverse_iterator;
+ using typename Base::size_type;
+ using typename Base::difference_type;
+
+ // Construct/copy/destroy.
+ dchecked_vector()
+ : Base() { }
+ explicit dchecked_vector(const allocator_type& alloc)
+ : Base(alloc) { }
+ explicit dchecked_vector(size_type n, const allocator_type& alloc = allocator_type())
+ : Base(n, alloc) { }
+ dchecked_vector(size_type n,
+ const value_type& value,
+ const allocator_type& alloc = allocator_type())
+ : Base(n, value, alloc) { }
+ template <typename InputIterator>
+ dchecked_vector(InputIterator first,
+ InputIterator last,
+ const allocator_type& alloc = allocator_type())
+ : Base(first, last, alloc) { }
+ dchecked_vector(const dchecked_vector& src)
+ : Base(src) { }
+ dchecked_vector(const dchecked_vector& src, const allocator_type& alloc)
+ : Base(src, alloc) { }
+ dchecked_vector(dchecked_vector&& src)
+ : Base(std::move(src)) { }
+ dchecked_vector(dchecked_vector&& src, const allocator_type& alloc)
+ : Base(std::move(src), alloc) { }
+ dchecked_vector(std::initializer_list<value_type> il,
+ const allocator_type& alloc = allocator_type())
+ : Base(il, alloc) { }
+ ~dchecked_vector() = default;
+ dchecked_vector& operator=(const dchecked_vector& src) {
+ Base::operator=(src);
+ return *this;
+ }
+ dchecked_vector& operator=(dchecked_vector&& src) {
+ Base::operator=(std::move(src));
+ return *this;
+ }
+ dchecked_vector& operator=(std::initializer_list<value_type> il) {
+ Base::operator=(il);
+ return *this;
+ }
+
+ // Iterators.
+ using Base::begin;
+ using Base::end;
+ using Base::rbegin;
+ using Base::rend;
+ using Base::cbegin;
+ using Base::cend;
+ using Base::crbegin;
+ using Base::crend;
+
+ // Capacity.
+ using Base::size;
+ using Base::max_size;
+ using Base::resize;
+ using Base::capacity;
+ using Base::empty;
+ using Base::reserve;
+ using Base::shrink_to_fit;
+
+ // Element access: inherited.
+ // Note: Deliberately not providing at().
+ using Base::data;
+
+ // Element access: subscript operator. Check index.
+ reference operator[](size_type n) {
+ DCHECK_LT(n, size());
+ return Base::operator[](n);
+ }
+ const_reference operator[](size_type n) const {
+ DCHECK_LT(n, size());
+ return Base::operator[](n);
+ }
+
+ // Element access: front(), back(). Check not empty.
+ reference front() { DCHECK(!empty()); return Base::front(); }
+ const_reference front() const { DCHECK(!empty()); return Base::front(); }
+ reference back() { DCHECK(!empty()); return Base::back(); }
+ const_reference back() const { DCHECK(!empty()); return Base::back(); }
+
+ // Modifiers: inherited.
+ using Base::assign;
+ using Base::push_back;
+ using Base::clear;
+ using Base::emplace_back;
+
+ // Modifiers: pop_back(). Check not empty.
+ void pop_back() { DCHECK(!empty()); Base::pop_back(); }
+
+ // Modifiers: swap(). Swap only with another dchecked_vector instead of a plain vector.
+ void swap(dchecked_vector& other) { Base::swap(other); }
+
+ // Modifiers: insert(). Check position.
+ iterator insert(const_iterator position, const value_type& value) {
+ DCHECK(cbegin() <= position && position <= cend());
+ return Base::insert(position, value);
+ }
+ iterator insert(const_iterator position, size_type n, const value_type& value) {
+ DCHECK(cbegin() <= position && position <= cend());
+ return Base::insert(position, n, value);
+ }
+ template <typename InputIterator>
+ iterator insert(const_iterator position, InputIterator first, InputIterator last) {
+ DCHECK(cbegin() <= position && position <= cend());
+ return Base::insert(position, first, last);
+ }
+ iterator insert(const_iterator position, value_type&& value) {
+ DCHECK(cbegin() <= position && position <= cend());
+ return Base::insert(position, std::move(value));
+ }
+ iterator insert(const_iterator position, std::initializer_list<value_type> il) {
+ DCHECK(cbegin() <= position && position <= cend());
+ return Base::insert(position, il);
+ }
+
+ // Modifiers: erase(). Check position.
+ iterator erase(const_iterator position) {
+ DCHECK(cbegin() <= position && position < cend());
+ return Base::erase(position);
+ }
+ iterator erase(const_iterator first, const_iterator last) {
+ DCHECK(cbegin() <= first && first <= cend());
+ DCHECK(first <= last && last <= cend());
+ return Base::erase(first, last);
+ }
+
+ // Modifiers: emplace(). Check position.
+ template <typename... Args>
+ iterator emplace(const_iterator position, Args&&... args) {
+ DCHECK(cbegin() <= position && position <= cend());
+ return Base::emplace(position, std::forward<Args>(args)...);
+ }
+
+ // Allocator.
+ using Base::get_allocator;
+};
+
+// Non-member swap(), found by argument-dependent lookup for an unqualified call.
+template <typename T, typename Alloc>
+void swap(dchecked_vector<T, Alloc>& lhs, dchecked_vector<T, Alloc>& rhs) {
+ lhs.swap(rhs);
+}
+
+// Non-member relational operators.
+template <typename T, typename Alloc>
+bool operator==(const dchecked_vector<T, Alloc>& lhs, const dchecked_vector<T, Alloc>& rhs) {
+ return lhs.size() == rhs.size() && std::equal(lhs.begin(), lhs.end(), rhs.begin());
+}
+template <typename T, typename Alloc>
+bool operator!=(const dchecked_vector<T, Alloc>& lhs, const dchecked_vector<T, Alloc>& rhs) {
+ return !(lhs == rhs);
+}
+template <typename T, typename Alloc>
+bool operator<(const dchecked_vector<T, Alloc>& lhs, const dchecked_vector<T, Alloc>& rhs) {
+ return std::lexicographical_compare(lhs.begin(), lhs.end(), rhs.begin(), rhs.end());
+}
+template <typename T, typename Alloc>
+bool operator<=(const dchecked_vector<T, Alloc>& lhs, const dchecked_vector<T, Alloc>& rhs) {
+ return !(rhs < lhs);
+}
+template <typename T, typename Alloc>
+bool operator>(const dchecked_vector<T, Alloc>& lhs, const dchecked_vector<T, Alloc>& rhs) {
+ return rhs < lhs;
+}
+template <typename T, typename Alloc>
+bool operator>=(const dchecked_vector<T, Alloc>& lhs, const dchecked_vector<T, Alloc>& rhs) {
+ return !(lhs < rhs);
+}
+
+} // namespace art
+
+#endif // ART_RUNTIME_BASE_DCHECKED_VECTOR_H_
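A brief usage sketch, assuming a debug build: the added DCHECK()s fire on misuse, while release builds behave exactly like the underlying std::vector.

#include <memory>

#include "base/dchecked_vector.h"

namespace art {

void DcheckedVectorSketch() {
  dchecked_vector<int, std::allocator<int>> v;
  v.push_back(1);
  v.push_back(2);
  int last = v.back();  // OK: the vector is not empty
  v.pop_back();
  // v[5];        // would DCHECK-fail in debug builds: index >= size()
  // v.front();   // fine here; would DCHECK-fail only on an empty vector
  static_cast<void>(last);
}

}  // namespace art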
diff --git a/runtime/base/debug_stack.h b/runtime/base/debug_stack.h
index 03f457534e..e19aecb712 100644
--- a/runtime/base/debug_stack.h
+++ b/runtime/base/debug_stack.h
@@ -54,7 +54,7 @@ class DebugStackRefCounterImpl<false> {
template <>
class DebugStackReferenceImpl<false> {
public:
- explicit DebugStackReferenceImpl(DebugStackRefCounterImpl<false>* counter) { UNUSED(counter); }
+ explicit DebugStackReferenceImpl(DebugStackRefCounterImpl<false>* counter ATTRIBUTE_UNUSED) {}
DebugStackReferenceImpl(const DebugStackReferenceImpl& other) = default;
DebugStackReferenceImpl& operator=(const DebugStackReferenceImpl& other) = default;
void CheckTop() { }
@@ -63,7 +63,7 @@ class DebugStackReferenceImpl<false> {
template <>
class DebugStackIndirectTopRefImpl<false> {
public:
- explicit DebugStackIndirectTopRefImpl(DebugStackReferenceImpl<false>* ref) { UNUSED(ref); }
+ explicit DebugStackIndirectTopRefImpl(DebugStackReferenceImpl<false>* ref ATTRIBUTE_UNUSED) {}
DebugStackIndirectTopRefImpl(const DebugStackIndirectTopRefImpl& other) = default;
DebugStackIndirectTopRefImpl& operator=(const DebugStackIndirectTopRefImpl& other) = default;
void CheckTop() { }
diff --git a/runtime/base/hash_map.h b/runtime/base/hash_map.h
index eab80ff19f..b18d586f3a 100644
--- a/runtime/base/hash_map.h
+++ b/runtime/base/hash_map.h
@@ -51,8 +51,22 @@ class HashMapWrapper {
template <class Key, class Value, class EmptyFn,
class HashFn = std::hash<Key>, class Pred = std::equal_to<Key>,
class Alloc = std::allocator<std::pair<Key, Value>>>
-class HashMap : public HashSet<std::pair<Key, Value>, EmptyFn, HashMapWrapper<HashFn>,
- HashMapWrapper<Pred>, Alloc> {
+class HashMap : public HashSet<std::pair<Key, Value>,
+ EmptyFn,
+ HashMapWrapper<HashFn>,
+ HashMapWrapper<Pred>,
+ Alloc> {
+ private:
+ using Base = HashSet<std::pair<Key, Value>,
+ EmptyFn,
+ HashMapWrapper<HashFn>,
+ HashMapWrapper<Pred>,
+ Alloc>;
+
+ public:
+ HashMap() : Base() { }
+ explicit HashMap(const Alloc& alloc)
+ : Base(alloc) { }
};
} // namespace art
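The new constructors make HashMap usable from allocator-aware containers. A hypothetical usage sketch; IntPairEmptyFn is an assumption chosen for the example, since the table stores elements in-place and needs a designated empty value:

#include <utility>

#include "base/hash_map.h"

// Marks a slot empty by storing a key that real entries never use.
struct IntPairEmptyFn {
  void MakeEmpty(std::pair<int, int>& item) const { item.first = -1; }
  bool IsEmpty(const std::pair<int, int>& item) const { return item.first == -1; }
};

void HashMapSketch() {
  art::HashMap<int, int, IntPairEmptyFn> map;  // default std::allocator-backed
  map.Insert(std::make_pair(1, 100));
  auto it = map.Find(1);
  if (it != map.end()) {
    static_cast<void>(it->second);  // == 100
  }
}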
diff --git a/runtime/base/hash_set.h b/runtime/base/hash_set.h
index d110fe30b7..4819f06bb4 100644
--- a/runtime/base/hash_set.h
+++ b/runtime/base/hash_set.h
@@ -18,6 +18,7 @@
#define ART_RUNTIME_BASE_HASH_SET_H_
#include <functional>
+#include <iterator>
#include <memory>
#include <stdint.h>
#include <utility>
@@ -45,7 +46,7 @@ class DefaultEmptyFn<T*> {
void MakeEmpty(T*& item) const {
item = nullptr;
}
- bool IsEmpty(const T*& item) const {
+ bool IsEmpty(T* const& item) const {
return item == nullptr;
}
};
@@ -59,7 +60,7 @@ template <class T, class EmptyFn = DefaultEmptyFn<T>, class HashFn = std::hash<T
class Pred = std::equal_to<T>, class Alloc = std::allocator<T>>
class HashSet {
template <class Elem, class HashSetType>
- class BaseIterator {
+ class BaseIterator : std::iterator<std::forward_iterator_tag, Elem> {
public:
BaseIterator(const BaseIterator&) = default;
BaseIterator(BaseIterator&&) = default;
@@ -82,7 +83,7 @@ class HashSet {
}
BaseIterator operator++(int) {
- Iterator temp = *this;
+ BaseIterator temp = *this;
this->index_ = this->NextNonEmptySlot(this->index_, hash_set_);
return temp;
}
@@ -96,7 +97,7 @@ class HashSet {
return &**this;
}
- // TODO: Operator -- --(int)
+ // TODO: Operator -- --(int) (and use std::bidirectional_iterator_tag)
private:
size_t index_;
@@ -115,34 +116,91 @@ class HashSet {
};
public:
- static constexpr double kDefaultMinLoadFactor = 0.5;
- static constexpr double kDefaultMaxLoadFactor = 0.9;
+ using value_type = T;
+ using allocator_type = Alloc;
+ using reference = T&;
+ using const_reference = const T&;
+ using pointer = T*;
+ using const_pointer = const T*;
+ using iterator = BaseIterator<T, HashSet>;
+ using const_iterator = BaseIterator<const T, const HashSet>;
+ using size_type = size_t;
+ using difference_type = ptrdiff_t;
+
+ static constexpr double kDefaultMinLoadFactor = 0.4;
+ static constexpr double kDefaultMaxLoadFactor = 0.7;
static constexpr size_t kMinBuckets = 1000;
- typedef BaseIterator<T, HashSet> Iterator;
- typedef BaseIterator<const T, const HashSet> ConstIterator;
-
// If we don't own the data, this will create a new array which owns the data.
void Clear() {
DeallocateStorage();
- AllocateStorage(1);
num_elements_ = 0;
elements_until_expand_ = 0;
}
- HashSet() : num_elements_(0), num_buckets_(0), owns_data_(false), data_(nullptr),
- min_load_factor_(kDefaultMinLoadFactor), max_load_factor_(kDefaultMaxLoadFactor) {
- Clear();
- }
-
- HashSet(const HashSet& other) : num_elements_(0), num_buckets_(0), owns_data_(false),
- data_(nullptr) {
- *this = other;
+ HashSet() : HashSet(kDefaultMinLoadFactor, kDefaultMaxLoadFactor) {}
+
+ HashSet(double min_load_factor, double max_load_factor)
+ : num_elements_(0u),
+ num_buckets_(0u),
+ elements_until_expand_(0u),
+ owns_data_(false),
+ data_(nullptr),
+ min_load_factor_(min_load_factor),
+ max_load_factor_(max_load_factor) {
+ DCHECK_GT(min_load_factor, 0.0);
+ DCHECK_LT(max_load_factor, 1.0);
+ }
+
+ explicit HashSet(const allocator_type& alloc)
+ : allocfn_(alloc),
+ hashfn_(),
+ emptyfn_(),
+ pred_(),
+ num_elements_(0u),
+ num_buckets_(0u),
+ elements_until_expand_(0u),
+ owns_data_(false),
+ data_(nullptr),
+ min_load_factor_(kDefaultMinLoadFactor),
+ max_load_factor_(kDefaultMaxLoadFactor) {
+ }
+
+ HashSet(const HashSet& other)
+ : allocfn_(other.allocfn_),
+ hashfn_(other.hashfn_),
+ emptyfn_(other.emptyfn_),
+ pred_(other.pred_),
+ num_elements_(other.num_elements_),
+ num_buckets_(0),
+ elements_until_expand_(other.elements_until_expand_),
+ owns_data_(false),
+ data_(nullptr),
+ min_load_factor_(other.min_load_factor_),
+ max_load_factor_(other.max_load_factor_) {
+ AllocateStorage(other.NumBuckets());
+ for (size_t i = 0; i < num_buckets_; ++i) {
+ ElementForIndex(i) = other.data_[i];
+ }
}
- HashSet(HashSet&& other) : num_elements_(0), num_buckets_(0), owns_data_(false),
- data_(nullptr) {
- *this = std::move(other);
+ HashSet(HashSet&& other)
+ : allocfn_(std::move(other.allocfn_)),
+ hashfn_(std::move(other.hashfn_)),
+ emptyfn_(std::move(other.emptyfn_)),
+ pred_(std::move(other.pred_)),
+ num_elements_(other.num_elements_),
+ num_buckets_(other.num_buckets_),
+ elements_until_expand_(other.elements_until_expand_),
+ owns_data_(other.owns_data_),
+ data_(other.data_),
+ min_load_factor_(other.min_load_factor_),
+ max_load_factor_(other.max_load_factor_) {
+ other.num_elements_ = 0u;
+ other.num_buckets_ = 0u;
+ other.elements_until_expand_ = 0u;
+ other.owns_data_ = false;
+ other.data_ = nullptr;
}
// Construct from existing data.
@@ -199,32 +257,18 @@ class HashSet {
}
HashSet& operator=(HashSet&& other) {
- std::swap(data_, other.data_);
- std::swap(num_buckets_, other.num_buckets_);
- std::swap(num_elements_, other.num_elements_);
- std::swap(elements_until_expand_, other.elements_until_expand_);
- std::swap(min_load_factor_, other.min_load_factor_);
- std::swap(max_load_factor_, other.max_load_factor_);
- std::swap(owns_data_, other.owns_data_);
+ HashSet(std::move(other)).swap(*this);
return *this;
}
HashSet& operator=(const HashSet& other) {
- DeallocateStorage();
- AllocateStorage(other.NumBuckets());
- for (size_t i = 0; i < num_buckets_; ++i) {
- ElementForIndex(i) = other.data_[i];
- }
- num_elements_ = other.num_elements_;
- elements_until_expand_ = other.elements_until_expand_;
- min_load_factor_ = other.min_load_factor_;
- max_load_factor_ = other.max_load_factor_;
+ HashSet(other).swap(*this); // NOLINT(runtime/explicit) - a case of lint gone mad.
return *this;
}
// Lower case for c++11 for each.
- Iterator begin() {
- Iterator ret(this, 0);
+ iterator begin() {
+ iterator ret(this, 0);
if (num_buckets_ != 0 && IsFreeSlot(ret.index_)) {
++ret; // Skip all the empty slots.
}
@@ -232,8 +276,8 @@ class HashSet {
}
// Lower case for c++11 for each. const version.
- ConstIterator begin() const {
- ConstIterator ret(this, 0);
+ const_iterator begin() const {
+ const_iterator ret(this, 0);
if (num_buckets_ != 0 && IsFreeSlot(ret.index_)) {
++ret; // Skip all the empty slots.
}
@@ -241,13 +285,13 @@ class HashSet {
}
// Lower case for c++11 for each.
- Iterator end() {
- return Iterator(this, NumBuckets());
+ iterator end() {
+ return iterator(this, NumBuckets());
}
// Lower case for c++11 for each. const version.
- ConstIterator end() const {
- return ConstIterator(this, NumBuckets());
+ const_iterator end() const {
+ return const_iterator(this, NumBuckets());
}
bool Empty() {
@@ -262,7 +306,7 @@ class HashSet {
// and set the empty slot to be the location we just moved from.
// Relies on maintaining the invariant that there's no empty slots from the 'ideal' index of an
// element to its actual location/index.
- Iterator Erase(Iterator it) {
+ iterator Erase(iterator it) {
// empty_index is the index that will become empty.
size_t empty_index = it.index_;
DCHECK(!IsFreeSlot(empty_index));
@@ -313,23 +357,23 @@ class HashSet {
// Example: a set of Class* sorted by name, where we want to look up a class by name without
// allocating a dummy object in the heap (done this way for performance).
template <typename K>
- Iterator Find(const K& key) {
+ iterator Find(const K& key) {
return FindWithHash(key, hashfn_(key));
}
template <typename K>
- ConstIterator Find(const K& key) const {
+ const_iterator Find(const K& key) const {
return FindWithHash(key, hashfn_(key));
}
template <typename K>
- Iterator FindWithHash(const K& key, size_t hash) {
- return Iterator(this, FindIndex(key, hash));
+ iterator FindWithHash(const K& key, size_t hash) {
+ return iterator(this, FindIndex(key, hash));
}
template <typename K>
- ConstIterator FindWithHash(const K& key, size_t hash) const {
- return ConstIterator(this, FindIndex(key, hash));
+ const_iterator FindWithHash(const K& key, size_t hash) const {
+ return const_iterator(this, FindIndex(key, hash));
}
// Insert an element, allows duplicates.
@@ -352,6 +396,26 @@ class HashSet {
return num_elements_;
}
+ void swap(HashSet& other) {
+ // Use argument-dependent lookup with fall-back to std::swap() for function objects.
+ using std::swap;
+ swap(allocfn_, other.allocfn_);
+ swap(hashfn_, other.hashfn_);
+ swap(emptyfn_, other.emptyfn_);
+ swap(pred_, other.pred_);
+ std::swap(data_, other.data_);
+ std::swap(num_buckets_, other.num_buckets_);
+ std::swap(num_elements_, other.num_elements_);
+ std::swap(elements_until_expand_, other.elements_until_expand_);
+ std::swap(min_load_factor_, other.min_load_factor_);
+ std::swap(max_load_factor_, other.max_load_factor_);
+ std::swap(owns_data_, other.owns_data_);
+ }
+
+ allocator_type get_allocator() const {
+ return allocfn_;
+ }
+
void ShrinkToMaximumLoad() {
Resize(Size() / max_load_factor_);
}
@@ -399,6 +463,31 @@ class HashSet {
return errors;
}
+ double GetMinLoadFactor() const {
+ return min_load_factor_;
+ }
+
+ double GetMaxLoadFactor() const {
+ return max_load_factor_;
+ }
+
+ // Change the load factor of the hash set. If the current load factor is greater than the max
+ // specified, then we resize the hash table storage.
+ void SetLoadFactor(double min_load_factor, double max_load_factor) {
+ DCHECK_LT(min_load_factor, max_load_factor);
+ DCHECK_GT(min_load_factor, 0.0);
+ DCHECK_LT(max_load_factor, 1.0);
+ min_load_factor_ = min_load_factor;
+ max_load_factor_ = max_load_factor;
+ elements_until_expand_ = NumBuckets() * max_load_factor_;
+ // If the current load factor exceeds the new maximum, resize so that the load factor becomes
+ // the mean of the minimum and maximum load factors.
+ const double load_factor = CalculateLoadFactor();
+ if (load_factor > max_load_factor_) {
+ Resize(Size() / ((min_load_factor_ + max_load_factor_) * 0.5));
+ }
+ }
+
private:
T& ElementForIndex(size_t index) {
DCHECK_LT(index, NumBuckets());
@@ -429,7 +518,7 @@ class HashSet {
}
// Find the hash table slot for an element, or return NumBuckets() if not found.
- // This value for not found is important so that Iterator(this, FindIndex(...)) == end().
+ // This value for not found is important so that iterator(this, FindIndex(...)) == end().
template <typename K>
size_t FindIndex(const K& element, size_t hash) const {
// Guard against failing to get an element for a non-existing index.
@@ -560,6 +649,12 @@ class HashSet {
double max_load_factor_;
};
+template <class T, class EmptyFn, class HashFn, class Pred, class Alloc>
+void swap(HashSet<T, EmptyFn, HashFn, Pred, Alloc>& lhs,
+ HashSet<T, EmptyFn, HashFn, Pred, Alloc>& rhs) {
+ lhs.swap(rhs);
+}
+
} // namespace art
#endif // ART_RUNTIME_BASE_HASH_SET_H_
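The new assignment operators above follow the copy-and-swap idiom: construct a temporary HashSet from the argument (copied or moved), then swap() it into *this, so both operators reuse the same member-wise swap and get the strong exception guarantee for free. A minimal, self-contained sketch of the same idiom, using a hypothetical Buffer type rather than ART code:

#include <algorithm>
#include <cstddef>
#include <utility>

class Buffer {
 public:
  Buffer() = default;
  Buffer(const Buffer& other) : size_(other.size_), data_(new int[other.size_]) {
    std::copy(other.data_, other.data_ + size_, data_);
  }
  Buffer(Buffer&& other) noexcept : size_(other.size_), data_(other.data_) {
    other.size_ = 0u;
    other.data_ = nullptr;
  }
  ~Buffer() { delete[] data_; }
  // Unified copy-and-swap assignment: the by-value parameter is the temporary.
  Buffer& operator=(Buffer other) noexcept {
    swap(other);
    return *this;
  }
  void swap(Buffer& other) noexcept {
    using std::swap;  // Argument-dependent lookup with std::swap fallback, as in HashSet::swap().
    swap(size_, other.size_);
    swap(data_, other.data_);
  }
 private:
  std::size_t size_ = 0u;
  int* data_ = nullptr;
};

HashSet spells the temporary out explicitly (HashSet(other).swap(*this) and HashSet(std::move(other)).swap(*this)) rather than taking the parameter by value, but the effect is the same.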
diff --git a/runtime/base/hash_set_test.cc b/runtime/base/hash_set_test.cc
index 6d2c5e0f2c..743e98ed84 100644
--- a/runtime/base/hash_set_test.cc
+++ b/runtime/base/hash_set_test.cc
@@ -196,6 +196,24 @@ TEST_F(HashSetTest, TestShrink) {
}
}
+TEST_F(HashSetTest, TestLoadFactor) {
+ HashSet<std::string, IsEmptyFnString> hash_set;
+ static constexpr size_t kStringCount = 1000;
+ static constexpr double kEpsilon = 0.01;
+ for (size_t i = 0; i < kStringCount; ++i) {
+ hash_set.Insert(RandomString(i % 10 + 1));
+ }
+ // Check that changing the load factor resizes the table to be within the target range.
+ EXPECT_GE(hash_set.CalculateLoadFactor() + kEpsilon, hash_set.GetMinLoadFactor());
+ EXPECT_LE(hash_set.CalculateLoadFactor() - kEpsilon, hash_set.GetMaxLoadFactor());
+ hash_set.SetLoadFactor(0.1, 0.3);
+ EXPECT_DOUBLE_EQ(0.1, hash_set.GetMinLoadFactor());
+ EXPECT_DOUBLE_EQ(0.3, hash_set.GetMaxLoadFactor());
+ EXPECT_LE(hash_set.CalculateLoadFactor() - kEpsilon, hash_set.GetMaxLoadFactor());
+ hash_set.SetLoadFactor(0.6, 0.8);
+ EXPECT_LE(hash_set.CalculateLoadFactor() - kEpsilon, hash_set.GetMaxLoadFactor());
+}
+
TEST_F(HashSetTest, TestStress) {
HashSet<std::string, IsEmptyFnString> hash_set;
std::unordered_multiset<std::string> std_set;
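The TestLoadFactor case exercises the new SetLoadFactor() contract: the getters reflect the new bounds, and the table is resized only when the current load factor exceeds the new maximum. A hedged usage sketch (it assumes the test fixture's IsEmptyFnString and RandomString helpers and is not part of the patch):

HashSet<std::string, IsEmptyFnString> set;
for (size_t i = 0; i < 1000u; ++i) {
  set.Insert(RandomString(i % 10 + 1));  // Bulk insertion under the default 0.4/0.7 bounds.
}
set.SetLoadFactor(0.6, 0.8);  // Allow a denser table: saves memory, lookups probe further.
set.SetLoadFactor(0.2, 0.5);  // Demand a sparser table: resizes now if the load is above 0.5.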
diff --git a/runtime/base/memory_tool.h b/runtime/base/memory_tool.h
index e0bdcfeced..e1a2e07aca 100644
--- a/runtime/base/memory_tool.h
+++ b/runtime/base/memory_tool.h
@@ -32,10 +32,12 @@
#define MEMORY_TOOL_MAKE_NOACCESS(p, s) __asan_poison_memory_region(p, s)
#define MEMORY_TOOL_MAKE_UNDEFINED(p, s) __asan_unpoison_memory_region(p, s)
#define MEMORY_TOOL_MAKE_DEFINED(p, s) __asan_unpoison_memory_region(p, s)
+constexpr bool kMemoryToolIsAvailable = true;
#else
#define MEMORY_TOOL_MAKE_NOACCESS(p, s) do { (void)(p); (void)(s); } while (0)
#define MEMORY_TOOL_MAKE_UNDEFINED(p, s) do { (void)(p); (void)(s); } while (0)
#define MEMORY_TOOL_MAKE_DEFINED(p, s) do { (void)(p); (void)(s); } while (0)
+constexpr bool kMemoryToolIsAvailable = false;
#endif
#define ATTRIBUTE_NO_SANITIZE_ADDRESS __attribute__((no_sanitize_address))
@@ -54,6 +56,7 @@ constexpr size_t kMemoryToolStackGuardSizeScale = 2;
#define MEMORY_TOOL_MAKE_DEFINED(p, s) VALGRIND_MAKE_MEM_DEFINED(p, s)
#define ATTRIBUTE_NO_SANITIZE_ADDRESS
#define RUNNING_ON_MEMORY_TOOL RUNNING_ON_VALGRIND
+constexpr bool kMemoryToolIsAvailable = true;
constexpr bool kMemoryToolIsValgrind = true;
constexpr bool kMemoryToolDetectsLeaks = true;
constexpr bool kMemoryToolAddsRedzones = true;
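kMemoryToolIsAvailable lets callers branch on memory-tool support with an ordinary compile-time constant instead of an #ifdef, so both sides of the branch stay type-checked. A standalone sketch of that pattern (RedZoneBytes is a hypothetical helper, not part of the patch):

#include <cstddef>
#include <cstdio>

constexpr bool kMemoryToolIsAvailable = false;  // In ART this constant comes from base/memory_tool.h.

std::size_t RedZoneBytes(std::size_t alloc_size) {
  // With the constant false the branch folds away, but the red-zone arithmetic
  // is still compiled and checked, unlike code hidden behind #ifdef.
  return kMemoryToolIsAvailable ? alloc_size / 8u : 0u;
}

int main() {
  std::printf("red zone for 64 bytes: %zu\n", RedZoneBytes(64u));
  return 0;
}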
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index b2c567760f..70bd398415 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -50,6 +50,8 @@ Mutex* Locks::mem_maps_lock_ = nullptr;
Mutex* Locks::modify_ldt_lock_ = nullptr;
MutatorMutex* Locks::mutator_lock_ = nullptr;
Mutex* Locks::profiler_lock_ = nullptr;
+ReaderWriterMutex* Locks::oat_file_manager_lock_ = nullptr;
+ReaderWriterMutex* Locks::oat_file_count_lock_ = nullptr;
Mutex* Locks::reference_processor_lock_ = nullptr;
Mutex* Locks::reference_queue_cleared_references_lock_ = nullptr;
Mutex* Locks::reference_queue_finalizer_references_lock_ = nullptr;
@@ -940,6 +942,8 @@ void Locks::Init() {
DCHECK(classlinker_classes_lock_ != nullptr);
DCHECK(deoptimization_lock_ != nullptr);
DCHECK(heap_bitmap_lock_ != nullptr);
+ DCHECK(oat_file_manager_lock_ != nullptr);
+ DCHECK(oat_file_count_lock_ != nullptr);
DCHECK(intern_table_lock_ != nullptr);
DCHECK(jni_libraries_lock_ != nullptr);
DCHECK(logging_lock_ != nullptr);
@@ -1028,6 +1032,14 @@ void Locks::Init() {
modify_ldt_lock_ = new Mutex("modify_ldt lock", current_lock_level);
}
+ UPDATE_CURRENT_LOCK_LEVEL(kOatFileManagerLock);
+ DCHECK(oat_file_manager_lock_ == nullptr);
+ oat_file_manager_lock_ = new ReaderWriterMutex("OatFile manager lock", current_lock_level);
+
+ UPDATE_CURRENT_LOCK_LEVEL(kOatFileCountLock);
+ DCHECK(oat_file_count_lock_ == nullptr);
+ oat_file_count_lock_ = new ReaderWriterMutex("OatFile count lock", current_lock_level);
+
UPDATE_CURRENT_LOCK_LEVEL(kInternTableLock);
DCHECK(intern_table_lock_ == nullptr);
intern_table_lock_ = new Mutex("InternTable lock", current_lock_level);
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 3da806b54d..d4c9057ab3 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -83,6 +83,8 @@ enum LockLevel {
kDexFileToMethodInlinerMapLock,
kInternTableLock,
kOatFileSecondaryLookupLock,
+ kOatFileCountLock,
+ kOatFileManagerLock,
kTracingUniqueMethodsLock,
kTracingStreamingLock,
kDefaultMutexLevel,
@@ -644,8 +646,14 @@ class Locks {
// Guards modification of the LDT on x86.
static Mutex* modify_ldt_lock_ ACQUIRED_AFTER(allocated_thread_ids_lock_);
+ // Guards opened oat files in OatFileManager.
+ static ReaderWriterMutex* oat_file_manager_lock_ ACQUIRED_AFTER(modify_ldt_lock_);
+
+ // Guards opened oat files in OatFileManager.
+ static ReaderWriterMutex* oat_file_count_lock_ ACQUIRED_AFTER(oat_file_manager_lock_);
+
// Guards intern table.
- static Mutex* intern_table_lock_ ACQUIRED_AFTER(modify_ldt_lock_);
+ static Mutex* intern_table_lock_ ACQUIRED_AFTER(oat_file_count_lock_);
// Guards reference processor.
static Mutex* reference_processor_lock_ ACQUIRED_AFTER(intern_table_lock_);
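The two new ReaderWriterMutex levels sit between the modify-LDT and intern-table locks, and the ACQUIRED_AFTER chain fixes the only legal nesting order: oat_file_manager_lock_ before oat_file_count_lock_ before intern_table_lock_. A self-contained sketch of that ordering discipline, with std::shared_mutex standing in for ART's ReaderWriterMutex (the names and the function are illustrative only):

#include <shared_mutex>

std::shared_mutex g_oat_file_manager_lock;  // analogue of Locks::oat_file_manager_lock_
std::shared_mutex g_oat_file_count_lock;    // analogue of Locks::oat_file_count_lock_

void ReadOatFileTables() {
  // Take the outer (manager) lock first, then the inner (count) lock; acquiring
  // them in the opposite order on another thread could deadlock.
  std::shared_lock<std::shared_mutex> manager_lock(g_oat_file_manager_lock);
  std::shared_lock<std::shared_mutex> count_lock(g_oat_file_count_lock);
  // ... read both oat-file tables under shared (reader) access ...
}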
diff --git a/runtime/base/scoped_arena_allocator.cc b/runtime/base/scoped_arena_allocator.cc
index d823edd6d2..31f96e4783 100644
--- a/runtime/base/scoped_arena_allocator.cc
+++ b/runtime/base/scoped_arena_allocator.cc
@@ -29,8 +29,7 @@ ArenaStack::ArenaStack(ArenaPool* arena_pool)
bottom_arena_(nullptr),
top_arena_(nullptr),
top_ptr_(nullptr),
- top_end_(nullptr),
- is_running_on_memory_tool_(RUNNING_ON_MEMORY_TOOL > 0) {
+ top_end_(nullptr) {
}
ArenaStack::~ArenaStack() {
@@ -91,7 +90,7 @@ void ArenaStack::UpdateBytesAllocated() {
}
}
-void* ArenaStack::AllocValgrind(size_t bytes, ArenaAllocKind kind) {
+void* ArenaStack::AllocWithMemoryTool(size_t bytes, ArenaAllocKind kind) {
size_t rounded_bytes = RoundUp(bytes + kMemoryToolRedZoneBytes, 8);
uint8_t* ptr = top_ptr_;
if (UNLIKELY(static_cast<size_t>(top_end_ - ptr) < rounded_bytes)) {
diff --git a/runtime/base/scoped_arena_allocator.h b/runtime/base/scoped_arena_allocator.h
index ca514e411c..2554fb0754 100644
--- a/runtime/base/scoped_arena_allocator.h
+++ b/runtime/base/scoped_arena_allocator.h
@@ -32,11 +32,16 @@ template <typename T>
class ScopedArenaAllocatorAdapter;
// Holds a list of Arenas for use by ScopedArenaAllocator stack.
-class ArenaStack : private DebugStackRefCounter {
+class ArenaStack : private DebugStackRefCounter, private ArenaAllocatorMemoryTool {
public:
explicit ArenaStack(ArenaPool* arena_pool);
~ArenaStack();
+ using ArenaAllocatorMemoryTool::IsRunningOnMemoryTool;
+ using ArenaAllocatorMemoryTool::MakeDefined;
+ using ArenaAllocatorMemoryTool::MakeUndefined;
+ using ArenaAllocatorMemoryTool::MakeInaccessible;
+
void Reset();
size_t PeakBytesAllocated() {
@@ -64,8 +69,8 @@ class ArenaStack : private DebugStackRefCounter {
// Private - access via ScopedArenaAllocator or ScopedArenaAllocatorAdapter.
void* Alloc(size_t bytes, ArenaAllocKind kind) ALWAYS_INLINE {
- if (UNLIKELY(is_running_on_memory_tool_)) {
- return AllocValgrind(bytes, kind);
+ if (UNLIKELY(IsRunningOnMemoryTool())) {
+ return AllocWithMemoryTool(bytes, kind);
}
size_t rounded_bytes = RoundUp(bytes, 8);
uint8_t* ptr = top_ptr_;
@@ -80,7 +85,7 @@ class ArenaStack : private DebugStackRefCounter {
uint8_t* AllocateFromNextArena(size_t rounded_bytes);
void UpdatePeakStatsAndRestore(const ArenaAllocatorStats& restore_stats);
void UpdateBytesAllocated();
- void* AllocValgrind(size_t bytes, ArenaAllocKind kind);
+ void* AllocWithMemoryTool(size_t bytes, ArenaAllocKind kind);
StatsAndPool stats_and_pool_;
Arena* bottom_arena_;
@@ -88,8 +93,6 @@ class ArenaStack : private DebugStackRefCounter {
uint8_t* top_ptr_;
uint8_t* top_end_;
- const bool is_running_on_memory_tool_;
-
friend class ScopedArenaAllocator;
template <typename T>
friend class ScopedArenaAllocatorAdapter;
@@ -129,7 +132,7 @@ class ScopedArenaAllocator
ScopedArenaAllocatorAdapter<void> Adapter(ArenaAllocKind kind = kArenaAllocSTL);
// Allow a delete-expression to destroy but not deallocate allocators created by Create().
- static void operator delete(void* ptr) { UNUSED(ptr); }
+ static void operator delete(void* ptr ATTRIBUTE_UNUSED) {}
private:
ArenaStack* const arena_stack_;
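ArenaStack::Alloc() keeps the common case as a plain bump-pointer allocation and only diverts to AllocWithMemoryTool() when a tool is attached. A self-contained toy version of that fast path (BumpArena is illustrative only, not the ART ArenaStack, and it simply fails instead of chaining to a next arena):

#include <cstddef>
#include <cstdint>

class BumpArena {
 public:
  void* Alloc(std::size_t bytes) {
    std::size_t rounded = (bytes + 7u) & ~static_cast<std::size_t>(7u);  // RoundUp(bytes, 8).
    if (static_cast<std::size_t>(end_ - ptr_) < rounded) {
      return nullptr;  // The real code allocates from the next Arena here.
    }
    std::uint8_t* result = ptr_;
    ptr_ += rounded;
    return result;
  }

 private:
  std::uint8_t buffer_[4096];
  std::uint8_t* ptr_ = buffer_;
  std::uint8_t* end_ = buffer_ + sizeof(buffer_);
};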
diff --git a/runtime/base/scoped_arena_containers.h b/runtime/base/scoped_arena_containers.h
index 82db60e4e4..562c2bf01c 100644
--- a/runtime/base/scoped_arena_containers.h
+++ b/runtime/base/scoped_arena_containers.h
@@ -21,9 +21,10 @@
#include <queue>
#include <set>
#include <unordered_map>
-#include <vector>
+#include <utility>
#include "arena_containers.h" // For ArenaAllocatorAdapterKind.
+#include "base/dchecked_vector.h"
#include "scoped_arena_allocator.h"
#include "safe_map.h"
@@ -47,7 +48,7 @@ template <typename T>
using ScopedArenaQueue = std::queue<T, ScopedArenaDeque<T>>;
template <typename T>
-using ScopedArenaVector = std::vector<T, ScopedArenaAllocatorAdapter<T>>;
+using ScopedArenaVector = dchecked_vector<T, ScopedArenaAllocatorAdapter<T>>;
template <typename T, typename Comparator = std::less<T>>
using ScopedArenaSet = std::set<T, Comparator, ScopedArenaAllocatorAdapter<T>>;
@@ -145,26 +146,27 @@ class ScopedArenaAllocatorAdapter
pointer address(reference x) const { return &x; }
const_pointer address(const_reference x) const { return &x; }
- pointer allocate(size_type n, ScopedArenaAllocatorAdapter<void>::pointer hint = nullptr) {
- UNUSED(hint);
+ pointer allocate(size_type n,
+ ScopedArenaAllocatorAdapter<void>::pointer hint ATTRIBUTE_UNUSED = nullptr) {
DCHECK_LE(n, max_size());
DebugStackIndirectTopRef::CheckTop();
return reinterpret_cast<T*>(arena_stack_->Alloc(n * sizeof(T),
ArenaAllocatorAdapterKind::Kind()));
}
void deallocate(pointer p, size_type n) {
- UNUSED(p);
- UNUSED(n);
DebugStackIndirectTopRef::CheckTop();
+ arena_stack_->MakeInaccessible(p, sizeof(T) * n);
}
- void construct(pointer p, const_reference val) {
+ template <typename U, typename... Args>
+ void construct(U* p, Args&&... args) {
// Don't CheckTop(), allow reusing existing capacity of a vector/deque below the top.
- new (static_cast<void*>(p)) value_type(val);
+ ::new (static_cast<void*>(p)) U(std::forward<Args>(args)...);
}
- void destroy(pointer p) {
+ template <typename U>
+ void destroy(U* p) {
// Don't CheckTop(), allow reusing existing capacity of a vector/deque below the top.
- p->~value_type();
+ p->~U();
}
private:
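Generalizing construct() and destroy() to variadic templates is what lets standard containers emplace elements through the adapter instead of copy-constructing them. A minimal standalone allocator with the same shape (MyAlloc is hypothetical, not ART code; a production allocator would also handle allocation failure):

#include <cstdlib>
#include <new>
#include <utility>
#include <vector>

template <typename T>
struct MyAlloc {
  using value_type = T;

  T* allocate(std::size_t n) {
    return static_cast<T*>(std::malloc(n * sizeof(T)));
  }
  void deallocate(T* p, std::size_t) { std::free(p); }

  // Perfect-forwarding construct()/destroy(), matching the updated adapter.
  template <typename U, typename... Args>
  void construct(U* p, Args&&... args) {
    ::new (static_cast<void*>(p)) U(std::forward<Args>(args)...);
  }
  template <typename U>
  void destroy(U* p) { p->~U(); }

  bool operator==(const MyAlloc&) const { return true; }
  bool operator!=(const MyAlloc&) const { return false; }
};

int main() {
  std::vector<std::pair<int, int>, MyAlloc<std::pair<int, int>>> v;
  v.emplace_back(1, 2);  // Arguments are forwarded straight into construct(); no temporary pair.
  return 0;
}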
diff --git a/runtime/check_reference_map_visitor.h b/runtime/check_reference_map_visitor.h
index 7965cd78bc..e8973511e3 100644
--- a/runtime/check_reference_map_visitor.h
+++ b/runtime/check_reference_map_visitor.h
@@ -17,6 +17,7 @@
#ifndef ART_RUNTIME_CHECK_REFERENCE_MAP_VISITOR_H_
#define ART_RUNTIME_CHECK_REFERENCE_MAP_VISITOR_H_
+#include "art_code.h"
#include "art_method-inl.h"
#include "gc_map.h"
#include "scoped_thread_state_change.h"
@@ -53,7 +54,7 @@ class CheckReferenceMapVisitor : public StackVisitor {
void CheckReferences(int* registers, int number_of_references, uint32_t native_pc_offset)
SHARED_REQUIRES(Locks::mutator_lock_) {
- if (GetMethod()->IsOptimized(sizeof(void*))) {
+ if (GetCurrentCode().IsOptimized(sizeof(void*))) {
CheckOptimizedMethod(registers, number_of_references, native_pc_offset);
} else {
CheckQuickMethod(registers, number_of_references, native_pc_offset);
@@ -64,7 +65,7 @@ class CheckReferenceMapVisitor : public StackVisitor {
void CheckOptimizedMethod(int* registers, int number_of_references, uint32_t native_pc_offset)
SHARED_REQUIRES(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
- CodeInfo code_info = m->GetOptimizedCodeInfo();
+ CodeInfo code_info = GetCurrentCode().GetOptimizedCodeInfo();
StackMapEncoding encoding = code_info.ExtractEncoding();
StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
uint16_t number_of_dex_registers = m->GetCodeItem()->registers_size_;
@@ -108,7 +109,7 @@ class CheckReferenceMapVisitor : public StackVisitor {
void CheckQuickMethod(int* registers, int number_of_references, uint32_t native_pc_offset)
SHARED_REQUIRES(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
- NativePcOffsetToReferenceMap map(m->GetNativeGcMap(sizeof(void*)));
+ NativePcOffsetToReferenceMap map(GetCurrentCode().GetNativeGcMap(sizeof(void*)));
const uint8_t* ref_bitmap = map.FindBitMap(native_pc_offset);
CHECK(ref_bitmap);
for (int i = 0; i < number_of_references; ++i) {
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index acb39c5402..395649ed74 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -16,12 +16,15 @@
#include "class_linker.h"
+#include <algorithm>
#include <deque>
#include <iostream>
#include <memory>
#include <queue>
#include <string>
+#include <tuple>
#include <unistd.h>
+#include <unordered_map>
#include <utility>
#include <vector>
@@ -41,24 +44,20 @@
#include "compiler_callbacks.h"
#include "debugger.h"
#include "dex_file-inl.h"
+#include "entrypoints/entrypoint_utils.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc_root-inl.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/heap.h"
#include "gc/space/image_space.h"
-#include "handle_scope.h"
+#include "handle_scope-inl.h"
#include "intern_table.h"
#include "interpreter/interpreter.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "leb128.h"
#include "linear_alloc.h"
-#include "oat.h"
-#include "oat_file.h"
-#include "oat_file-inl.h"
-#include "oat_file_assistant.h"
-#include "object_lock.h"
#include "mirror/class.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
@@ -72,12 +71,17 @@
#include "mirror/reference-inl.h"
#include "mirror/stack_trace_element.h"
#include "mirror/string-inl.h"
+#include "native/dalvik_system_DexFile.h"
+#include "oat.h"
+#include "oat_file.h"
+#include "oat_file-inl.h"
+#include "oat_file_assistant.h"
+#include "oat_file_manager.h"
+#include "object_lock.h"
#include "os.h"
#include "runtime.h"
-#include "entrypoints/entrypoint_utils.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change.h"
-#include "handle_scope-inl.h"
#include "thread-inl.h"
#include "trace.h"
#include "utils.h"
@@ -89,9 +93,6 @@ namespace art {
static constexpr bool kSanityCheckObjects = kIsDebugBuild;
-// For b/21333911.
-static constexpr bool kDuplicateClassesCheck = false;
-
static void ThrowNoClassDefFoundError(const char* fmt, ...)
__attribute__((__format__(__printf__, 1, 2)))
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -696,343 +697,6 @@ void ClassLinker::RunRootClinits() {
}
}
-const OatFile* ClassLinker::RegisterOatFile(const OatFile* oat_file) {
- WriterMutexLock mu(Thread::Current(), dex_lock_);
- if (kIsDebugBuild) {
- for (size_t i = 0; i < oat_files_.size(); ++i) {
- CHECK_NE(oat_file, oat_files_[i]) << oat_file->GetLocation();
- }
- }
- VLOG(class_linker) << "Registering " << oat_file->GetLocation();
- oat_files_.push_back(oat_file);
- return oat_file;
-}
-
-OatFile& ClassLinker::GetImageOatFile(gc::space::ImageSpace* space) {
- VLOG(startup) << "ClassLinker::GetImageOatFile entering";
- OatFile* oat_file = space->ReleaseOatFile();
- CHECK_EQ(RegisterOatFile(oat_file), oat_file);
- VLOG(startup) << "ClassLinker::GetImageOatFile exiting";
- return *oat_file;
-}
-
-class DexFileAndClassPair : ValueObject {
- public:
- DexFileAndClassPair(const DexFile* dex_file, size_t current_class_index, bool from_loaded_oat)
- : cached_descriptor_(GetClassDescriptor(dex_file, current_class_index)),
- dex_file_(dex_file),
- current_class_index_(current_class_index),
- from_loaded_oat_(from_loaded_oat) {}
-
- DexFileAndClassPair(const DexFileAndClassPair&) = default;
-
- DexFileAndClassPair& operator=(const DexFileAndClassPair& rhs) {
- cached_descriptor_ = rhs.cached_descriptor_;
- dex_file_ = rhs.dex_file_;
- current_class_index_ = rhs.current_class_index_;
- from_loaded_oat_ = rhs.from_loaded_oat_;
- return *this;
- }
-
- const char* GetCachedDescriptor() const {
- return cached_descriptor_;
- }
-
- bool operator<(const DexFileAndClassPair& rhs) const {
- const char* lhsDescriptor = cached_descriptor_;
- const char* rhsDescriptor = rhs.cached_descriptor_;
- int cmp = strcmp(lhsDescriptor, rhsDescriptor);
- if (cmp != 0) {
- // Note that the order must be reversed. We want to iterate over the classes in dex files.
- // They are sorted lexicographically. Thus, the priority-queue must be a min-queue.
- return cmp > 0;
- }
- return dex_file_ < rhs.dex_file_;
- }
-
- bool DexFileHasMoreClasses() const {
- return current_class_index_ + 1 < dex_file_->NumClassDefs();
- }
-
- DexFileAndClassPair GetNext() const {
- return DexFileAndClassPair(dex_file_, current_class_index_ + 1, from_loaded_oat_);
- }
-
- size_t GetCurrentClassIndex() const {
- return current_class_index_;
- }
-
- bool FromLoadedOat() const {
- return from_loaded_oat_;
- }
-
- const DexFile* GetDexFile() const {
- return dex_file_;
- }
-
- void DeleteDexFile() {
- delete dex_file_;
- dex_file_ = nullptr;
- }
-
- private:
- static const char* GetClassDescriptor(const DexFile* dex_file, size_t index) {
- const DexFile::ClassDef& class_def = dex_file->GetClassDef(static_cast<uint16_t>(index));
- return dex_file->StringByTypeIdx(class_def.class_idx_);
- }
-
- const char* cached_descriptor_;
- const DexFile* dex_file_;
- size_t current_class_index_;
- bool from_loaded_oat_; // We only need to compare mismatches between what we load now
- // and what was loaded before. Any old duplicates must have been
- // OK, and any new "internal" duplicates are as well (they must
- // be from multidex, which resolves correctly).
-};
-
-static void AddDexFilesFromOat(const OatFile* oat_file,
- bool already_loaded,
- std::priority_queue<DexFileAndClassPair>* heap) {
- const std::vector<const OatDexFile*>& oat_dex_files = oat_file->GetOatDexFiles();
- for (const OatDexFile* oat_dex_file : oat_dex_files) {
- std::string error;
- std::unique_ptr<const DexFile> dex_file = oat_dex_file->OpenDexFile(&error);
- if (dex_file.get() == nullptr) {
- LOG(WARNING) << "Could not create dex file from oat file: " << error;
- } else {
- if (dex_file->NumClassDefs() > 0U) {
- heap->emplace(dex_file.release(), 0U, already_loaded);
- }
- }
- }
-}
-
-static void AddNext(DexFileAndClassPair* original,
- std::priority_queue<DexFileAndClassPair>* heap) {
- if (original->DexFileHasMoreClasses()) {
- heap->push(original->GetNext());
- } else {
- // Need to delete the dex file.
- original->DeleteDexFile();
- }
-}
-
-static void FreeDexFilesInHeap(std::priority_queue<DexFileAndClassPair>* heap) {
- while (!heap->empty()) {
- delete heap->top().GetDexFile();
- heap->pop();
- }
-}
-
-const OatFile* ClassLinker::GetBootOatFile() {
- gc::space::ImageSpace* image_space = Runtime::Current()->GetHeap()->GetImageSpace();
- if (image_space == nullptr) {
- return nullptr;
- }
- return image_space->GetOatFile();
-}
-
-const OatFile* ClassLinker::GetPrimaryOatFile() {
- ReaderMutexLock mu(Thread::Current(), dex_lock_);
- const OatFile* boot_oat_file = GetBootOatFile();
- if (boot_oat_file != nullptr) {
- for (const OatFile* oat_file : oat_files_) {
- if (oat_file != boot_oat_file) {
- return oat_file;
- }
- }
- }
- return nullptr;
-}
-
-// Check for class-def collisions in dex files.
-//
-// This works by maintaining a heap with one class from each dex file, sorted by the class
-// descriptor. Then a dex-file/class pair is continually removed from the heap and compared
-// against the following top element. If the descriptor is the same, it is now checked whether
-// the two elements agree on whether their dex file was from an already-loaded oat-file or the
-// new oat file. Any disagreement indicates a collision.
-bool ClassLinker::HasCollisions(const OatFile* oat_file, std::string* error_msg) {
- if (!kDuplicateClassesCheck) {
- return false;
- }
-
- // Dex files are registered late - once a class is actually being loaded. We have to compare
- // against the open oat files. Take the dex_lock_ that protects oat_files_ accesses.
- ReaderMutexLock mu(Thread::Current(), dex_lock_);
-
- std::priority_queue<DexFileAndClassPair> queue;
-
- // Add dex files from already loaded oat files, but skip boot.
- {
- const OatFile* boot_oat = GetBootOatFile();
- for (const OatFile* loaded_oat_file : oat_files_) {
- if (loaded_oat_file == boot_oat) {
- continue;
- }
- AddDexFilesFromOat(loaded_oat_file, true, &queue);
- }
- }
-
- if (queue.empty()) {
- // No other oat files, return early.
- return false;
- }
-
- // Add dex files from the oat file to check.
- AddDexFilesFromOat(oat_file, false, &queue);
-
- // Now drain the queue.
- while (!queue.empty()) {
- DexFileAndClassPair compare_pop = queue.top();
- queue.pop();
-
- // Compare against the following elements.
- while (!queue.empty()) {
- DexFileAndClassPair top = queue.top();
-
- if (strcmp(compare_pop.GetCachedDescriptor(), top.GetCachedDescriptor()) == 0) {
- // Same descriptor. Check whether it's crossing old-oat-files to new-oat-files.
- if (compare_pop.FromLoadedOat() != top.FromLoadedOat()) {
- *error_msg =
- StringPrintf("Found duplicated class when checking oat files: '%s' in %s and %s",
- compare_pop.GetCachedDescriptor(),
- compare_pop.GetDexFile()->GetLocation().c_str(),
- top.GetDexFile()->GetLocation().c_str());
- FreeDexFilesInHeap(&queue);
- return true;
- }
- // Pop it.
- queue.pop();
- AddNext(&top, &queue);
- } else {
- // Something else. Done here.
- break;
- }
- }
- AddNext(&compare_pop, &queue);
- }
-
- return false;
-}
-
-std::vector<std::unique_ptr<const DexFile>> ClassLinker::OpenDexFilesFromOat(
- const char* dex_location, const char* oat_location,
- std::vector<std::string>* error_msgs) {
- CHECK(error_msgs != nullptr);
-
- // Verify we aren't holding the mutator lock, which could starve GC if we
- // have to generate or relocate an oat file.
- Locks::mutator_lock_->AssertNotHeld(Thread::Current());
-
- OatFileAssistant oat_file_assistant(dex_location, oat_location, kRuntimeISA,
- !Runtime::Current()->IsAotCompiler());
-
- // Lock the target oat location to avoid races generating and loading the
- // oat file.
- std::string error_msg;
- if (!oat_file_assistant.Lock(&error_msg)) {
- // Don't worry too much if this fails. If it does fail, it's unlikely we
- // can generate an oat file anyway.
- VLOG(class_linker) << "OatFileAssistant::Lock: " << error_msg;
- }
-
- // Check if we already have an up-to-date oat file open.
- const OatFile* source_oat_file = nullptr;
- {
- ReaderMutexLock mu(Thread::Current(), dex_lock_);
- for (const OatFile* oat_file : oat_files_) {
- CHECK(oat_file != nullptr);
- if (oat_file_assistant.GivenOatFileIsUpToDate(*oat_file)) {
- source_oat_file = oat_file;
- break;
- }
- }
- }
-
- // If we didn't have an up-to-date oat file open, try to load one from disk.
- if (source_oat_file == nullptr) {
- // Update the oat file on disk if we can. This may fail, but that's okay.
- // Best effort is all that matters here.
- if (!oat_file_assistant.MakeUpToDate(&error_msg)) {
- LOG(WARNING) << error_msg;
- }
-
- // Get the oat file on disk.
- std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
- if (oat_file.get() != nullptr) {
- // Take the file only if it has no collisions, or we must take it because of preopting.
- bool accept_oat_file = !HasCollisions(oat_file.get(), &error_msg);
- if (!accept_oat_file) {
- // Failed the collision check. Print warning.
- if (Runtime::Current()->IsDexFileFallbackEnabled()) {
- LOG(WARNING) << "Found duplicate classes, falling back to interpreter mode for "
- << dex_location;
- } else {
- LOG(WARNING) << "Found duplicate classes, dex-file-fallback disabled, will be failing to "
- " load classes for " << dex_location;
- }
- LOG(WARNING) << error_msg;
-
- // However, if the app was part of /system and preopted, there is no original dex file
- // available. In that case grudgingly accept the oat file.
- if (!DexFile::MaybeDex(dex_location)) {
- accept_oat_file = true;
- LOG(WARNING) << "Dex location " << dex_location << " does not seem to include dex file. "
- << "Allow oat file use. This is potentially dangerous.";
- }
- }
-
- if (accept_oat_file) {
- source_oat_file = oat_file.release();
- RegisterOatFile(source_oat_file);
- }
- }
- }
-
- std::vector<std::unique_ptr<const DexFile>> dex_files;
-
- // Load the dex files from the oat file.
- if (source_oat_file != nullptr) {
- dex_files = oat_file_assistant.LoadDexFiles(*source_oat_file, dex_location);
- if (dex_files.empty()) {
- error_msgs->push_back("Failed to open dex files from "
- + source_oat_file->GetLocation());
- }
- }
-
- // Fall back to running out of the original dex file if we couldn't load any
- // dex_files from the oat file.
- if (dex_files.empty()) {
- if (oat_file_assistant.HasOriginalDexFiles()) {
- if (Runtime::Current()->IsDexFileFallbackEnabled()) {
- if (!DexFile::Open(dex_location, dex_location, &error_msg, &dex_files)) {
- LOG(WARNING) << error_msg;
- error_msgs->push_back("Failed to open dex files from " + std::string(dex_location));
- }
- } else {
- error_msgs->push_back("Fallback mode disabled, skipping dex files.");
- }
- } else {
- error_msgs->push_back("No original dex files found for dex location "
- + std::string(dex_location));
- }
- }
- return dex_files;
-}
-
-const OatFile* ClassLinker::FindOpenedOatFileFromOatLocation(const std::string& oat_location) {
- ReaderMutexLock mu(Thread::Current(), dex_lock_);
- for (size_t i = 0; i < oat_files_.size(); i++) {
- const OatFile* oat_file = oat_files_[i];
- DCHECK(oat_file != nullptr);
- if (oat_file->GetLocation() == oat_location) {
- return oat_file;
- }
- }
- return nullptr;
-}
-
static void SanityCheckArtMethod(ArtMethod* m,
mirror::Class* expected_class,
gc::space::ImageSpace* space)
@@ -1169,16 +833,17 @@ void ClassLinker::InitFromImage() {
CHECK(space != nullptr);
image_pointer_size_ = space->GetImageHeader().GetPointerSize();
dex_cache_image_class_lookup_required_ = true;
- OatFile& oat_file = GetImageOatFile(space);
- CHECK_EQ(oat_file.GetOatHeader().GetImageFileLocationOatChecksum(), 0U);
- CHECK_EQ(oat_file.GetOatHeader().GetImageFileLocationOatDataBegin(), 0U);
- const char* image_file_location = oat_file.GetOatHeader().
+ const OatFile* oat_file = runtime->GetOatFileManager().RegisterImageOatFile(space);
+ DCHECK(oat_file != nullptr);
+ CHECK_EQ(oat_file->GetOatHeader().GetImageFileLocationOatChecksum(), 0U);
+ CHECK_EQ(oat_file->GetOatHeader().GetImageFileLocationOatDataBegin(), 0U);
+ const char* image_file_location = oat_file->GetOatHeader().
GetStoreValueByKey(OatHeader::kImageLocationKey);
CHECK(image_file_location == nullptr || *image_file_location == 0);
- quick_resolution_trampoline_ = oat_file.GetOatHeader().GetQuickResolutionTrampoline();
- quick_imt_conflict_trampoline_ = oat_file.GetOatHeader().GetQuickImtConflictTrampoline();
- quick_generic_jni_trampoline_ = oat_file.GetOatHeader().GetQuickGenericJniTrampoline();
- quick_to_interpreter_bridge_trampoline_ = oat_file.GetOatHeader().GetQuickToInterpreterBridge();
+ quick_resolution_trampoline_ = oat_file->GetOatHeader().GetQuickResolutionTrampoline();
+ quick_imt_conflict_trampoline_ = oat_file->GetOatHeader().GetQuickImtConflictTrampoline();
+ quick_generic_jni_trampoline_ = oat_file->GetOatHeader().GetQuickGenericJniTrampoline();
+ quick_to_interpreter_bridge_trampoline_ = oat_file->GetOatHeader().GetQuickToInterpreterBridge();
StackHandleScope<2> hs(self);
mirror::Object* dex_caches_object = space->GetImageHeader().GetImageRoot(ImageHeader::kDexCaches);
Handle<mirror::ObjectArray<mirror::DexCache>> dex_caches(
@@ -1200,20 +865,20 @@ void ClassLinker::InitFromImage() {
java_lang_Object->GetObjectSize(),
VoidFunctor()));
- CHECK_EQ(oat_file.GetOatHeader().GetDexFileCount(),
+ CHECK_EQ(oat_file->GetOatHeader().GetDexFileCount(),
static_cast<uint32_t>(dex_caches->GetLength()));
for (int32_t i = 0; i < dex_caches->GetLength(); i++) {
StackHandleScope<1> hs2(self);
Handle<mirror::DexCache> dex_cache(hs2.NewHandle(dex_caches->Get(i)));
const std::string& dex_file_location(dex_cache->GetLocation()->ToModifiedUtf8());
- const OatFile::OatDexFile* oat_dex_file = oat_file.GetOatDexFile(dex_file_location.c_str(),
- nullptr);
- CHECK(oat_dex_file != nullptr) << oat_file.GetLocation() << " " << dex_file_location;
+ const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(dex_file_location.c_str(),
+ nullptr);
+ CHECK(oat_dex_file != nullptr) << oat_file->GetLocation() << " " << dex_file_location;
std::string error_msg;
std::unique_ptr<const DexFile> dex_file = oat_dex_file->OpenDexFile(&error_msg);
- if (dex_file.get() == nullptr) {
+ if (dex_file == nullptr) {
LOG(FATAL) << "Failed to open dex file " << dex_file_location
- << " from within oat file " << oat_file.GetLocation()
+ << " from within oat file " << oat_file->GetLocation()
<< " error '" << error_msg << "'";
UNREACHABLE();
}
@@ -1361,9 +1026,9 @@ void ClassLinker::VisitRoots(RootVisitor* visitor, VisitRootFlags flags) {
class_roots_.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
VisitClassRoots(visitor, flags);
array_iftable_.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
- for (GcRoot<mirror::Class>& root : find_array_class_cache_) {
- root.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
- }
+ // Instead of visiting the find_array_class_cache_, drop it so that it doesn't prevent class
+ // unloading while we are marking roots.
+ DropFindArrayClassCache();
}
class VisitClassLoaderClassesVisitor : public ClassLoaderVisitor {
@@ -1508,7 +1173,6 @@ ClassLinker::~ClassLinker() {
mirror::IntArray::ResetArrayClass();
mirror::LongArray::ResetArrayClass();
mirror::ShortArray::ResetArrayClass();
- STLDeleteElements(&oat_files_);
Thread* const self = Thread::Current();
JavaVMExt* const vm = Runtime::Current()->GetJavaVM();
for (const ClassLoaderData& data : class_loaders_) {
@@ -1525,7 +1189,9 @@ mirror::PointerArray* ClassLinker::AllocPointerArray(Thread* self, size_t length
static_cast<mirror::Array*>(mirror::IntArray::Alloc(self, length)));
}
-mirror::DexCache* ClassLinker::AllocDexCache(Thread* self, const DexFile& dex_file) {
+mirror::DexCache* ClassLinker::AllocDexCache(Thread* self,
+ const DexFile& dex_file,
+ LinearAlloc* linear_alloc) {
StackHandleScope<6> hs(self);
auto dex_cache(hs.NewHandle(down_cast<mirror::DexCache*>(
GetClassRoot(kJavaLangDexCache)->AllocObject(self))));
@@ -1540,22 +1206,15 @@ mirror::DexCache* ClassLinker::AllocDexCache(Thread* self, const DexFile& dex_fi
}
DexCacheArraysLayout layout(image_pointer_size_, &dex_file);
uint8_t* raw_arrays = nullptr;
- if (dex_file.NumStringIds() != 0u || dex_file.NumTypeIds() != 0u ||
+ if (dex_file.GetOatDexFile() != nullptr &&
+ dex_file.GetOatDexFile()->GetDexCacheArrays() != nullptr) {
+ raw_arrays = const_cast<uint8_t*>(dex_file.GetOatDexFile()->GetDexCacheArrays());
+ } else if (dex_file.NumStringIds() != 0u || dex_file.NumTypeIds() != 0u ||
dex_file.NumMethodIds() != 0u || dex_file.NumFieldIds() != 0u) {
// NOTE: We "leak" the raw_arrays because we never destroy the dex cache.
DCHECK(image_pointer_size_ == 4u || image_pointer_size_ == 8u);
- if (sizeof(void*) == 8u && image_pointer_size_ == 4u) {
- // When cross-compiling for a 32-bit target on a 64-bit host, we need these arrays
- // in the low 4GiB address space so that we can store pointers in 32-bit fields.
- // This is conveniently provided by the linear allocator.
- raw_arrays = reinterpret_cast<uint8_t*>(
- Runtime::Current()->GetLinearAlloc()->Alloc(self, layout.Size())); // Zero-initialized.
- } else {
- raw_arrays = reinterpret_cast<uint8_t*>(calloc(layout.Size(), 1u)); // Zero-initialized.
- if (raw_arrays == nullptr) {
- return nullptr;
- }
- }
+ // Zero-initialized.
+ raw_arrays = reinterpret_cast<uint8_t*>(linear_alloc->Alloc(self, layout.Size()));
}
GcRoot<mirror::String>* strings = (dex_file.NumStringIds() == 0u) ? nullptr :
reinterpret_cast<GcRoot<mirror::String>*>(raw_arrays + layout.StringsOffset());
@@ -1768,13 +1427,18 @@ bool ClassLinker::FindClassInPathClassLoader(ScopedObjectAccessAlreadyRunnable&
break;
}
int32_t long_array_size = long_array->GetLength();
- for (int32_t j = 0; j < long_array_size; ++j) {
+ // First element is the oat file.
+ for (int32_t j = kDexFileIndexStart; j < long_array_size; ++j) {
const DexFile* cp_dex_file = reinterpret_cast<const DexFile*>(static_cast<uintptr_t>(
long_array->GetWithoutChecks(j)));
const DexFile::ClassDef* dex_class_def = cp_dex_file->FindClassDef(descriptor, hash);
if (dex_class_def != nullptr) {
- mirror::Class* klass = DefineClass(self, descriptor, hash, class_loader,
- *cp_dex_file, *dex_class_def);
+ mirror::Class* klass = DefineClass(self,
+ descriptor,
+ hash,
+ class_loader,
+ *cp_dex_file,
+ *dex_class_def);
if (klass == nullptr) {
CHECK(self->IsExceptionPending()) << descriptor;
self->ClearException();
@@ -1921,7 +1585,9 @@ mirror::Class* ClassLinker::DefineClass(Thread* self,
self->AssertPendingOOMException();
return nullptr;
}
- mirror::DexCache* dex_cache = RegisterDexFile(dex_file);
+ mirror::DexCache* dex_cache = RegisterDexFile(
+ dex_file,
+ GetOrCreateAllocatorForClassLoader(class_loader.Get()));
if (dex_cache == nullptr) {
self->AssertPendingOOMException();
return nullptr;
@@ -2424,6 +2090,19 @@ LinearAlloc* ClassLinker::GetAllocatorForClassLoader(mirror::ClassLoader* class_
return allocator;
}
+LinearAlloc* ClassLinker::GetOrCreateAllocatorForClassLoader(mirror::ClassLoader* class_loader) {
+ if (class_loader == nullptr) {
+ return Runtime::Current()->GetLinearAlloc();
+ }
+ WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
+ LinearAlloc* allocator = class_loader->GetAllocator();
+ if (allocator == nullptr) {
+ allocator = Runtime::Current()->CreateLinearAlloc();
+ class_loader->SetAllocator(allocator);
+ }
+ return allocator;
+}
+
void ClassLinker::LoadClassMembers(Thread* self,
const DexFile& dex_file,
const uint8_t* class_data,
@@ -2582,7 +2261,10 @@ void ClassLinker::LoadMethod(Thread* self,
void ClassLinker::AppendToBootClassPath(Thread* self, const DexFile& dex_file) {
StackHandleScope<1> hs(self);
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(AllocDexCache(self, dex_file)));
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(AllocDexCache(
+ self,
+ dex_file,
+ Runtime::Current()->GetLinearAlloc())));
CHECK(dex_cache.Get() != nullptr) << "Failed to allocate dex cache for "
<< dex_file.GetLocation();
AppendToBootClassPath(dex_file, dex_cache);
@@ -2618,7 +2300,7 @@ void ClassLinker::RegisterDexFileLocked(const DexFile& dex_file,
dex_cache->SetDexFile(&dex_file);
}
-mirror::DexCache* ClassLinker::RegisterDexFile(const DexFile& dex_file) {
+mirror::DexCache* ClassLinker::RegisterDexFile(const DexFile& dex_file, LinearAlloc* linear_alloc) {
Thread* self = Thread::Current();
{
ReaderMutexLock mu(self, dex_lock_);
@@ -2631,7 +2313,7 @@ mirror::DexCache* ClassLinker::RegisterDexFile(const DexFile& dex_file) {
// suspend all threads and another thread may need the dex_lock_ to
// get to a suspend point.
StackHandleScope<1> hs(self);
- Handle<mirror::DexCache> h_dex_cache(hs.NewHandle(AllocDexCache(self, dex_file)));
+ Handle<mirror::DexCache> h_dex_cache(hs.NewHandle(AllocDexCache(self, dex_file, linear_alloc)));
WriterMutexLock mu(self, dex_lock_);
mirror::DexCache* dex_cache = FindDexCacheLocked(self, dex_file, true);
if (dex_cache != nullptr) {
@@ -3428,6 +3110,9 @@ mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccessAlreadyRunnable&
std::string descriptor(GetDescriptorForProxy(klass.Get()));
const size_t hash = ComputeModifiedUtf8Hash(descriptor.c_str());
+ // Needs to be before we insert the class so that the allocator field is set.
+ LinearAlloc* const allocator = GetOrCreateAllocatorForClassLoader(klass->GetClassLoader());
+
// Insert the class before loading the fields as the field roots
// (ArtField::declaring_class_) are only visited from the class
// table. There can't be any suspend points between inserting the
@@ -3435,9 +3120,6 @@ mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccessAlreadyRunnable&
mirror::Class* existing = InsertClass(descriptor.c_str(), klass.Get(), hash);
CHECK(existing == nullptr);
- // Needs to be after we insert the class so that the allocator field is set.
- LinearAlloc* const allocator = GetAllocatorForClassLoader(klass->GetClassLoader());
-
// Instance fields are inherited, but we add a couple of static fields...
const size_t num_fields = 2;
LengthPrefixedArray<ArtField>* sfields = AllocArtFieldArray(self, allocator, num_fields);
@@ -3676,6 +3358,18 @@ bool ClassLinker::CanWeInitializeClass(mirror::Class* klass, bool can_init_stati
return false;
}
}
+ // If we are a class we need to initialize all interfaces with default methods when we are
+ // initialized. Check all of them.
+ if (!klass->IsInterface()) {
+ size_t num_interfaces = klass->GetIfTableCount();
+ for (size_t i = 0; i < num_interfaces; i++) {
+ mirror::Class* iface = klass->GetIfTable()->GetInterface(i);
+ if (iface->HasDefaultMethods() &&
+ !CanWeInitializeClass(iface, can_init_statics, can_init_parents)) {
+ return false;
+ }
+ }
+ }
}
if (klass->IsInterface() || !klass->HasSuperClass()) {
return true;
@@ -3802,6 +3496,38 @@ bool ClassLinker::InitializeClass(Thread* self, Handle<mirror::Class> klass,
}
}
+ if (!klass->IsInterface()) {
+ // Initialize interfaces with default methods for the JLS.
+ size_t num_direct_interfaces = klass->NumDirectInterfaces();
+ // Only setup the (expensive) handle scope if we actually need to.
+ if (UNLIKELY(num_direct_interfaces > 0)) {
+ StackHandleScope<1> hs_iface(self);
+ MutableHandle<mirror::Class> handle_scope_iface(hs_iface.NewHandle<mirror::Class>(nullptr));
+ for (size_t i = 0; i < num_direct_interfaces; i++) {
+ handle_scope_iface.Assign(mirror::Class::GetDirectInterface(self, klass, i));
+ CHECK(handle_scope_iface.Get() != nullptr);
+ CHECK(handle_scope_iface->IsInterface());
+ if (handle_scope_iface->HasBeenRecursivelyInitialized()) {
+ // We have already done this for this interface. Skip it.
+ continue;
+ }
+ // We cannot just call initialize class directly because we need to ensure that ALL
+ // interfaces with default methods are initialized. Non-default interface initialization
+ // will not affect other non-default super-interfaces.
+ bool iface_initialized = InitializeDefaultInterfaceRecursive(self,
+ handle_scope_iface,
+ can_init_statics,
+ can_init_parents);
+ if (!iface_initialized) {
+ ObjectLock<mirror::Class> lock(self, klass);
+ // Initialization failed because one of our interfaces with default methods is erroneous.
+ mirror::Class::SetStatus(klass, mirror::Class::kStatusError, self);
+ return false;
+ }
+ }
+ }
+ }
+
const size_t num_static_fields = klass->NumStaticFields();
if (num_static_fields > 0) {
const DexFile::ClassDef* dex_class_def = klass->GetClassDef();
@@ -3891,6 +3617,52 @@ bool ClassLinker::InitializeClass(Thread* self, Handle<mirror::Class> klass,
return success;
}
+// We recursively run down the tree of interfaces. We need to do this in the order they are declared
+// and perform the initialization only on those interfaces that contain default methods.
+bool ClassLinker::InitializeDefaultInterfaceRecursive(Thread* self,
+ Handle<mirror::Class> iface,
+ bool can_init_statics,
+ bool can_init_parents) {
+ CHECK(iface->IsInterface());
+ size_t num_direct_ifaces = iface->NumDirectInterfaces();
+ // Only create the (expensive) handle scope if we need it.
+ if (UNLIKELY(num_direct_ifaces > 0)) {
+ StackHandleScope<1> hs(self);
+ MutableHandle<mirror::Class> handle_super_iface(hs.NewHandle<mirror::Class>(nullptr));
+ // First we initialize all of iface's super-interfaces recursively.
+ for (size_t i = 0; i < num_direct_ifaces; i++) {
+ mirror::Class* super_iface = mirror::Class::GetDirectInterface(self, iface, i);
+ if (!super_iface->HasBeenRecursivelyInitialized()) {
+ // Recursive step
+ handle_super_iface.Assign(super_iface);
+ if (!InitializeDefaultInterfaceRecursive(self,
+ handle_super_iface,
+ can_init_statics,
+ can_init_parents)) {
+ return false;
+ }
+ }
+ }
+ }
+
+ bool result = true;
+ // Then we initialize 'iface' if it has default methods. We do not need to (and in fact must not)
+ // initialize if we don't have default methods.
+ if (iface->HasDefaultMethods()) {
+ result = EnsureInitialized(self, iface, can_init_statics, can_init_parents);
+ }
+
+ // Mark that this interface has undergone recursive default interface initialization so we know we
+ // can skip it on any later class initializations. We do this even if we are not a default
+ // interface since we can still avoid the traversal. This is purely a performance optimization.
+ if (result) {
+ // TODO This should be done in a better way
+ ObjectLock<mirror::Class> lock(self, iface);
+ iface->SetRecursivelyInitialized();
+ }
+ return result;
+}
+
bool ClassLinker::WaitForInitializeClass(Handle<mirror::Class> klass,
Thread* self,
ObjectLock<mirror::Class>& lock)
@@ -4193,13 +3965,13 @@ ClassTable* ClassLinker::InsertClassTableForClassLoader(mirror::ClassLoader* cla
ClassLoaderData data;
data.weak_root = self->GetJniEnv()->vm->AddWeakGlobalRef(self, class_loader);
data.class_table = class_table;
- data.allocator = Runtime::Current()->CreateLinearAlloc();
- class_loaders_.push_back(data);
// Don't already have a class table, add it to the class loader.
CHECK(class_loader->GetClassTable() == nullptr);
- CHECK(class_loader->GetAllocator() == nullptr);
class_loader->SetClassTable(data.class_table);
- class_loader->SetAllocator(data.allocator);
+ // Should have been set when we registered the dex file.
+ data.allocator = class_loader->GetAllocator();
+ CHECK(data.allocator != nullptr);
+ class_loaders_.push_back(data);
}
return class_table;
}
@@ -4623,20 +4395,16 @@ bool ClassLinker::LinkMethods(Thread* self,
Handle<mirror::ObjectArray<mirror::Class>> interfaces,
ArtMethod** out_imt) {
self->AllowThreadSuspension();
- if (klass->IsInterface()) {
- // No vtable.
- size_t count = klass->NumVirtualMethods();
- if (!IsUint<16>(count)) {
- ThrowClassFormatError(klass.Get(), "Too many methods on interface: %zd", count);
- return false;
- }
- for (size_t i = 0; i < count; ++i) {
- klass->GetVirtualMethodDuringLinking(i, image_pointer_size_)->SetMethodIndex(i);
- }
- } else if (!LinkVirtualMethods(self, klass)) { // Link virtual methods first.
- return false;
- }
- return LinkInterfaceMethods(self, klass, interfaces, out_imt); // Link interface method last.
+ // A map from vtable indexes to the method they need to be updated to point to. Used because we
+ // need to have default methods be in the virtuals array of each class but we don't set that up
+ // until LinkInterfaceMethods.
+ std::unordered_map<size_t, ArtMethod*> default_translations;
+ // Link virtual methods then interface methods.
+ // We set up the interface lookup table first because we need it to determine if we need to update
+ // any vtable entries with new default method implementations.
+ return SetupInterfaceLookupTable(self, klass, interfaces)
+ && LinkVirtualMethods(self, klass, /*out*/ &default_translations)
+ && LinkInterfaceMethods(self, klass, default_translations, out_imt);
}
// Comparator for name and signature of a method, used in finding overriding methods. Implementation
@@ -4760,9 +4528,36 @@ class LinkVirtualHashTable {
const uint32_t LinkVirtualHashTable::invalid_index_ = std::numeric_limits<uint32_t>::max();
const uint32_t LinkVirtualHashTable::removed_index_ = std::numeric_limits<uint32_t>::max() - 1;
-bool ClassLinker::LinkVirtualMethods(Thread* self, Handle<mirror::Class> klass) {
+bool ClassLinker::LinkVirtualMethods(
+ Thread* self,
+ Handle<mirror::Class> klass,
+ /*out*/std::unordered_map<size_t, ArtMethod*>* default_translations) {
const size_t num_virtual_methods = klass->NumVirtualMethods();
- if (klass->HasSuperClass()) {
+ if (klass->IsInterface()) {
+ // No vtable.
+ if (!IsUint<16>(num_virtual_methods)) {
+ ThrowClassFormatError(klass.Get(), "Too many methods on interface: %zu", num_virtual_methods);
+ return false;
+ }
+ bool has_defaults = false;
+ // TODO May need to replace this with real VTable for invoke_super
+ // Assign each method an IMT index and set the default flag.
+ for (size_t i = 0; i < num_virtual_methods; ++i) {
+ ArtMethod* m = klass->GetVirtualMethodDuringLinking(i, image_pointer_size_);
+ m->SetMethodIndex(i);
+ if (!m->IsAbstract()) {
+ m->SetAccessFlags(m->GetAccessFlags() | kAccDefault);
+ has_defaults = true;
+ }
+ }
+ // Mark that we have default methods so that we won't need to scan the virtual_methods_ array
+ // during initialization. This is a performance optimization. We could simply traverse the
+ // virtual_methods_ array again during initialization.
+ if (has_defaults) {
+ klass->SetHasDefaultMethods();
+ }
+ return true;
+ } else if (klass->HasSuperClass()) {
const size_t super_vtable_length = klass->GetSuperClass()->GetVTableLength();
const size_t max_count = num_virtual_methods + super_vtable_length;
StackHandleScope<2> hs(self);
@@ -4778,14 +4573,22 @@ bool ClassLinker::LinkVirtualMethods(Thread* self, Handle<mirror::Class> klass)
vtable->SetElementPtrSize(
i, super_class->GetEmbeddedVTableEntry(i, image_pointer_size_), image_pointer_size_);
}
- if (num_virtual_methods == 0) {
+ // We might need to change vtable if we have new virtual methods or new interfaces (since that
+ // might give us new default methods). If no new interfaces then we can skip the rest since
+ // the class cannot override any of the super-class's methods. This is required for
+ // correctness since without it we might not update overridden default method vtable entries
+ // correctly.
+ if (num_virtual_methods == 0 && super_class->GetIfTableCount() == klass->GetIfTableCount()) {
klass->SetVTable(vtable.Get());
return true;
}
} else {
+ DCHECK(super_class->IsAbstract() && !super_class->IsArrayClass());
auto* super_vtable = super_class->GetVTable();
CHECK(super_vtable != nullptr) << PrettyClass(super_class.Get());
- if (num_virtual_methods == 0) {
+ // We might need to change vtable if we have new virtual methods or new interfaces (since that
+ // might give us new default methods). See comment above.
+ if (num_virtual_methods == 0 && super_class->GetIfTableCount() == klass->GetIfTableCount()) {
klass->SetVTable(super_vtable);
return true;
}
@@ -4806,7 +4609,9 @@ bool ClassLinker::LinkVirtualMethods(Thread* self, Handle<mirror::Class> klass)
// the need for the initial vtable which we later shrink back down).
// 3. Add non overridden methods to the end of the vtable.
static constexpr size_t kMaxStackHash = 250;
- const size_t hash_table_size = num_virtual_methods * 3;
+ // + 1 so that even if we only have new default methods we will still be able to use this hash
+ // table (i.e. it will never have 0 size).
+ const size_t hash_table_size = num_virtual_methods * 3 + 1;
uint32_t* hash_table_ptr;
std::unique_ptr<uint32_t[]> hash_heap_storage;
if (hash_table_size <= kMaxStackHash) {
@@ -4823,10 +4628,10 @@ bool ClassLinker::LinkVirtualMethods(Thread* self, Handle<mirror::Class> klass)
i, image_pointer_size_)->GetDeclaringClass() != nullptr);
hash_table.Add(i);
}
- // Loop through each super vtable method and see if they are overriden by a method we added to
+ // Loop through each super vtable method and see if they are overridden by a method we added to
// the hash table.
for (size_t j = 0; j < super_vtable_length; ++j) {
- // Search the hash table to see if we are overidden by any method.
+ // Search the hash table to see if we are overridden by any method.
ArtMethod* super_method = vtable->GetElementPtrSize<ArtMethod*>(j, image_pointer_size_);
MethodNameAndSignatureComparator super_method_name_comparator(
super_method->GetInterfaceMethodIfProxy(image_pointer_size_));
@@ -4849,10 +4654,51 @@ bool ClassLinker::LinkVirtualMethods(Thread* self, Handle<mirror::Class> klass)
<< " would have incorrectly overridden the package-private method in "
<< PrettyDescriptor(super_method->GetDeclaringClassDescriptor());
}
+ } else if (super_method->IsDefault()) {
+ // We didn't directly override this method but we might through default methods...
+ // Check for default method update.
+ ArtMethod* default_method = nullptr;
+ std::string icce_message;
+ if (!FindDefaultMethodImplementation(self,
+ super_method,
+ klass,
+ /*out*/&default_method,
+ /*out*/&icce_message)) {
+ // An error occurred while finding default methods.
+ // TODO This should actually be thrown when we attempt to invoke this method.
+ ThrowIncompatibleClassChangeError(klass.Get(), "%s", icce_message.c_str());
+ return false;
+ }
+ // This should always work because we inherit superclass interfaces. We should either get
+ // 1) An IncompatibleClassChangeError because of conflicting default method
+ // implementations.
+ // 2) The same default method implementation as the superclass.
+ // 3) A default method that overrides the superclass's.
+ // Therefore this check should never fail.
+ CHECK(default_method != nullptr);
+ if (UNLIKELY(default_method->GetDeclaringClass() != super_method->GetDeclaringClass())) {
+ // TODO Refactor this add default methods to virtuals here and not in
+ // LinkInterfaceMethods maybe.
+ // The problem is default methods might override previously present default-method or
+ // miranda-method vtable entries from the superclass. Unfortunately we need these to
+ // be entries in this class's virtuals. We do not give these entries there until
+ // LinkInterfaceMethods so we pass this map around to let it know which vtable
+ // entries need to be updated.
+ // Make a note that vtable entry j must be updated, store what it needs to be updated to.
+ // We will allocate a virtual method slot in LinkInterfaceMethods and fix it up then.
+ default_translations->insert({j, default_method});
+ VLOG(class_linker) << "Method " << PrettyMethod(super_method) << " overridden by default "
+ << PrettyMethod(default_method) << " in " << PrettyClass(klass.Get());
+ } else {
+ // They are the same method/no override
+ // Cannot do direct comparison because we had to copy the ArtMethod object into the
+ // superclass's vtable.
+ continue;
+ }
}
}
- // Add the non overridden methods at the end.
size_t actual_count = super_vtable_length;
+ // Add the non-overridden methods at the end.
for (size_t i = 0; i < num_virtual_methods; ++i) {
ArtMethod* local_method = klass->GetVirtualMethodDuringLinking(i, image_pointer_size_);
size_t method_idx = local_method->GetMethodIndexDuringLinking();
@@ -4900,20 +4746,223 @@ bool ClassLinker::LinkVirtualMethods(Thread* self, Handle<mirror::Class> klass)
return true;
}
-bool ClassLinker::LinkInterfaceMethods(Thread* self,
- Handle<mirror::Class> klass,
- Handle<mirror::ObjectArray<mirror::Class>> interfaces,
- ArtMethod** out_imt) {
- StackHandleScope<3> hs(self);
- Runtime* const runtime = Runtime::Current();
- const bool has_superclass = klass->HasSuperClass();
- const size_t super_ifcount = has_superclass ? klass->GetSuperClass()->GetIfTableCount() : 0U;
+// Find the default method implementation for 'target_method' in 'klass'. Stores it into
+// out_default_method and returns true on success. If no default method is found, stores nullptr
+// into out_default_method and still returns true. If an error occurs (such as a default method
+// conflict) it fills icce_message with an appropriate message for an IncompatibleClassChangeError,
+// which should then be thrown by the caller, and returns false.
+bool ClassLinker::FindDefaultMethodImplementation(Thread* self,
+ ArtMethod* target_method,
+ Handle<mirror::Class> klass,
+ /*out*/ArtMethod** out_default_method,
+ /*out*/std::string* icce_message) const {
+ DCHECK(self != nullptr);
+ DCHECK(target_method != nullptr);
+ DCHECK(out_default_method != nullptr);
+ DCHECK(icce_message != nullptr);
+
+ *out_default_method = nullptr;
+ mirror::Class* chosen_iface = nullptr;
+
+  // We organize the interface table so that, for any interface I, all of its subinterfaces J
+  // follow it in the table. This lets us walk the table backwards when searching for default
+  // methods: the first match we encounter is the best candidate, since it is the most specific.
+  // Once we have found it we keep track of it and then continue checking all other interfaces,
+  // since we need to throw an error if we encounter conflicting default method implementations
+  // (i.e. implementations where neither is a subtype of the other).
+ //
+ // The order of unrelated interfaces does not matter and is not defined.
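+  // For example, if interface J extends interface I, then I appears before J in the iftable, so
+  // walking backwards reaches J's (more specific) default before I's.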
+ size_t iftable_count = klass->GetIfTableCount();
+ if (iftable_count == 0) {
+ // No interfaces. We have already reset out to null so just return true.
+ return true;
+ }
+
+ StackHandleScope<1> hs(self);
+ MutableHandle<mirror::IfTable> iftable(hs.NewHandle(klass->GetIfTable()));
+ MethodNameAndSignatureComparator target_name_comparator(
+ target_method->GetInterfaceMethodIfProxy(image_pointer_size_));
+  // Iterate over the klass's iftable in reverse. The loop breaks at the end of the body (rather
+  // than testing k >= 0 in the condition) because size_t is unsigned.
+ for (size_t k = iftable_count - 1; /* break if k == 0 at end */; --k) {
+ DCHECK_LT(k, iftable->Count());
+ mirror::Class* iface = iftable->GetInterface(k);
+ size_t num_instance_methods = iface->NumVirtualMethods();
+ // Iterate through every method on this interface. The order does not matter so we go forwards.
+ for (size_t m = 0; m < num_instance_methods; m++) {
+ ArtMethod* current_method = iface->GetVirtualMethodUnchecked(m, image_pointer_size_);
+      // Skip abstract methods and methods with a different name or signature.
+ if (current_method->IsAbstract() ||
+ !target_name_comparator.HasSameNameAndSignature(
+ current_method->GetInterfaceMethodIfProxy(image_pointer_size_))) {
+ continue;
+ }
+ // The verifier should have caught the non-public method.
+ DCHECK(current_method->IsPublic()) << "Interface method is not public!";
+ if (UNLIKELY(chosen_iface != nullptr)) {
+ // We have multiple default impls of the same method. We need to check they do not
+ // conflict and throw an error if they do. Conflicting means that the current iface is not
+ // masked by the chosen interface.
+ if (!iface->IsAssignableFrom(chosen_iface)) {
+ *icce_message = StringPrintf("Conflicting default method implementations: '%s' and '%s'",
+ PrettyMethod(current_method).c_str(),
+ PrettyMethod(*out_default_method).c_str());
+ return false;
+ } else {
+ break; // Continue checking at the next interface.
+ }
+ } else {
+ *out_default_method = current_method;
+ chosen_iface = iface;
+ // We should now finish traversing the graph to find if we have default methods that
+ // conflict.
+ break;
+ }
+ }
+ if (k == 0) {
+ break;
+ }
+ }
+ return true;
+}
+
+// Sets imt_ref appropriately for LinkInterfaceMethods.
+// If there is no method in the imt location of imt_ref it will store the given method there.
+// If the existing entry has the same name and signature as the given method (meaning we overwrote
+// a superclass vtable entry) it is replaced; otherwise the entry is set to the conflict method,
+// which figures out which method to use at runtime.
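+// For example, the first interface method that maps to an empty IMT slot is stored there
+// directly; if an unrelated method later maps to the same slot, the entry becomes the conflict
+// method.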
+static void SetIMTRef(ArtMethod* unimplemented_method,
+ ArtMethod* conflict_method,
+ size_t image_pointer_size,
+ ArtMethod* current_method,
+ /*out*/ArtMethod** imt_ref)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ // Place method in imt if entry is empty, place conflict otherwise.
+ if (*imt_ref == unimplemented_method) {
+ *imt_ref = current_method;
+ } else if (*imt_ref != conflict_method) {
+ // If we are not a conflict and we have the same signature and name as the imt
+ // entry, it must be that we overwrote a superclass vtable entry.
+ MethodNameAndSignatureComparator imt_comparator(
+ (*imt_ref)->GetInterfaceMethodIfProxy(image_pointer_size));
+ if (imt_comparator.HasSameNameAndSignature(
+ current_method->GetInterfaceMethodIfProxy(image_pointer_size))) {
+ *imt_ref = current_method;
+ } else {
+ *imt_ref = conflict_method;
+ }
+ }
+}
+
+// Simple helper function that checks that no subtypes of 'val' are contained within the 'classes'
+// set.
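+// Used to verify the iftable ordering invariant (superinterfaces precede subinterfaces) as new
+// interfaces are appended.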
+static bool NotSubinterfaceOfAny(const std::unordered_set<mirror::Class*>& classes,
+ mirror::Class* val)
+ REQUIRES(Roles::uninterruptible_)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(val != nullptr);
+ for (auto c : classes) {
+ if (val->IsAssignableFrom(&*c)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+// Fills in and flattens the interface inheritance hierarchy.
+//
+// By the end of this function all interfaces in the transitive closure of to_process are added to
+// the iftable and every interface precedes all of its sub-interfaces in this list.
+//
+// all I, J: Interface | I <: J implies J precedes I
+//
+// (note A <: B means that A is a subtype of B)
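+// e.g. for interfaces I and J with J <: I, the order [I, J] is valid while [J, I] is not, since
+// the superinterface I must precede its subinterface J.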
+//
+// This returns the total number of items in the iftable. The iftable might be resized down after
+// this call.
+//
+// We order the table this way (superinterfaces before subinterfaces) so that we do not need to
+// reorder the superclass's interfaces when new interfaces are added in a subclass's interface
+// table.
+//
+// Upon entry into this function iftable is a copy of the superclass's iftable with the first
+// super_ifcount entries filled in with the transitive closure of the interfaces of the superclass.
+// The other entries are uninitialized. We will fill in the remaining entries in this function. The
+// iftable must be large enough to hold all interfaces without changing its size.
+static size_t FillIfTable(mirror::IfTable* iftable,
+ size_t super_ifcount,
+ std::vector<mirror::Class*> to_process)
+ REQUIRES(Roles::uninterruptible_)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+  // This is the set of all classes already in the iftable. Used to make checking whether a class
+  // has already been added quicker.
+ std::unordered_set<mirror::Class*> classes_in_iftable;
+ // The first super_ifcount elements are from the superclass. We note that they are already added.
+ for (size_t i = 0; i < super_ifcount; i++) {
+ mirror::Class* iface = iftable->GetInterface(i);
+ DCHECK(NotSubinterfaceOfAny(classes_in_iftable, iface)) << "Bad ordering.";
+ classes_in_iftable.insert(iface);
+ }
+ size_t filled_ifcount = super_ifcount;
+ for (mirror::Class* interface : to_process) {
+ // Let us call the first filled_ifcount elements of iftable the current-iface-list.
+ // At this point in the loop current-iface-list has the invariant that:
+ // for every pair of interfaces I,J within it:
+ // if index_of(I) < index_of(J) then I is not a subtype of J
+
+ // If we have already seen this element then all of its super-interfaces must already be in the
+ // current-iface-list so we can skip adding it.
+ if (!ContainsElement(classes_in_iftable, interface)) {
+ // We haven't seen this interface so add all of its super-interfaces onto the
+ // current-iface-list, skipping those already on it.
+ int32_t ifcount = interface->GetIfTableCount();
+ for (int32_t j = 0; j < ifcount; j++) {
+ mirror::Class* super_interface = interface->GetIfTable()->GetInterface(j);
+ if (!ContainsElement(classes_in_iftable, super_interface)) {
+ DCHECK(NotSubinterfaceOfAny(classes_in_iftable, super_interface)) << "Bad ordering.";
+ classes_in_iftable.insert(super_interface);
+ iftable->SetInterface(filled_ifcount, super_interface);
+ filled_ifcount++;
+ }
+ }
+ DCHECK(NotSubinterfaceOfAny(classes_in_iftable, interface)) << "Bad ordering";
+ // Place this interface onto the current-iface-list after all of its super-interfaces.
+ classes_in_iftable.insert(interface);
+ iftable->SetInterface(filled_ifcount, interface);
+ filled_ifcount++;
+ } else if (kIsDebugBuild) {
+ // Check all super-interfaces are already in the list.
+ int32_t ifcount = interface->GetIfTableCount();
+ for (int32_t j = 0; j < ifcount; j++) {
+ mirror::Class* super_interface = interface->GetIfTable()->GetInterface(j);
+ DCHECK(ContainsElement(classes_in_iftable, super_interface))
+ << "Iftable does not contain " << PrettyClass(super_interface)
+ << ", a superinterface of " << PrettyClass(interface);
+ }
+ }
+ }
+ if (kIsDebugBuild) {
+ // Check that the iftable is ordered correctly.
+ for (size_t i = 0; i < filled_ifcount; i++) {
+ mirror::Class* if_a = iftable->GetInterface(i);
+ for (size_t j = i + 1; j < filled_ifcount; j++) {
+ mirror::Class* if_b = iftable->GetInterface(j);
+ // !(if_a <: if_b)
+ CHECK(!if_b->IsAssignableFrom(if_a))
+ << "Bad interface order: " << PrettyClass(if_a) << " (index " << i << ") extends "
+ << PrettyClass(if_b) << " (index " << j << ") and so should be after it in the "
+ << "interface list.";
+ }
+ }
+ }
+ return filled_ifcount;
+}
+
+bool ClassLinker::SetupInterfaceLookupTable(Thread* self, Handle<mirror::Class> klass,
+ Handle<mirror::ObjectArray<mirror::Class>> interfaces) {
+ StackHandleScope<1> hs(self);
+ const size_t super_ifcount =
+ klass->HasSuperClass() ? klass->GetSuperClass()->GetIfTableCount() : 0U;
const bool have_interfaces = interfaces.Get() != nullptr;
- const size_t num_interfaces = have_interfaces
- ? interfaces->GetLength()
- : klass->NumDirectInterfaces();
- const size_t method_alignment = ArtMethod::Alignment(image_pointer_size_);
- const size_t method_size = ArtMethod::Size(image_pointer_size_);
+ const size_t num_interfaces =
+ have_interfaces ? interfaces->GetLength() : klass->NumDirectInterfaces();
if (num_interfaces == 0) {
if (super_ifcount == 0) {
// Class implements no interfaces.
@@ -4937,6 +4986,7 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self,
}
}
size_t ifcount = super_ifcount + num_interfaces;
+ // Check that every class being implemented is an interface.
for (size_t i = 0; i < num_interfaces; i++) {
mirror::Class* interface = have_interfaces
? interfaces->GetWithoutChecks(i)
@@ -4952,11 +5002,13 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self,
}
ifcount += interface->GetIfTableCount();
}
+ // Create the interface function table.
MutableHandle<mirror::IfTable> iftable(hs.NewHandle(AllocIfTable(self, ifcount)));
if (UNLIKELY(iftable.Get() == nullptr)) {
self->AssertPendingOOMException();
return false;
}
+ // Fill in table with superclass's iftable.
if (super_ifcount != 0) {
mirror::IfTable* super_iftable = klass->GetSuperClass()->GetIfTable();
for (size_t i = 0; i < super_ifcount; i++) {
@@ -4964,56 +5016,59 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self,
iftable->SetInterface(i, super_interface);
}
}
+
+ // Note that AllowThreadSuspension is to thread suspension as pthread_testcancel is to pthread
+  // cancellation. That is, it will suspend if the thread has a pending suspend request but
+  // otherwise doesn't really do anything.
self->AllowThreadSuspension();
- // Flatten the interface inheritance hierarchy.
- size_t idx = super_ifcount;
- for (size_t i = 0; i < num_interfaces; i++) {
- mirror::Class* interface = have_interfaces ? interfaces->Get(i) :
- mirror::Class::GetDirectInterface(self, klass, i);
- // Check if interface is already in iftable
- bool duplicate = false;
- for (size_t j = 0; j < idx; j++) {
- mirror::Class* existing_interface = iftable->GetInterface(j);
- if (existing_interface == interface) {
- duplicate = true;
- break;
- }
- }
- if (!duplicate) {
- // Add this non-duplicate interface.
- iftable->SetInterface(idx++, interface);
- // Add this interface's non-duplicate super-interfaces.
- for (int32_t j = 0; j < interface->GetIfTableCount(); j++) {
- mirror::Class* super_interface = interface->GetIfTable()->GetInterface(j);
- bool super_duplicate = false;
- for (size_t k = 0; k < idx; k++) {
- mirror::Class* existing_interface = iftable->GetInterface(k);
- if (existing_interface == super_interface) {
- super_duplicate = true;
- break;
- }
- }
- if (!super_duplicate) {
- iftable->SetInterface(idx++, super_interface);
- }
- }
+
+ size_t new_ifcount;
+ {
+ ScopedAssertNoThreadSuspension nts(self, "Copying mirror::Class*'s for FillIfTable");
+ std::vector<mirror::Class*> to_add;
+ for (size_t i = 0; i < num_interfaces; i++) {
+ mirror::Class* interface = have_interfaces ? interfaces->Get(i) :
+ mirror::Class::GetDirectInterface(self, klass, i);
+ to_add.push_back(interface);
}
+
+ new_ifcount = FillIfTable(iftable.Get(), super_ifcount, std::move(to_add));
}
+
self->AllowThreadSuspension();
+
// Shrink iftable in case duplicates were found
- if (idx < ifcount) {
+ if (new_ifcount < ifcount) {
DCHECK_NE(num_interfaces, 0U);
iftable.Assign(down_cast<mirror::IfTable*>(
- iftable->CopyOf(self, idx * mirror::IfTable::kMax)));
+ iftable->CopyOf(self, new_ifcount * mirror::IfTable::kMax)));
if (UNLIKELY(iftable.Get() == nullptr)) {
self->AssertPendingOOMException();
return false;
}
- ifcount = idx;
+ ifcount = new_ifcount;
} else {
- DCHECK_EQ(idx, ifcount);
+ DCHECK_EQ(new_ifcount, ifcount);
}
klass->SetIfTable(iftable.Get());
+ return true;
+}
+
+bool ClassLinker::LinkInterfaceMethods(
+ Thread* self,
+ Handle<mirror::Class> klass,
+ const std::unordered_map<size_t, ArtMethod*>& default_translations,
+ ArtMethod** out_imt) {
+ StackHandleScope<3> hs(self);
+ Runtime* const runtime = Runtime::Current();
+ const bool has_superclass = klass->HasSuperClass();
+ const size_t super_ifcount = has_superclass ? klass->GetSuperClass()->GetIfTableCount() : 0U;
+ const size_t method_alignment = ArtMethod::Alignment(image_pointer_size_);
+ const size_t method_size = ArtMethod::Size(image_pointer_size_);
+ const size_t ifcount = klass->GetIfTableCount();
+
+ MutableHandle<mirror::IfTable> iftable(hs.NewHandle(klass->GetIfTable()));
+
// If we're an interface, we don't need the vtable pointers, so we're done.
if (klass->IsInterface()) {
return true;
@@ -5026,6 +5081,7 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self,
ArenaStack stack(runtime->GetLinearAlloc()->GetArenaPool());
ScopedArenaAllocator allocator(&stack);
ScopedArenaVector<ArtMethod*> miranda_methods(allocator.Adapter());
+ ScopedArenaVector<ArtMethod*> default_methods(allocator.Adapter());
MutableHandle<mirror::PointerArray> vtable(hs.NewHandle(klass->GetVTableDuringLinking()));
ArtMethod* const unimplemented_method = runtime->GetImtUnimplementedMethod();
@@ -5055,7 +5111,9 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self,
for (size_t j = 0; j < num_virtuals; ++j) {
auto method = method_array->GetElementPtrSize<ArtMethod*>(j, image_pointer_size_);
DCHECK(method != nullptr) << PrettyClass(super_class);
- if (method->IsMiranda()) {
+      // Miranda methods cannot be used to implement an interface method, and default methods
+      // should be skipped in case we override them.
+ if (method->IsDefault() || method->IsMiranda()) {
continue;
}
ArtMethod* interface_method = interface->GetVirtualMethod(j, image_pointer_size_);
@@ -5076,6 +5134,8 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self,
size_t num_methods = iftable->GetInterface(i)->NumVirtualMethods();
if (num_methods > 0) {
const bool is_super = i < super_ifcount;
+      // If this interface is also implemented by the superclass we can simply copy the method
+      // array from the superclass.
const bool super_interface = is_super && extend_super_iftable;
mirror::PointerArray* method_array;
if (super_interface) {
@@ -5119,16 +5179,13 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self,
input_vtable_array = vtable;
input_array_length = input_vtable_array->GetLength();
}
- if (input_array_length == 0) {
- // If the added virtual methods is empty, do nothing.
- DCHECK(super_interface);
- continue;
- }
+      // For each method in the interface.
for (size_t j = 0; j < num_methods; ++j) {
auto* interface_method = iftable->GetInterface(i)->GetVirtualMethod(j, image_pointer_size_);
MethodNameAndSignatureComparator interface_name_comparator(
interface_method->GetInterfaceMethodIfProxy(image_pointer_size_));
- int32_t k;
+ uint32_t imt_index = interface_method->GetDexMethodIndex() % mirror::Class::kImtSize;
+ ArtMethod** imt_ptr = &out_imt[imt_index];
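+      // The IMT slot is chosen by taking the interface method's dex method index modulo the IMT
+      // size, so unrelated interface methods can map to the same slot; SetIMTRef resolves such
+      // collisions with the conflict method.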
// For each method listed in the interface's method list, find the
// matching method in our class's method list. We want to favor the
// subclass over the superclass, which just requires walking
@@ -5137,7 +5194,12 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self,
// it -- otherwise it would use the same vtable slot. In .dex files
// those don't end up in the virtual method table, so it shouldn't
// matter which direction we go. We walk it backward anyway.)
- for (k = input_array_length - 1; k >= 0; --k) {
+ //
+ // To find defaults we need to do the same but also go over interfaces.
+ bool found_impl = false;
+ ArtMethod* default_impl = nullptr;
+ bool found_default_impl = false;
+ for (int32_t k = input_array_length - 1; k >= 0; --k) {
ArtMethod* vtable_method = input_virtual_methods != nullptr ?
&input_virtual_methods->At(k, method_size, method_alignment) :
input_vtable_array->GetElementPtrSize<ArtMethod*>(k, image_pointer_size_);
@@ -5153,25 +5215,69 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self,
"Method '%s' implementing interface method '%s' is not public",
PrettyMethod(vtable_method).c_str(), PrettyMethod(interface_method).c_str());
return false;
+ } else if (vtable_method->IsDefault()) {
+ // We might have a newer, better, default method for this, so we just skip it. If we
+ // are still using this we will select it again when scanning for default methods. To
+ // obviate the need to copy the method again we will make a note that we already found
+ // a default here.
+ // TODO This should be much cleaner.
+ found_default_impl = true;
+ default_impl = vtable_method;
+ break;
+ } else {
+ found_impl = true;
}
method_array->SetElementPtrSize(j, vtable_method, image_pointer_size_);
// Place method in imt if entry is empty, place conflict otherwise.
- uint32_t imt_index = interface_method->GetDexMethodIndex() % mirror::Class::kImtSize;
- auto** imt_ref = &out_imt[imt_index];
- if (*imt_ref == unimplemented_method) {
- *imt_ref = vtable_method;
- } else if (*imt_ref != conflict_method) {
- // If we are not a conflict and we have the same signature and name as the imt entry,
- // it must be that we overwrote a superclass vtable entry.
- MethodNameAndSignatureComparator imt_comparator(
- (*imt_ref)->GetInterfaceMethodIfProxy(image_pointer_size_));
- *imt_ref = imt_comparator.HasSameNameAndSignature(vtable_method_for_name_comparison) ?
- vtable_method : conflict_method;
- }
+ SetIMTRef(unimplemented_method,
+ conflict_method,
+ image_pointer_size_,
+ vtable_method,
+ /*out*/imt_ptr);
break;
}
}
- if (k < 0 && !super_interface) {
+ // We should only search for default implementations when the class does not implement the
+ // method directly and either (1) the interface is newly implemented on this class and not
+ // on any of its superclasses, (2) the superclass's implementation is a default method, or
+ // (3) the superclass does not have an implementation.
+ if (!found_impl && (!super_interface ||
+ method_array->GetElementPtrSize<ArtMethod*>(j, image_pointer_size_)
+ ->IsOverridableByDefaultMethod())) {
+ ArtMethod* current_method = nullptr;
+ std::string icce_message;
+ if (!FindDefaultMethodImplementation(self,
+ interface_method,
+ klass,
+ /*out*/&current_method,
+ /*out*/&icce_message)) {
+ // There was a conflict with default method implementations.
+ self->EndAssertNoThreadSuspension(old_cause);
+ // TODO This should actually be thrown when we attempt to invoke this method.
+ ThrowIncompatibleClassChangeError(klass.Get(), "%s", icce_message.c_str());
+ return false;
+ } else if (current_method != nullptr) {
+ if (found_default_impl &&
+ current_method->GetDeclaringClass() == default_impl->GetDeclaringClass()) {
+ // We found a default method but it was the same one we already have from our
+ // superclass. Don't bother adding it to our vtable again.
+ current_method = default_impl;
+ } else {
+ // We found a default method implementation and there were no conflicts.
+ // Save the default method. We need to add it to the vtable.
+ default_methods.push_back(current_method);
+ }
+ method_array->SetElementPtrSize(j, current_method, image_pointer_size_);
+ SetIMTRef(unimplemented_method,
+ conflict_method,
+ image_pointer_size_,
+ current_method,
+ /*out*/imt_ptr);
+ found_impl = true;
+ }
+ }
+ if (!found_impl && !super_interface) {
+        // No implementation was found in this class or its superclasses, so add (or reuse) a
+        // miranda method as a placeholder for it.
ArtMethod* miranda_method = nullptr;
for (auto& mir_method : miranda_methods) {
if (interface_name_comparator.HasSameNameAndSignature(mir_method)) {
@@ -5191,9 +5297,10 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self,
}
}
}
- if (!miranda_methods.empty()) {
+ if (!miranda_methods.empty() || !default_methods.empty()) {
const size_t old_method_count = klass->NumVirtualMethods();
- const size_t new_method_count = old_method_count + miranda_methods.size();
+ const size_t new_method_count =
+ old_method_count + miranda_methods.size() + default_methods.size();
// Attempt to realloc to save RAM if possible.
LengthPrefixedArray<ArtMethod>* old_virtuals = klass->GetVirtualMethodsPtr();
// The Realloced virtual methods aren't visiblef from the class roots, so there is no issue
@@ -5228,13 +5335,36 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self,
++out;
}
}
- StrideIterator<ArtMethod> out(virtuals->Begin(method_size, method_alignment) + old_method_count);
+ StrideIterator<ArtMethod> out(virtuals->Begin(method_size, method_alignment)
+ + old_method_count);
// Copy over miranda methods before copying vtable since CopyOf may cause thread suspension and
// we want the roots of the miranda methods to get visited.
for (ArtMethod* mir_method : miranda_methods) {
- out->CopyFrom(mir_method, image_pointer_size_);
- out->SetAccessFlags(out->GetAccessFlags() | kAccMiranda);
- move_table.emplace(mir_method, &*out);
+ ArtMethod& new_method = *out;
+ new_method.CopyFrom(mir_method, image_pointer_size_);
+ new_method.SetAccessFlags(new_method.GetAccessFlags() | kAccMiranda);
+ DCHECK_NE(new_method.GetAccessFlags() & kAccAbstract, 0u)
+ << "Miranda method should be abstract!";
+ move_table.emplace(mir_method, &new_method);
+ ++out;
+ }
+ // We need to copy the default methods into our own virtual method table since the runtime
+ // requires that every method on a class's vtable be in that respective class's virtual method
+ // table.
+ // NOTE This means that two classes might have the same implementation of a method from the same
+ // interface but will have different ArtMethod*s for them. This also means we cannot compare a
+ // default method found on a class with one found on the declaring interface directly and must
+ // look at the declaring class to determine if they are the same.
+ for (ArtMethod* def_method : default_methods) {
+ ArtMethod& new_method = *out;
+ new_method.CopyFrom(def_method, image_pointer_size_);
+ new_method.SetAccessFlags(new_method.GetAccessFlags() | kAccDefault);
+ // Clear the preverified flag if it is present. Since this class hasn't been verified yet it
+ // shouldn't have methods that are preverified.
+ // TODO This is rather arbitrary. We should maybe support classes where only some of its
+ // methods are preverified.
+ new_method.SetAccessFlags(new_method.GetAccessFlags() & ~kAccPreverified);
+ move_table.emplace(def_method, &new_method);
++out;
}
virtuals->SetLength(new_method_count);
@@ -5244,7 +5374,8 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self,
self->EndAssertNoThreadSuspension(old_cause);
const size_t old_vtable_count = vtable->GetLength();
- const size_t new_vtable_count = old_vtable_count + miranda_methods.size();
+ const size_t new_vtable_count =
+ old_vtable_count + miranda_methods.size() + default_methods.size();
miranda_methods.clear();
vtable.Assign(down_cast<mirror::PointerArray*>(vtable->CopyOf(self, new_vtable_count)));
if (UNLIKELY(vtable.Get() == nullptr)) {
@@ -5261,15 +5392,29 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self,
++vtable_pos;
}
CHECK_EQ(vtable_pos, new_vtable_count);
- // Update old vtable methods.
+ // Update old vtable methods. We use the default_translations map to figure out what each vtable
+  // entry should be updated to, if it needs to be updated at all.
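+  // This is a two-step translation: default_translations maps a vtable index to the default
+  // method chosen for it, and move_table then maps that method to the copy now living in this
+  // class's virtual method array.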
for (size_t i = 0; i < old_vtable_count; ++i) {
- auto* m = vtable->GetElementPtrSize<ArtMethod*>(i, image_pointer_size_);
- DCHECK(m != nullptr) << PrettyClass(klass.Get());
- auto it = move_table.find(m);
+ ArtMethod* translated_method = vtable->GetElementPtrSize<ArtMethod*>(i, image_pointer_size_);
+ // Try and find what we need to change this method to.
+ auto translation_it = default_translations.find(i);
+ bool found_translation = false;
+ if (translation_it != default_translations.end()) {
+ size_t vtable_index;
+ std::tie(vtable_index, translated_method) = *translation_it;
+ DCHECK_EQ(vtable_index, i);
+ found_translation = true;
+ }
+ DCHECK(translated_method != nullptr);
+ auto it = move_table.find(translated_method);
if (it != move_table.end()) {
- auto* new_m = it->second;
- DCHECK(new_m != nullptr) << PrettyClass(klass.Get());
- vtable->SetElementPtrSize(i, new_m, image_pointer_size_);
+ auto* new_method = it->second;
+ DCHECK(new_method != nullptr);
+ vtable->SetElementPtrSize(i, new_method, image_pointer_size_);
+ } else {
+      // Every default method recorded in default_translations was copied into this class's own
+      // virtual methods, so a translated method must always be present in the move table.
+ CHECK(!found_translation) << "We were asked to update this vtable entry. Must not fail.";
}
}
@@ -5300,7 +5445,11 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self,
auto* resolved_methods = klass->GetDexCache()->GetResolvedMethods();
for (size_t i = 0, count = klass->GetDexCache()->NumResolvedMethods(); i < count; ++i) {
auto* m = mirror::DexCache::GetElementPtrSize(resolved_methods, i, image_pointer_size_);
- CHECK(move_table.find(m) == move_table.end()) << PrettyMethod(m);
+ // We don't remove default methods from the move table since we need them to update the
+ // vtable. Therefore just skip them for this check.
+ if (!m->IsDefault()) {
+ CHECK(move_table.find(m) == move_table.end()) << PrettyMethod(m);
+ }
}
}
// Put some random garbage in old virtuals to help find stale pointers.
@@ -6075,7 +6224,8 @@ const char* ClassLinker::GetClassRootDescriptor(ClassRoot class_root) {
}
bool ClassLinker::MayBeCalledWithDirectCodePointer(ArtMethod* m) {
- if (Runtime::Current()->UseJit()) {
+ Runtime* const runtime = Runtime::Current();
+ if (runtime->UseJit()) {
// JIT can have direct code pointers from any method to any other method.
return true;
}
@@ -6097,13 +6247,7 @@ bool ClassLinker::MayBeCalledWithDirectCodePointer(ArtMethod* m) {
} else {
// The method can be called outside its own oat file. Therefore it won't be called using its
// direct code pointer only if all loaded oat files have been compiled in PIC mode.
- ReaderMutexLock mu(Thread::Current(), dex_lock_);
- for (const OatFile* oat_file : oat_files_) {
- if (!oat_file->IsPic()) {
- return true;
- }
- }
- return false;
+ return runtime->GetOatFileManager().HaveNonPicOatFile();
}
}
@@ -6138,9 +6282,13 @@ jobject ClassLinker::CreatePathClassLoader(Thread* self, std::vector<const DexFi
for (const DexFile* dex_file : dex_files) {
StackHandleScope<3> hs2(self);
- Handle<mirror::LongArray> h_long_array = hs2.NewHandle(mirror::LongArray::Alloc(self, 1));
+ // CreatePathClassLoader is only used by gtests. Index 0 of h_long_array is supposed to be the
+ // oat file but we can leave it null.
+ Handle<mirror::LongArray> h_long_array = hs2.NewHandle(mirror::LongArray::Alloc(
+ self,
+ kDexFileIndexStart + 1));
DCHECK(h_long_array.Get() != nullptr);
- h_long_array->Set(0, reinterpret_cast<intptr_t>(dex_file));
+ h_long_array->Set(kDexFileIndexStart, reinterpret_cast<intptr_t>(dex_file));
Handle<mirror::Object> h_dex_file = hs2.NewHandle(
cookie_field->GetDeclaringClass()->AllocObject(self));
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 7f3e93806e..fd30a46a1b 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -18,6 +18,7 @@
#define ART_RUNTIME_CLASS_LINKER_H_
#include <string>
+#include <unordered_map>
#include <utility>
#include <vector>
@@ -318,24 +319,17 @@ class ClassLinker {
SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!dex_lock_, !Roles::uninterruptible_);
- mirror::DexCache* RegisterDexFile(const DexFile& dex_file)
+ mirror::DexCache* RegisterDexFile(const DexFile& dex_file, LinearAlloc* linear_alloc)
REQUIRES(!dex_lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
void RegisterDexFile(const DexFile& dex_file, Handle<mirror::DexCache> dex_cache)
REQUIRES(!dex_lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
- const OatFile* RegisterOatFile(const OatFile* oat_file)
- REQUIRES(!dex_lock_);
-
const std::vector<const DexFile*>& GetBootClassPath() {
return boot_class_path_;
}
- // Returns the first non-image oat file in the class path.
- const OatFile* GetPrimaryOatFile()
- REQUIRES(!dex_lock_);
-
void VisitClasses(ClassVisitor* visitor)
REQUIRES(!Locks::classlinker_classes_lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -364,26 +358,6 @@ class ClassLinker {
REQUIRES(!dex_lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
- // Finds or creates the oat file holding dex_location. Then loads and returns
- // all corresponding dex files (there may be more than one dex file loaded
- // in the case of multidex).
- // This may return the original, unquickened dex files if the oat file could
- // not be generated.
- //
- // Returns an empty vector if the dex files could not be loaded. In this
- // case, there will be at least one error message returned describing why no
- // dex files could not be loaded. The 'error_msgs' argument must not be
- // null, regardless of whether there is an error or not.
- //
- // This method should not be called with the mutator_lock_ held, because it
- // could end up starving GC if we need to generate or relocate any oat
- // files.
- std::vector<std::unique_ptr<const DexFile>> OpenDexFilesFromOat(
- const char* dex_location,
- const char* oat_location,
- std::vector<std::string>* error_msgs)
- REQUIRES(!dex_lock_, !Locks::mutator_lock_);
-
// Allocate an instance of a java.lang.Object.
mirror::Object* AllocObject(Thread* self)
SHARED_REQUIRES(Locks::mutator_lock_)
@@ -555,9 +529,17 @@ class ClassLinker {
SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!Locks::classlinker_classes_lock_);
+ // Unlike GetOrCreateAllocatorForClassLoader, GetAllocatorForClassLoader asserts that the
+ // allocator for this class loader is already created.
static LinearAlloc* GetAllocatorForClassLoader(mirror::ClassLoader* class_loader)
SHARED_REQUIRES(Locks::mutator_lock_);
+ // Return the linear alloc for a class loader if it is already allocated, otherwise allocate and
+ // set it. TODO: Consider using a lock other than classlinker_classes_lock_.
+ static LinearAlloc* GetOrCreateAllocatorForClassLoader(mirror::ClassLoader* class_loader)
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!Locks::classlinker_classes_lock_);
+
private:
struct ClassLoaderData {
jweak weak_root; // Weak root to enable class unloading.
@@ -581,10 +563,6 @@ class ClassLinker {
REQUIRES(Locks::classlinker_classes_lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
- OatFile& GetImageOatFile(gc::space::ImageSpace* space)
- REQUIRES(!dex_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
-
void FinishInit(Thread* self)
SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!dex_lock_, !Roles::uninterruptible_);
@@ -600,7 +578,9 @@ class ClassLinker {
mirror::Class* AllocClass(Thread* self, uint32_t class_size)
SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
- mirror::DexCache* AllocDexCache(Thread* self, const DexFile& dex_file)
+ mirror::DexCache* AllocDexCache(Thread* self,
+ const DexFile& dex_file,
+ LinearAlloc* linear_alloc)
SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
@@ -679,6 +659,12 @@ class ClassLinker {
bool can_init_parents)
SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!dex_lock_);
+ bool InitializeDefaultInterfaceRecursive(Thread* self,
+ Handle<mirror::Class> klass,
+ bool can_run_clinit,
+ bool can_init_parents)
+ REQUIRES(!dex_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
bool WaitForInitializeClass(Handle<mirror::Class> klass,
Thread* self,
ObjectLock<mirror::Class>& lock);
@@ -718,12 +704,65 @@ class ClassLinker {
ArtMethod** out_imt)
SHARED_REQUIRES(Locks::mutator_lock_);
- bool LinkVirtualMethods(Thread* self, Handle<mirror::Class> klass)
- SHARED_REQUIRES(Locks::mutator_lock_);
-
+ // Links the virtual methods for the given class and records any default methods that will need to
+ // be updated later.
+ //
+ // Arguments:
+ // * self - The current thread.
+  //     * klass - class whose vtable will be filled in.
+  //     * default_translations - Vtable-index to new-method map.
+  //                            Any vtable entries that need to be updated with new default methods
+  //                            are stored in the default_translations map. The map is keyed on the
+  //                            vtable index that needs to be updated. We use this map because if we
+  //                            override a default method with another default method we need to
+  //                            update the vtable to point to the new method. Unfortunately, since we
+  //                            copy the ArtMethod* we cannot just do a simple scan; we therefore
+  //                            store the vtable indexes that might need to be updated together with
+  //                            the methods they will turn into.
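+  //                            For example, if vtable slot 3 currently holds an inherited default
+  //                            method and a more specific default is found for it, the map records
+  //                            {3 -> new default method} so LinkInterfaceMethods can rewrite slot 3.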
+ // TODO This whole default_translations thing is very dirty. There should be a better way.
+ bool LinkVirtualMethods(Thread* self,
+ Handle<mirror::Class> klass,
+ /*out*/std::unordered_map<size_t, ArtMethod*>* default_translations)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
+ // Sets up the interface lookup table (IFTable) in the correct order to allow searching for
+ // default methods.
+ bool SetupInterfaceLookupTable(Thread* self,
+ Handle<mirror::Class> klass,
+ Handle<mirror::ObjectArray<mirror::Class>> interfaces)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
+ // Find the default method implementation for 'interface_method' in 'klass', if one exists.
+ //
+ // Arguments:
+ // * self - The current thread.
+ // * target_method - The method we are trying to find a default implementation for.
+ // * klass - The class we are searching for a definition of target_method.
+ // * out_default_method - The pointer we will store the found default method to on success.
+  //     * icce_message - A string into which we will store an appropriate
+  //                      IncompatibleClassChangeError message in case of failure. We must do it this
+  //                      way since we do not know whether it is safe to allocate the exception object
+  //                      here, which could cause the thread to suspend.
+ //
+ // Return value:
+ // * True - There were no conflicting method implementations found in the class while searching
+ // for target_method. The default method implementation is stored into out_default_method
+ // if it was found. Otherwise *out_default_method will be set to nullptr.
+ // * False - Conflicting method implementations were found when searching for target_method. The
+ // value of *out_default_method is undefined and *icce_message is a string that should
+ // be used to create an IncompatibleClassChangeError as soon as possible.
+ bool FindDefaultMethodImplementation(Thread* self,
+ ArtMethod* target_method,
+ Handle<mirror::Class> klass,
+ /*out*/ArtMethod** out_default_method,
+ /*out*/std::string* icce_message) const
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
+ // Sets the imt entries and fixes up the vtable for the given class by linking all the interface
+ // methods. See LinkVirtualMethods for an explanation of what default_translations is.
bool LinkInterfaceMethods(Thread* self,
Handle<mirror::Class> klass,
- Handle<mirror::ObjectArray<mirror::Class>> interfaces,
+ const std::unordered_map<size_t, ArtMethod*>& default_translations,
ArtMethod** out_imt)
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -758,12 +797,6 @@ class ClassLinker {
return dex_caches_;
}
- const OatFile* FindOpenedOatFileFromOatLocation(const std::string& oat_location)
- REQUIRES(!dex_lock_);
-
- // Returns the boot image oat file.
- const OatFile* GetBootOatFile() SHARED_REQUIRES(dex_lock_);
-
void CreateProxyConstructor(Handle<mirror::Class> klass, ArtMethod* out)
SHARED_REQUIRES(Locks::mutator_lock_);
void CreateProxyMethod(Handle<mirror::Class> klass, ArtMethod* prototype, ArtMethod* out)
@@ -813,9 +846,6 @@ class ClassLinker {
SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!dex_lock_);
- // Check for duplicate class definitions of the given oat file against all open oat files.
- bool HasCollisions(const OatFile* oat_file, std::string* error_msg) REQUIRES(!dex_lock_);
-
bool HasInitWithString(Thread* self, const char* descriptor)
SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!dex_lock_);
@@ -834,7 +864,6 @@ class ClassLinker {
// JNI weak globals to allow dex caches to get unloaded. We lazily delete weak globals when we
// register new dex files.
std::list<jweak> dex_caches_ GUARDED_BY(dex_lock_);
- std::vector<const OatFile*> oat_files_ GUARDED_BY(dex_lock_);
// This contains the class loaders which have class tables. It is populated by
// InsertClassTableForClassLoader.
@@ -880,8 +909,8 @@ class ClassLinker {
// Image pointer size.
size_t image_pointer_size_;
+ friend class ImageDumper; // for DexLock
friend class ImageWriter; // for GetClassRoots
- friend class ImageDumper; // for FindOpenedOatFileFromOatLocation
friend class JniCompilerTest; // for GetRuntimeQuickGenericJniStub
friend class JniInternalTest; // for GetRuntimeQuickGenericJniStub
ART_FRIEND_TEST(mirror::DexCacheTest, Open); // for AllocDexCache
diff --git a/runtime/class_table.cc b/runtime/class_table.cc
index fc8e6c49da..4b0cbc836c 100644
--- a/runtime/class_table.cc
+++ b/runtime/class_table.cc
@@ -21,7 +21,9 @@
namespace art {
ClassTable::ClassTable() {
- classes_.push_back(ClassSet());
+ Runtime* const runtime = Runtime::Current();
+ classes_.push_back(ClassSet(runtime->GetHashTableMinLoadFactor(),
+ runtime->GetHashTableMaxLoadFactor()));
}
void ClassTable::FreezeSnapshot() {
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index 56c5d1a2c3..b6b514177a 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -42,6 +42,7 @@
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mem_map.h"
+#include "native/dalvik_system_DexFile.h"
#include "noop_compiler_callbacks.h"
#include "os.h"
#include "primitive.h"
@@ -516,7 +517,7 @@ std::vector<const DexFile*> CommonRuntimeTest::GetDexFiles(jobject jclass_loader
mirror::LongArray* long_array = cookie_field->GetObject(dex_file)->AsLongArray();
DCHECK(long_array != nullptr);
int32_t long_array_size = long_array->GetLength();
- for (int32_t j = 0; j < long_array_size; ++j) {
+ for (int32_t j = kDexFileIndexStart; j < long_array_size; ++j) {
const DexFile* cp_dex_file = reinterpret_cast<const DexFile*>(static_cast<uintptr_t>(
long_array->GetWithoutChecks(j)));
if (cp_dex_file == nullptr) {
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index b19381d879..b17b76e2ea 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -612,7 +612,7 @@ void Dbg::Disconnected() {
// Since we're going to disable deoptimization, we clear the deoptimization requests queue.
// This prevents us from having any pending deoptimization request when the debugger attaches
// to us again while no event has been requested yet.
- MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
+ MutexLock mu(self, *Locks::deoptimization_lock_);
deoptimization_requests_.clear();
full_deoptimization_event_count_ = 0U;
}
@@ -1903,8 +1903,7 @@ void Dbg::OutputJValue(JDWP::JdwpTag tag, const JValue* return_value, JDWP::Expa
JDWP::JdwpError Dbg::GetThreadName(JDWP::ObjectId thread_id, std::string* name) {
ScopedObjectAccessUnchecked soa(Thread::Current());
JDWP::JdwpError error;
- Thread* thread = DecodeThread(soa, thread_id, &error);
- UNUSED(thread);
+ DecodeThread(soa, thread_id, &error);
if (error != JDWP::ERR_NONE && error != JDWP::ERR_THREAD_NOT_ALIVE) {
return error;
}
@@ -1931,8 +1930,7 @@ JDWP::JdwpError Dbg::GetThreadGroup(JDWP::ObjectId thread_id, JDWP::ExpandBuf* p
}
ScopedAssertNoThreadSuspension ants(soa.Self(), "Debugger: GetThreadGroup");
// Okay, so it's an object, but is it actually a thread?
- Thread* thread = DecodeThread(soa, thread_id, &error);
- UNUSED(thread);
+ DecodeThread(soa, thread_id, &error);
if (error == JDWP::ERR_THREAD_NOT_ALIVE) {
// Zombie threads are in the null group.
expandBufAddObjectId(pReply, JDWP::ObjectId(0));
@@ -5043,4 +5041,13 @@ void DeoptimizationRequest::SetMethod(ArtMethod* m) {
method_ = soa.EncodeMethod(m);
}
+void Dbg::VisitRoots(RootVisitor* visitor) {
+ // Visit breakpoint roots, used to prevent unloading of methods with breakpoints.
+ ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
+ BufferedRootVisitor<128> root_visitor(visitor, RootInfo(kRootVMInternal));
+ for (Breakpoint& breakpoint : gBreakpoints) {
+ breakpoint.Method()->VisitRoots(root_visitor, sizeof(void*));
+ }
+}
+
} // namespace art
diff --git a/runtime/debugger.h b/runtime/debugger.h
index b3617e4bbb..e908304977 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -646,6 +646,7 @@ class Dbg {
static void DdmSendChunkV(uint32_t type, const iovec* iov, int iov_count)
SHARED_REQUIRES(Locks::mutator_lock_);
+ // Visit breakpoint roots, used to prevent unloading of methods with breakpoints.
static void VisitRoots(RootVisitor* visitor)
SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/dex_file_verifier.cc b/runtime/dex_file_verifier.cc
index 09416cc5c4..a5f9d09900 100644
--- a/runtime/dex_file_verifier.cc
+++ b/runtime/dex_file_verifier.cc
@@ -23,7 +23,9 @@
#include "base/stringprintf.h"
#include "dex_file-inl.h"
+#include "experimental_flags.h"
#include "leb128.h"
+#include "runtime.h"
#include "safe_map.h"
#include "utf-inl.h"
#include "utils.h"
@@ -2530,7 +2532,14 @@ bool DexFileVerifier::CheckMethodAccessFlags(uint32_t method_index,
}
// Only the static initializer may have code in an interface.
- if (((class_access_flags & kAccInterface) != 0) && !is_clinit_by_name) {
+ // TODO We should have some way determine whether to allow this experimental flag without the
+ // runtime being started.
+ // We assume experimental flags are enabled when running without a runtime to enable tools like
+ // dexdump to handle dex files with these features.
+ if (((class_access_flags & kAccInterface) != 0)
+ && !is_clinit_by_name
+ && Runtime::Current() != nullptr
+ && !Runtime::Current()->AreExperimentalFlagsEnabled(ExperimentalFlags::kDefaultMethods)) {
*error_msg = StringPrintf("Non-clinit interface method %" PRIu32 " should not have code",
method_index);
return false;
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index f66628d7cb..21e4e445e6 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -34,6 +34,7 @@
#include "mirror/throwable.h"
#include "nth_caller_visitor.h"
#include "runtime.h"
+#include "stack_map.h"
#include "thread.h"
namespace art {
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index f1939993f7..17e6aac357 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -16,6 +16,7 @@
#include "entrypoints/entrypoint_utils.h"
+#include "art_code.h"
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/mutex.h"
@@ -358,16 +359,17 @@ ArtMethod* GetCalleeSaveMethodCaller(ArtMethod** sp,
const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, type);
auto** caller_sp = reinterpret_cast<ArtMethod**>(
reinterpret_cast<uintptr_t>(sp) + callee_frame_size);
+ ArtCode current_code = GetCallingCodeFrom(caller_sp);
ArtMethod* outer_method = *caller_sp;
ArtMethod* caller = outer_method;
- if ((outer_method != nullptr) && outer_method->IsOptimized(sizeof(void*))) {
+ if ((outer_method != nullptr) && current_code.IsOptimized(sizeof(void*))) {
const size_t callee_return_pc_offset = GetCalleeSaveReturnPcOffset(kRuntimeISA, type);
uintptr_t caller_pc = *reinterpret_cast<uintptr_t*>(
(reinterpret_cast<uint8_t*>(sp) + callee_return_pc_offset));
if (LIKELY(caller_pc != reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()))) {
- uintptr_t native_pc_offset = outer_method->NativeQuickPcOffset(caller_pc);
- CodeInfo code_info = outer_method->GetOptimizedCodeInfo();
+ uintptr_t native_pc_offset = current_code.NativeQuickPcOffset(caller_pc);
+ CodeInfo code_info = current_code.GetOptimizedCodeInfo();
StackMapEncoding encoding = code_info.ExtractEncoding();
StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
DCHECK(stack_map.IsValid());
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index 4217cab697..171ace27a5 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -20,6 +20,7 @@
#include <jni.h>
#include <stdint.h>
+#include "art_code.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "dex_instruction.h"
@@ -184,6 +185,10 @@ ArtMethod* GetCalleeSaveMethodCaller(ArtMethod** sp,
Runtime::CalleeSaveType type,
bool do_caller_check = false);
+inline ArtCode GetCallingCodeFrom(ArtMethod** sp) {
+ return ArtCode(sp);
+}
+
} // namespace art
#endif // ART_RUNTIME_ENTRYPOINTS_ENTRYPOINT_UTILS_H_
diff --git a/runtime/entrypoints/interpreter/interpreter_entrypoints.cc b/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
deleted file mode 100644
index 72c2e0a5b0..0000000000
--- a/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "art_method-inl.h"
-#include "class_linker.h"
-#include "dex_file-inl.h"
-#include "interpreter/interpreter.h"
-#include "mirror/object-inl.h"
-#include "reflection.h"
-#include "runtime.h"
-#include "stack.h"
-
-namespace art {
-
-extern "C" void artInterpreterToCompiledCodeBridge(Thread* self, const DexFile::CodeItem* code_item,
- ShadowFrame* shadow_frame, JValue* result) {
- ArtMethod* method = shadow_frame->GetMethod();
- // Ensure static methods are initialized.
- if (method->IsStatic()) {
- mirror::Class* declaringClass = method->GetDeclaringClass();
- if (UNLIKELY(!declaringClass->IsInitialized())) {
- self->PushShadowFrame(shadow_frame);
- StackHandleScope<1> hs(self);
- Handle<mirror::Class> h_class(hs.NewHandle(declaringClass));
- if (UNLIKELY(!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_class, true,
- true))) {
- self->PopShadowFrame();
- DCHECK(self->IsExceptionPending());
- return;
- }
- self->PopShadowFrame();
- CHECK(h_class->IsInitializing());
- // Reload from shadow frame in case the method moved, this is faster than adding a handle.
- method = shadow_frame->GetMethod();
- }
- }
- uint16_t arg_offset = (code_item == nullptr) ? 0 : code_item->registers_size_ - code_item->ins_size_;
- method->Invoke(self, shadow_frame->GetVRegArgs(arg_offset),
- (shadow_frame->NumberOfVRegs() - arg_offset) * sizeof(uint32_t),
- result, method->GetInterfaceMethodIfProxy(sizeof(void*))->GetShorty());
-}
-
-} // namespace art
diff --git a/runtime/entrypoints/interpreter/interpreter_entrypoints.h b/runtime/entrypoints/interpreter/interpreter_entrypoints.h
deleted file mode 100644
index 09522149a7..0000000000
--- a/runtime/entrypoints/interpreter/interpreter_entrypoints.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_ENTRYPOINTS_INTERPRETER_INTERPRETER_ENTRYPOINTS_H_
-#define ART_RUNTIME_ENTRYPOINTS_INTERPRETER_INTERPRETER_ENTRYPOINTS_H_
-
-#include "base/macros.h"
-#include "dex_file.h"
-#include "offsets.h"
-
-#define INTERPRETER_ENTRYPOINT_OFFSET(ptr_size, x) \
- Thread::InterpreterEntryPointOffset<ptr_size>(OFFSETOF_MEMBER(InterpreterEntryPoints, x))
-
-namespace art {
-
-union JValue;
-class ShadowFrame;
-class Thread;
-
-// Pointers to functions that are called by interpreter trampolines via thread-local storage.
-struct PACKED(4) InterpreterEntryPoints {
- void (*pInterpreterToInterpreterBridge)(Thread* self, const DexFile::CodeItem* code_item,
- ShadowFrame* shadow_frame, JValue* result);
- void (*pInterpreterToCompiledCodeBridge)(Thread* self, const DexFile::CodeItem* code_item,
- ShadowFrame* shadow_frame, JValue* result);
-};
-
-} // namespace art
-
-#endif // ART_RUNTIME_ENTRYPOINTS_INTERPRETER_INTERPRETER_ENTRYPOINTS_H_
diff --git a/runtime/entrypoints/jni/jni_entrypoints.h b/runtime/entrypoints/jni/jni_entrypoints.h
index 6fb0560a18..9c1b0dc62e 100644
--- a/runtime/entrypoints/jni/jni_entrypoints.h
+++ b/runtime/entrypoints/jni/jni_entrypoints.h
@@ -17,6 +17,8 @@
#ifndef ART_RUNTIME_ENTRYPOINTS_JNI_JNI_ENTRYPOINTS_H_
#define ART_RUNTIME_ENTRYPOINTS_JNI_JNI_ENTRYPOINTS_H_
+#include "jni.h"
+
#include "base/macros.h"
#include "offsets.h"
diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
index 28c62a8524..4e4f8510ec 100644
--- a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
@@ -56,9 +56,8 @@ extern "C" mirror::Object* artAllocObjectFromCode ##suffix##suffix2( \
return AllocObjectFromCode<false, instrumented_bool>(type_idx, method, self, allocator_type); \
} \
extern "C" mirror::Object* artAllocObjectFromCodeResolved##suffix##suffix2( \
- mirror::Class* klass, ArtMethod* method, Thread* self) \
+ mirror::Class* klass, ArtMethod* method ATTRIBUTE_UNUSED, Thread* self) \
SHARED_REQUIRES(Locks::mutator_lock_) { \
- UNUSED(method); \
ScopedQuickEntrypointChecks sqec(self); \
if (kUseTlabFastPath && !instrumented_bool && allocator_type == gc::kAllocatorTypeTLAB) { \
if (LIKELY(klass->IsInitialized())) { \
@@ -83,9 +82,8 @@ extern "C" mirror::Object* artAllocObjectFromCodeResolved##suffix##suffix2( \
return AllocObjectFromCodeResolved<instrumented_bool>(klass, self, allocator_type); \
} \
extern "C" mirror::Object* artAllocObjectFromCodeInitialized##suffix##suffix2( \
- mirror::Class* klass, ArtMethod* method, Thread* self) \
+ mirror::Class* klass, ArtMethod* method ATTRIBUTE_UNUSED, Thread* self) \
SHARED_REQUIRES(Locks::mutator_lock_) { \
- UNUSED(method); \
ScopedQuickEntrypointChecks sqec(self); \
if (kUseTlabFastPath && !instrumented_bool && allocator_type == gc::kAllocatorTypeTLAB) { \
size_t byte_count = klass->GetObjectSize(); \
diff --git a/runtime/entrypoints/quick/quick_lock_entrypoints.cc b/runtime/entrypoints/quick/quick_lock_entrypoints.cc
index 3bf001e249..4adb39b9c6 100644
--- a/runtime/entrypoints/quick/quick_lock_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_lock_entrypoints.cc
@@ -21,8 +21,9 @@
namespace art {
extern "C" int artLockObjectFromCode(mirror::Object* obj, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_)
- NO_THREAD_SAFETY_ANALYSIS /* EXCLUSIVE_LOCK_FUNCTION(Monitor::monitor_lock_) */ {
+ NO_THREAD_SAFETY_ANALYSIS
+ REQUIRES(!Roles::uninterruptible_)
+ SHARED_REQUIRES(Locks::mutator_lock_) /* EXCLUSIVE_LOCK_FUNCTION(Monitor::monitor_lock_) */ {
ScopedQuickEntrypointChecks sqec(self);
if (UNLIKELY(obj == nullptr)) {
ThrowNullPointerException("Null reference used for synchronization (monitor-enter)");
@@ -41,8 +42,9 @@ extern "C" int artLockObjectFromCode(mirror::Object* obj, Thread* self)
}
extern "C" int artUnlockObjectFromCode(mirror::Object* obj, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_)
- NO_THREAD_SAFETY_ANALYSIS /* UNLOCK_FUNCTION(Monitor::monitor_lock_) */ {
+ NO_THREAD_SAFETY_ANALYSIS
+ REQUIRES(!Roles::uninterruptible_)
+ SHARED_REQUIRES(Locks::mutator_lock_) /* UNLOCK_FUNCTION(Monitor::monitor_lock_) */ {
ScopedQuickEntrypointChecks sqec(self);
if (UNLIKELY(obj == nullptr)) {
ThrowNullPointerException("Null reference used for synchronization (monitor-exit)");
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 5d3ac73d77..6035dfe084 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include "art_code.h"
#include "art_method-inl.h"
#include "callee_save_frame.h"
#include "common_throws.h"
@@ -294,7 +295,8 @@ class QuickArgumentVisitor {
static mirror::Object* GetProxyThisObject(ArtMethod** sp)
SHARED_REQUIRES(Locks::mutator_lock_) {
CHECK((*sp)->IsProxyMethod());
- CHECK_EQ(kQuickCalleeSaveFrame_RefAndArgs_FrameSize, (*sp)->GetFrameSizeInBytes());
+ CHECK_EQ(kQuickCalleeSaveFrame_RefAndArgs_FrameSize,
+ GetCallingCodeFrom(sp).GetFrameSizeInBytes());
CHECK_GT(kNumQuickGprArgs, 0u);
constexpr uint32_t kThisGprIndex = 0u; // 'this' is in the 1st GPR.
size_t this_arg_offset = kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset +
@@ -320,12 +322,11 @@ class QuickArgumentVisitor {
const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, Runtime::kRefsAndArgs);
ArtMethod** caller_sp = reinterpret_cast<ArtMethod**>(
reinterpret_cast<uintptr_t>(sp) + callee_frame_size);
- ArtMethod* outer_method = *caller_sp;
uintptr_t outer_pc = QuickArgumentVisitor::GetCallingPc(sp);
- uintptr_t outer_pc_offset = outer_method->NativeQuickPcOffset(outer_pc);
+ uintptr_t outer_pc_offset = GetCallingCodeFrom(caller_sp).NativeQuickPcOffset(outer_pc);
- if (outer_method->IsOptimized(sizeof(void*))) {
- CodeInfo code_info = outer_method->GetOptimizedCodeInfo();
+ if (GetCallingCodeFrom(caller_sp).IsOptimized(sizeof(void*))) {
+ CodeInfo code_info = GetCallingCodeFrom(caller_sp).GetOptimizedCodeInfo();
StackMapEncoding encoding = code_info.ExtractEncoding();
StackMap stack_map = code_info.GetStackMapForNativePcOffset(outer_pc_offset, encoding);
DCHECK(stack_map.IsValid());
@@ -336,7 +337,7 @@ class QuickArgumentVisitor {
return stack_map.GetDexPc(encoding);
}
} else {
- return outer_method->ToDexPc(outer_pc);
+ return GetCallingCodeFrom(caller_sp).ToDexPc(outer_pc);
}
}
@@ -719,7 +720,7 @@ extern "C" uint64_t artQuickToInterpreterBridge(ArtMethod* method, Thread* self,
uint16_t num_regs = code_item->registers_size_;
// No last shadow coming from quick.
ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
- CREATE_SHADOW_FRAME(num_regs, nullptr, method, 0);
+ CREATE_SHADOW_FRAME(num_regs, /* link */ nullptr, method, /* dex pc */ 0);
ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get();
size_t first_arg_reg = code_item->registers_size_ - code_item->ins_size_;
BuildQuickShadowFrameVisitor shadow_frame_builder(sp, method->IsStatic(), shorty, shorty_len,
@@ -841,8 +842,9 @@ extern "C" uint64_t artQuickProxyInvokeHandler(
self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments");
// Register the top of the managed stack, making stack crawlable.
DCHECK_EQ((*sp), proxy_method) << PrettyMethod(proxy_method);
- DCHECK_EQ(proxy_method->GetFrameSizeInBytes(),
- Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes())
+ DCHECK_EQ(GetCallingCodeFrom(sp).GetFrameSizeInBytes(),
+ ArtCode(Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs))
+ .GetFrameSizeInBytes())
<< PrettyMethod(proxy_method);
self->VerifyStack();
// Start new JNI local reference state.
@@ -1522,9 +1524,9 @@ class ComputeNativeCallFrameSize {
return sp8;
}
- virtual void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm)
+ virtual void WalkHeader(
+ BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm ATTRIBUTE_UNUSED)
SHARED_REQUIRES(Locks::mutator_lock_) {
- UNUSED(sm);
}
void Walk(const char* shorty, uint32_t shorty_len) SHARED_REQUIRES(Locks::mutator_lock_) {
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
index 0b366944e4..5299394d7c 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
@@ -49,7 +49,7 @@ class QuickTrampolineEntrypointsTest : public CommonRuntimeTest {
static void CheckFrameSize(InstructionSet isa, Runtime::CalleeSaveType type, uint32_t save_size)
NO_THREAD_SAFETY_ANALYSIS {
ArtMethod* save_method = CreateCalleeSaveMethod(isa, type);
- QuickMethodFrameInfo frame_info = save_method->GetQuickFrameInfo();
+ QuickMethodFrameInfo frame_info = ArtCode(save_method).GetQuickFrameInfo();
EXPECT_EQ(frame_info.FrameSizeInBytes(), save_size) << "Expected and real size differs for "
<< type << " core spills=" << std::hex << frame_info.CoreSpillMask() << " fp spills="
<< frame_info.FpSpillMask() << std::dec << " ISA " << isa;
@@ -58,8 +58,8 @@ class QuickTrampolineEntrypointsTest : public CommonRuntimeTest {
static void CheckPCOffset(InstructionSet isa, Runtime::CalleeSaveType type, size_t pc_offset)
NO_THREAD_SAFETY_ANALYSIS {
ArtMethod* save_method = CreateCalleeSaveMethod(isa, type);
- QuickMethodFrameInfo frame_info = save_method->GetQuickFrameInfo();
- EXPECT_EQ(save_method->GetReturnPcOffset().SizeValue(), pc_offset)
+ QuickMethodFrameInfo frame_info = ArtCode(save_method).GetQuickFrameInfo();
+ EXPECT_EQ(ArtCode(save_method).GetReturnPcOffset().SizeValue(), pc_offset)
<< "Expected and real pc offset differs for " << type
<< " core spills=" << std::hex << frame_info.CoreSpillMask()
<< " fp spills=" << frame_info.FpSpillMask() << std::dec << " ISA " << isa;
diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc
index c37d159d1a..78f56eef8d 100644
--- a/runtime/entrypoints_order_test.cc
+++ b/runtime/entrypoints_order_test.cc
@@ -114,7 +114,7 @@ class EntrypointsOrderTest : public CommonRuntimeTest {
sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, last_no_thread_suspension_cause, checkpoint_functions,
sizeof(void*));
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, checkpoint_functions, interpreter_entrypoints,
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, checkpoint_functions, jni_entrypoints,
sizeof(void*) * 6);
// Skip across the entrypoints structures.
@@ -137,15 +137,6 @@ class EntrypointsOrderTest : public CommonRuntimeTest {
thread_tlsptr_end);
}
- void CheckInterpreterEntryPoints() {
- CHECKED(OFFSETOF_MEMBER(InterpreterEntryPoints, pInterpreterToInterpreterBridge) == 0,
- InterpreterEntryPoints_start_with_i2i);
- EXPECT_OFFSET_DIFFNP(InterpreterEntryPoints, pInterpreterToInterpreterBridge,
- pInterpreterToCompiledCodeBridge, sizeof(void*));
- CHECKED(OFFSETOF_MEMBER(InterpreterEntryPoints, pInterpreterToCompiledCodeBridge)
- + sizeof(void*) == sizeof(InterpreterEntryPoints), InterpreterEntryPoints_all);
- }
-
void CheckJniEntryPoints() {
CHECKED(OFFSETOF_MEMBER(JniEntryPoints, pDlsymLookup) == 0,
JniEntryPoints_start_with_dlsymlookup);
@@ -321,10 +312,6 @@ TEST_F(EntrypointsOrderTest, ThreadOffsets) {
CheckThreadOffsets();
}
-TEST_F(EntrypointsOrderTest, InterpreterEntryPoints) {
- CheckInterpreterEntryPoints();
-}
-
TEST_F(EntrypointsOrderTest, JniEntryPoints) {
CheckJniEntryPoints();
}
diff --git a/runtime/exception_test.cc b/runtime/exception_test.cc
index 9f84bd2a39..da1d80ea33 100644
--- a/runtime/exception_test.cc
+++ b/runtime/exception_test.cc
@@ -169,7 +169,7 @@ TEST_F(ExceptionTest, StackTraceElement) {
r->SetInstructionSet(kRuntimeISA);
ArtMethod* save_method = r->CreateCalleeSaveMethod();
r->SetCalleeSaveMethod(save_method, Runtime::kSaveAll);
- QuickMethodFrameInfo frame_info = save_method->GetQuickFrameInfo();
+ QuickMethodFrameInfo frame_info = ArtCode(save_method).GetQuickFrameInfo();
ASSERT_EQ(kStackAlignment, 16U);
// ASSERT_EQ(sizeof(uintptr_t), sizeof(uint32_t));
@@ -187,14 +187,14 @@ TEST_F(ExceptionTest, StackTraceElement) {
}
fake_stack.push_back(
- method_g_->ToNativeQuickPc(dex_pc, /* is_catch_handler */ false)); // return pc
+ ArtCode(method_g_).ToNativeQuickPc(dex_pc, /* is_catch_handler */ false)); // return pc
// Create/push fake 16byte stack frame for method g
fake_stack.push_back(reinterpret_cast<uintptr_t>(method_g_));
fake_stack.push_back(0);
fake_stack.push_back(0);
fake_stack.push_back(
- method_g_->ToNativeQuickPc(dex_pc, /* is_catch_handler */ false)); // return pc
+ ArtCode(method_g_).ToNativeQuickPc(dex_pc, /* is_catch_handler */ false)); // return pc
// Create/push fake 16byte stack frame for method f
fake_stack.push_back(reinterpret_cast<uintptr_t>(method_f_));
diff --git a/runtime/experimental_flags.h b/runtime/experimental_flags.h
new file mode 100644
index 0000000000..2e674e95c6
--- /dev/null
+++ b/runtime/experimental_flags.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_EXPERIMENTAL_FLAGS_H_
+#define ART_RUNTIME_EXPERIMENTAL_FLAGS_H_
+
+#include <ostream>
+
+namespace art {
+
+// Possible experimental features that might be enabled.
+struct ExperimentalFlags {
+ // The actual flag values.
+ enum {
+ kNone = 0x0000,
+ kLambdas = 0x0001,
+ kDefaultMethods = 0x0002,
+ };
+
+ constexpr ExperimentalFlags() : value_(0x0000) {}
+ constexpr ExperimentalFlags(decltype(kNone) t) : value_(static_cast<uint32_t>(t)) {}
+
+ constexpr operator decltype(kNone)() const {
+ return static_cast<decltype(kNone)>(value_);
+ }
+
+ constexpr explicit operator bool() const {
+ return value_ != kNone;
+ }
+
+ constexpr ExperimentalFlags operator|(const decltype(kNone)& b) const {
+ return static_cast<decltype(kNone)>(value_ | static_cast<uint32_t>(b));
+ }
+ constexpr ExperimentalFlags operator|(const ExperimentalFlags& b) const {
+ return static_cast<decltype(kNone)>(value_ | b.value_);
+ }
+
+ constexpr ExperimentalFlags operator&(const ExperimentalFlags& b) const {
+ return static_cast<decltype(kNone)>(value_ & b.value_);
+ }
+ constexpr ExperimentalFlags operator&(const decltype(kNone)& b) const {
+ return static_cast<decltype(kNone)>(value_ & static_cast<uint32_t>(b));
+ }
+
+ constexpr bool operator==(const ExperimentalFlags& b) const {
+ return value_ == b.value_;
+ }
+
+ private:
+ uint32_t value_;
+};
+
+inline std::ostream& operator<<(std::ostream& stream, const ExperimentalFlags& e) {
+ bool started = false;
+ if (e & ExperimentalFlags::kLambdas) {
+ stream << (started ? "|" : "") << "kLambdas";
+ started = true;
+ }
+ if (e & ExperimentalFlags::kDefaultMethods) {
+ stream << (started ? "|" : "") << "kDefaultMethods";
+ started = true;
+ }
+ if (!started) {
+ stream << "kNone";
+ }
+ return stream;
+}
+
+inline std::ostream& operator<<(std::ostream& stream, const decltype(ExperimentalFlags::kNone)& e) {
+ return stream << ExperimentalFlags(e);
+}
+
+} // namespace art
+
+#endif // ART_RUNTIME_EXPERIMENTAL_FLAGS_H_
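The new header above defines a small typed-bitflag wrapper. As a standalone illustration of the same idiom (not ART code; the Flags type and feature names below are made up), it can be used like this:

// Minimal sketch of the typed-bitflag idiom: an unnamed enum for the values,
// a wrapping struct with bitwise operators, and an explicit bool conversion.
#include <cstdint>
#include <iostream>

struct Flags {
  enum { kNone = 0x0, kFoo = 0x1, kBar = 0x2 };  // illustrative feature bits

  constexpr Flags() : value_(0) {}
  constexpr Flags(decltype(kNone) t) : value_(static_cast<uint32_t>(t)) {}

  constexpr explicit operator bool() const { return value_ != kNone; }

  constexpr Flags operator|(const Flags& b) const {
    return Flags(static_cast<decltype(kNone)>(value_ | b.value_));
  }
  constexpr Flags operator&(const Flags& b) const {
    return Flags(static_cast<decltype(kNone)>(value_ & b.value_));
  }

 private:
  uint32_t value_;
};

int main() {
  Flags enabled = Flags(Flags::kFoo) | Flags::kBar;  // combine features
  if (enabled & Flags::kFoo) {                       // test a single feature
    std::cout << "kFoo is enabled\n";
  }
  return 0;
}
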
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index c3a962737f..5b31b3aa4b 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -20,6 +20,7 @@
#include <sys/mman.h>
#include <sys/ucontext.h>
+#include "art_code.h"
#include "art_method-inl.h"
#include "base/stl_util.h"
#include "mirror/class.h"
@@ -359,16 +360,17 @@ bool FaultManager::IsInGeneratedCode(siginfo_t* siginfo, void* context, bool che
return false;
}
+ ArtCode art_code(method_obj);
+
// We can be certain that this is a method now. Check if we have a GC map
// at the return PC address.
if (true || kIsDebugBuild) {
VLOG(signals) << "looking for dex pc for return pc " << std::hex << return_pc;
- const void* code = Runtime::Current()->GetInstrumentation()->GetQuickCodeFor(method_obj,
- sizeof(void*));
- uint32_t sought_offset = return_pc - reinterpret_cast<uintptr_t>(code);
+ uint32_t sought_offset = return_pc -
+ reinterpret_cast<uintptr_t>(art_code.GetQuickOatEntryPoint(sizeof(void*)));
VLOG(signals) << "pc offset: " << std::hex << sought_offset;
}
- uint32_t dexpc = method_obj->ToDexPc(return_pc, false);
+ uint32_t dexpc = art_code.ToDexPc(return_pc, false);
VLOG(signals) << "dexpc: " << dexpc;
return !check_dex_pc || dexpc != DexFile::kDexNoIndex;
}
@@ -404,9 +406,8 @@ JavaStackTraceHandler::JavaStackTraceHandler(FaultManager* manager) : FaultHandl
manager_->AddHandler(this, false);
}
-bool JavaStackTraceHandler::Action(int sig, siginfo_t* siginfo, void* context) {
+bool JavaStackTraceHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* siginfo, void* context) {
// Make sure that we are in the generated code, but we may not have a dex pc.
- UNUSED(sig);
#ifdef TEST_NESTED_SIGNAL
bool in_generated_code = true;
#else
diff --git a/runtime/gc/accounting/remembered_set.cc b/runtime/gc/accounting/remembered_set.cc
index 277d319035..eb0852af6e 100644
--- a/runtime/gc/accounting/remembered_set.cc
+++ b/runtime/gc/accounting/remembered_set.cc
@@ -41,8 +41,7 @@ class RememberedSetCardVisitor {
explicit RememberedSetCardVisitor(RememberedSet::CardSet* const dirty_cards)
: dirty_cards_(dirty_cards) {}
- void operator()(uint8_t* card, uint8_t expected_value, uint8_t new_value) const {
- UNUSED(new_value);
+ void operator()(uint8_t* card, uint8_t expected_value, uint8_t new_value ATTRIBUTE_UNUSED) const {
if (expected_value == CardTable::kCardDirty) {
dirty_cards_->insert(card);
}
diff --git a/runtime/gc/allocator/dlmalloc.cc b/runtime/gc/allocator/dlmalloc.cc
index 3d85395377..e747f00c92 100644
--- a/runtime/gc/allocator/dlmalloc.cc
+++ b/runtime/gc/allocator/dlmalloc.cc
@@ -77,7 +77,8 @@ extern "C" void DlmallocMadviseCallback(void* start, void* end, size_t used_byte
}
extern "C" void DlmallocBytesAllocatedCallback(void* start ATTRIBUTE_UNUSED,
- void* end ATTRIBUTE_UNUSED, size_t used_bytes,
+ void* end ATTRIBUTE_UNUSED,
+ size_t used_bytes,
void* arg) {
if (used_bytes == 0) {
return;
@@ -86,10 +87,10 @@ extern "C" void DlmallocBytesAllocatedCallback(void* start ATTRIBUTE_UNUSED,
*bytes_allocated += used_bytes + sizeof(size_t);
}
-extern "C" void DlmallocObjectsAllocatedCallback(void* start, void* end, size_t used_bytes,
+extern "C" void DlmallocObjectsAllocatedCallback(void* start ATTRIBUTE_UNUSED,
+ void* end ATTRIBUTE_UNUSED,
+ size_t used_bytes,
void* arg) {
- UNUSED(start);
- UNUSED(end);
if (used_bytes == 0) {
return;
}
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index 87f1392920..3ce3d634f5 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -131,6 +131,7 @@ class RosAlloc {
private:
Slot* next_; // Next slot in the list.
+ friend class RosAlloc;
};
// We use the tail (kUseTail == true) for the bulk or thread-local free lists to avoid the need to
@@ -302,6 +303,7 @@ class RosAlloc {
// free without traversing the whole free list.
uint32_t size_;
uint32_t padding_ ATTRIBUTE_UNUSED;
+ friend class RosAlloc;
};
// Represents a run of memory slots of the same size.
@@ -482,7 +484,7 @@ class RosAlloc {
static constexpr uint8_t kMagicNumFree = 43;
// The number of size brackets. Sync this with the length of Thread::rosalloc_runs_.
static constexpr size_t kNumOfSizeBrackets = kNumRosAllocThreadLocalSizeBrackets;
- // The number of smaller size brackets that are 16 bytes apart.
+ // The number of smaller size brackets that are the quantum size apart.
static constexpr size_t kNumOfQuantumSizeBrackets = 32;
// The sizes (the slot sizes, in bytes) of the size brackets.
static size_t bracketSizes[kNumOfSizeBrackets];
@@ -520,9 +522,7 @@ class RosAlloc {
}
// Returns true if the given allocation size is for a thread local allocation.
static bool IsSizeForThreadLocal(size_t size) {
- DCHECK_GT(kNumThreadLocalSizeBrackets, 0U);
- size_t max_thread_local_bracket_idx = kNumThreadLocalSizeBrackets - 1;
- bool is_size_for_thread_local = size <= bracketSizes[max_thread_local_bracket_idx];
+ bool is_size_for_thread_local = size <= kMaxThreadLocalBracketSize;
DCHECK(size > kLargeSizeThreshold ||
(is_size_for_thread_local == (SizeToIndex(size) < kNumThreadLocalSizeBrackets)));
return is_size_for_thread_local;
@@ -634,6 +634,16 @@ class RosAlloc {
// are less than this index. We use shared (current) runs for the rest.
static const size_t kNumThreadLocalSizeBrackets = 8;
+ // The size of the largest bracket we use thread-local runs for.
+ // This should be equal to bracketSizes[kNumThreadLocalSizeBrackets - 1].
+ static const size_t kMaxThreadLocalBracketSize = 128;
+
+ // The bracket size increment for the brackets of size <= 512 bytes.
+ static constexpr size_t kBracketQuantumSize = 16;
+
+  // Equal to Log2(kBracketQuantumSize).
+ static constexpr size_t kBracketQuantumSizeShift = 4;
+
private:
// The base address of the memory region that's managed by this allocator.
uint8_t* base_;
@@ -770,6 +780,19 @@ class RosAlloc {
size_t page_release_size_threshold = kDefaultPageReleaseSizeThreshold);
~RosAlloc();
+ static size_t RunFreeListOffset() {
+ return OFFSETOF_MEMBER(Run, free_list_);
+ }
+ static size_t RunFreeListHeadOffset() {
+ return OFFSETOF_MEMBER(SlotFreeList<false>, head_);
+ }
+ static size_t RunFreeListSizeOffset() {
+ return OFFSETOF_MEMBER(SlotFreeList<false>, size_);
+ }
+ static size_t RunSlotNextOffset() {
+ return OFFSETOF_MEMBER(Slot, next_);
+ }
+
// If kThreadUnsafe is true then the allocator may avoid acquiring some locks as an optimization.
// If used, this may cause race conditions if multiple threads are allocating at the same time.
template<bool kThreadSafe = true>
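For context on the two constants just added: with 16-byte quantum brackets, a size maps to its bracket index with a round-up and a shift. The following is a minimal sketch under that assumption, not RosAlloc's actual lookup code:

// Illustrative size-to-bracket-index arithmetic for quantum-spaced brackets.
#include <cassert>
#include <cstddef>

constexpr size_t kQuantum = 16;      // bracket size increment
constexpr size_t kQuantumShift = 4;  // log2(kQuantum)

// Index of the smallest bracket that can hold `size` bytes (size > 0).
size_t SizeToQuantumBracketIndex(size_t size) {
  assert(size > 0);
  // Round up to the next multiple of the quantum, then shift to get the index.
  return ((size + kQuantum - 1) >> kQuantumShift) - 1;
}

int main() {
  assert(SizeToQuantumBracketIndex(1) == 0);    // 16-byte bracket
  assert(SizeToQuantumBracketIndex(16) == 0);
  assert(SizeToQuantumBracketIndex(17) == 1);   // 32-byte bracket
  assert(SizeToQuantumBracketIndex(128) == 7);  // largest thread-local bracket
  return 0;
}
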
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 0a7a69f37e..d2d12af6b4 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -18,6 +18,7 @@
#include "art_field-inl.h"
#include "base/stl_util.h"
+#include "debugger.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/reference_processor.h"
@@ -385,6 +386,10 @@ void ConcurrentCopying::MarkingPhase() {
TimingLogger::ScopedTiming split5("VisitNonThreadRoots", GetTimings());
Runtime::Current()->VisitNonThreadRoots(this);
}
+ {
+ TimingLogger::ScopedTiming split6("Dbg::VisitRoots", GetTimings());
+ Dbg::VisitRoots(this);
+ }
Runtime::Current()->GetHeap()->VisitAllocationRecords(this);
// Immune spaces.
@@ -401,7 +406,7 @@ void ConcurrentCopying::MarkingPhase() {
Thread* self = Thread::Current();
{
- TimingLogger::ScopedTiming split6("ProcessMarkStack", GetTimings());
+ TimingLogger::ScopedTiming split7("ProcessMarkStack", GetTimings());
// We transition through three mark stack modes (thread-local, shared, GC-exclusive). The
// primary reasons are the fact that we need to use a checkpoint to process thread-local mark
// stacks, but after we disable weak refs accesses, we can't use a checkpoint due to a deadlock
diff --git a/runtime/gc/collector/sticky_mark_sweep.cc b/runtime/gc/collector/sticky_mark_sweep.cc
index 6c32658e43..bb7e854ea1 100644
--- a/runtime/gc/collector/sticky_mark_sweep.cc
+++ b/runtime/gc/collector/sticky_mark_sweep.cc
@@ -56,8 +56,7 @@ void StickyMarkSweep::MarkReachableObjects() {
RecursiveMarkDirtyObjects(false, accounting::CardTable::kCardDirty - 1);
}
-void StickyMarkSweep::Sweep(bool swap_bitmaps) {
- UNUSED(swap_bitmaps);
+void StickyMarkSweep::Sweep(bool swap_bitmaps ATTRIBUTE_UNUSED) {
SweepArray(GetHeap()->GetLiveStack(), false);
}
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 1923d24805..ce64b10364 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -789,10 +789,13 @@ OatFile* ImageSpace::OpenOatFile(const char* image_path, std::string* error_msg)
CHECK(image_header.GetOatDataBegin() != nullptr);
- OatFile* oat_file = OatFile::Open(oat_filename, oat_filename, image_header.GetOatDataBegin(),
+ OatFile* oat_file = OatFile::Open(oat_filename,
+ oat_filename,
+ image_header.GetOatDataBegin(),
image_header.GetOatFileBegin(),
!Runtime::Current()->IsAotCompiler(),
- nullptr, error_msg);
+ nullptr,
+ error_msg);
if (oat_file == nullptr) {
*error_msg = StringPrintf("Failed to open oat file '%s' referenced from image %s: %s",
oat_filename.c_str(), GetName(), error_msg->c_str());
@@ -839,15 +842,13 @@ bool ImageSpace::ValidateOatFile(std::string* error_msg) const {
return true;
}
-
const OatFile* ImageSpace::GetOatFile() const {
return oat_file_non_owned_;
}
-
-OatFile* ImageSpace::ReleaseOatFile() {
- CHECK(oat_file_.get() != nullptr);
- return oat_file_.release();
+std::unique_ptr<const OatFile> ImageSpace::ReleaseOatFile() {
+ CHECK(oat_file_ != nullptr);
+ return std::move(oat_file_);
}
void ImageSpace::Dump(std::ostream& os) const {
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index 215c18b8d9..99207426a0 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -62,9 +62,8 @@ class ImageSpace : public MemMapSpace {
const OatFile* GetOatFile() const;
// Releases the OatFile from the ImageSpace so it can be transferred to
- // the caller, presumably the ClassLinker.
- OatFile* ReleaseOatFile()
- SHARED_REQUIRES(Locks::mutator_lock_);
+ // the caller, presumably the OatFileManager.
+ std::unique_ptr<const OatFile> ReleaseOatFile();
void VerifyImageAllocations()
SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/gc/space/memory_tool_malloc_space.h b/runtime/gc/space/memory_tool_malloc_space.h
index a5dbad9af6..c0810110cc 100644
--- a/runtime/gc/space/memory_tool_malloc_space.h
+++ b/runtime/gc/space/memory_tool_malloc_space.h
@@ -48,9 +48,7 @@ class MemoryToolMallocSpace FINAL : public BaseMallocSpaceType {
size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE
SHARED_REQUIRES(Locks::mutator_lock_);
- void RegisterRecentFree(mirror::Object* ptr) OVERRIDE {
- UNUSED(ptr);
- }
+ void RegisterRecentFree(mirror::Object* ptr ATTRIBUTE_UNUSED) OVERRIDE {}
size_t MaxBytesBulkAllocatedFor(size_t num_bytes) OVERRIDE;
diff --git a/runtime/gc/space/zygote_space.cc b/runtime/gc/space/zygote_space.cc
index 9e882a898e..bbfcb31ab1 100644
--- a/runtime/gc/space/zygote_space.cc
+++ b/runtime/gc/space/zygote_space.cc
@@ -31,8 +31,7 @@ class CountObjectsAllocated {
explicit CountObjectsAllocated(size_t* objects_allocated)
: objects_allocated_(objects_allocated) {}
- void operator()(mirror::Object* obj) const {
- UNUSED(obj);
+ void operator()(mirror::Object* obj ATTRIBUTE_UNUSED) const {
++*objects_allocated_;
}
diff --git a/runtime/gc/task_processor_test.cc b/runtime/gc/task_processor_test.cc
index 2c44da231e..f1d26d9a41 100644
--- a/runtime/gc/task_processor_test.cc
+++ b/runtime/gc/task_processor_test.cc
@@ -105,8 +105,7 @@ class TestOrderTask : public HeapTask {
TestOrderTask(uint64_t expected_time, size_t expected_counter, size_t* counter)
: HeapTask(expected_time), expected_counter_(expected_counter), counter_(counter) {
}
- virtual void Run(Thread* thread) OVERRIDE {
- UNUSED(thread); // Fix cppling bug.
+ virtual void Run(Thread* thread ATTRIBUTE_UNUSED) OVERRIDE {
ASSERT_EQ(*counter_, expected_counter_);
++*counter_;
}
diff --git a/runtime/image.cc b/runtime/image.cc
index 42b348ac58..192371fe75 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -24,7 +24,7 @@
namespace art {
const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '2', '1', '\0' };
+const uint8_t ImageHeader::kImageVersion[] = { '0', '2', '2', '\0' };
ImageHeader::ImageHeader(uint32_t image_begin,
uint32_t image_size,
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index c9ba6cfada..a5b63b4271 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -17,6 +17,7 @@
#include "indirect_reference_table-inl.h"
#include "jni_internal.h"
+#include "nth_caller_visitor.h"
#include "reference_table.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index deada4c5dc..2dd2b7d403 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -19,6 +19,7 @@
#include <sstream>
#include "arch/context.h"
+#include "art_code.h"
#include "art_method-inl.h"
#include "atomic.h"
#include "class_linker.h"
@@ -251,7 +252,7 @@ static void InstrumentationInstallStack(Thread* thread, void* arg)
instrumentation_stack_->insert(it, instrumentation_frame);
SetReturnPc(instrumentation_exit_pc_);
}
- dex_pcs_.push_back(m->ToDexPc(last_return_pc_));
+ dex_pcs_.push_back(GetCurrentCode().ToDexPc(last_return_pc_));
last_return_pc_ = return_pc;
++instrumentation_stack_depth_;
return true; // Continue.
@@ -960,6 +961,15 @@ void Instrumentation::ExceptionCaughtEvent(Thread* thread,
}
}
+// Computes a frame ID by ignoring inlined frames.
+size_t Instrumentation::ComputeFrameId(Thread* self,
+ size_t frame_depth,
+ size_t inlined_frames_before_frame) {
+ CHECK_GE(frame_depth, inlined_frames_before_frame);
+ size_t no_inline_depth = frame_depth - inlined_frames_before_frame;
+ return StackVisitor::ComputeNumFrames(self, kInstrumentationStackWalk) - no_inline_depth;
+}
+
static void CheckStackDepth(Thread* self, const InstrumentationStackFrame& instrumentation_frame,
int delta)
SHARED_REQUIRES(Locks::mutator_lock_) {
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index 612ca14cf5..8dd2357e06 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -397,6 +397,11 @@ class Instrumentation {
SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!Locks::thread_list_lock_);
+ static size_t ComputeFrameId(Thread* self,
+ size_t frame_depth,
+ size_t inlined_frames_before_frame)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
private:
InstrumentationLevel GetCurrentInstrumentationLevel() const;
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index 179353e84b..f4658d5342 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -459,4 +459,12 @@ void InternTable::ChangeWeakRootStateLocked(gc::WeakRootState new_state) {
}
}
+InternTable::Table::Table() {
+ Runtime* const runtime = Runtime::Current();
+ pre_zygote_table_.SetLoadFactor(runtime->GetHashTableMinLoadFactor(),
+ runtime->GetHashTableMaxLoadFactor());
+ post_zygote_table_.SetLoadFactor(runtime->GetHashTableMinLoadFactor(),
+ runtime->GetHashTableMaxLoadFactor());
+}
+
} // namespace art
diff --git a/runtime/intern_table.h b/runtime/intern_table.h
index 24c5af938c..3a4e8d8f11 100644
--- a/runtime/intern_table.h
+++ b/runtime/intern_table.h
@@ -146,6 +146,7 @@ class InternTable {
// weak interns and strong interns.
class Table {
public:
+ Table();
mirror::String* Find(mirror::String* s) SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(Locks::intern_table_lock_);
void Insert(mirror::String* s) SHARED_REQUIRES(Locks::mutator_lock_)
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index f783b04b95..7c0594a8bb 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -14,10 +14,11 @@
* limitations under the License.
*/
-#include "interpreter_common.h"
+#include "interpreter.h"
#include <limits>
+#include "interpreter_common.h"
#include "mirror/string-inl.h"
#include "scoped_thread_state_change.h"
#include "ScopedLocalRef.h"
@@ -332,7 +333,7 @@ void EnterInterpreterFromInvoke(Thread* self, ArtMethod* method, Object* receive
// Set up shadow frame with matching number of reference slots to vregs.
ShadowFrame* last_shadow_frame = self->GetManagedStack()->GetTopShadowFrame();
ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
- CREATE_SHADOW_FRAME(num_regs, last_shadow_frame, method, 0);
+ CREATE_SHADOW_FRAME(num_regs, last_shadow_frame, method, /* dex pc */ 0);
ShadowFrame* shadow_frame = shadow_frame_unique_ptr.get();
self->PushShadowFrame(shadow_frame);
@@ -448,8 +449,8 @@ JValue EnterInterpreterFromEntryPoint(Thread* self, const DexFile::CodeItem* cod
return Execute(self, code_item, *shadow_frame, JValue());
}
-extern "C" void artInterpreterToInterpreterBridge(Thread* self, const DexFile::CodeItem* code_item,
- ShadowFrame* shadow_frame, JValue* result) {
+void ArtInterpreterToInterpreterBridge(Thread* self, const DexFile::CodeItem* code_item,
+ ShadowFrame* shadow_frame, JValue* result) {
bool implicit_check = !Runtime::Current()->ExplicitStackOverflowChecks();
if (UNLIKELY(__builtin_frame_address(0) < self->GetStackEndForInterpreter(implicit_check))) {
ThrowStackOverflowError(self);
@@ -457,10 +458,11 @@ extern "C" void artInterpreterToInterpreterBridge(Thread* self, const DexFile::C
}
self->PushShadowFrame(shadow_frame);
+ ArtMethod* method = shadow_frame->GetMethod();
// Ensure static methods are initialized.
- const bool is_static = shadow_frame->GetMethod()->IsStatic();
+ const bool is_static = method->IsStatic();
if (is_static) {
- mirror::Class* declaring_class = shadow_frame->GetMethod()->GetDeclaringClass();
+ mirror::Class* declaring_class = method->GetDeclaringClass();
if (UNLIKELY(!declaring_class->IsInitialized())) {
StackHandleScope<1> hs(self);
HandleWrapper<Class> h_declaring_class(hs.NewHandleWrapper(&declaring_class));
diff --git a/runtime/interpreter/interpreter.h b/runtime/interpreter/interpreter.h
index 61140a24cf..b21ea84d8e 100644
--- a/runtime/interpreter/interpreter.h
+++ b/runtime/interpreter/interpreter.h
@@ -45,16 +45,11 @@ extern JValue EnterInterpreterFromEntryPoint(Thread* self, const DexFile::CodeIt
ShadowFrame* shadow_frame)
SHARED_REQUIRES(Locks::mutator_lock_);
-
-} // namespace interpreter
-
-extern "C" void artInterpreterToInterpreterBridge(Thread* self, const DexFile::CodeItem* code_item,
- ShadowFrame* shadow_frame, JValue* result)
+void ArtInterpreterToInterpreterBridge(Thread* self, const DexFile::CodeItem* code_item,
+ ShadowFrame* shadow_frame, JValue* result)
SHARED_REQUIRES(Locks::mutator_lock_);
-extern "C" void artInterpreterToCompiledCodeBridge(Thread* self, const DexFile::CodeItem* code_item,
- ShadowFrame* shadow_frame, JValue* result)
- SHARED_REQUIRES(Locks::mutator_lock_);
+} // namespace interpreter
} // namespace art
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index ad34c9ad9e..18fb0d8518 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -40,8 +40,9 @@ bool DoFieldGet(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst
uint16_t inst_data) {
const bool is_static = (find_type == StaticObjectRead) || (find_type == StaticPrimitiveRead);
const uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
- ArtField* f = FindFieldFromCode<find_type, do_access_check>(field_idx, shadow_frame.GetMethod(), self,
- Primitive::ComponentSize(field_type));
+ ArtField* f =
+ FindFieldFromCode<find_type, do_access_check>(field_idx, shadow_frame.GetMethod(), self,
+ Primitive::ComponentSize(field_type));
if (UNLIKELY(f == nullptr)) {
CHECK(self->IsExceptionPending());
return false;
@@ -234,8 +235,9 @@ bool DoFieldPut(Thread* self, const ShadowFrame& shadow_frame, const Instruction
bool do_assignability_check = do_access_check;
bool is_static = (find_type == StaticObjectWrite) || (find_type == StaticPrimitiveWrite);
uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
- ArtField* f = FindFieldFromCode<find_type, do_access_check>(field_idx, shadow_frame.GetMethod(), self,
- Primitive::ComponentSize(field_type));
+ ArtField* f =
+ FindFieldFromCode<find_type, do_access_check>(field_idx, shadow_frame.GetMethod(), self,
+ Primitive::ComponentSize(field_type));
if (UNLIKELY(f == nullptr)) {
CHECK(self->IsExceptionPending());
return false;
@@ -516,6 +518,39 @@ static inline bool NeedsInterpreter(Thread* self, ShadowFrame* new_shadow_frame)
Dbg::IsForcedInterpreterNeededForCalling(self, target);
}
+static void ArtInterpreterToCompiledCodeBridge(Thread* self,
+ const DexFile::CodeItem* code_item,
+ ShadowFrame* shadow_frame,
+ JValue* result)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ ArtMethod* method = shadow_frame->GetMethod();
+ // Ensure static methods are initialized.
+ if (method->IsStatic()) {
+ mirror::Class* declaringClass = method->GetDeclaringClass();
+ if (UNLIKELY(!declaringClass->IsInitialized())) {
+ self->PushShadowFrame(shadow_frame);
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> h_class(hs.NewHandle(declaringClass));
+ if (UNLIKELY(!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_class, true,
+ true))) {
+ self->PopShadowFrame();
+ DCHECK(self->IsExceptionPending());
+ return;
+ }
+ self->PopShadowFrame();
+ CHECK(h_class->IsInitializing());
+      // Reload from shadow frame in case the method moved; this is faster than adding a handle.
+ method = shadow_frame->GetMethod();
+ }
+ }
+ uint16_t arg_offset = (code_item == nullptr)
+ ? 0
+ : code_item->registers_size_ - code_item->ins_size_;
+ method->Invoke(self, shadow_frame->GetVRegArgs(arg_offset),
+ (shadow_frame->NumberOfVRegs() - arg_offset) * sizeof(uint32_t),
+ result, method->GetInterfaceMethodIfProxy(sizeof(void*))->GetShorty());
+}
+
template <bool is_range,
bool do_assignability_check,
size_t kVarArgMax>
@@ -586,7 +621,7 @@ static inline bool DoCallCommon(ArtMethod* called_method,
// Allocate shadow frame on the stack.
const char* old_cause = self->StartAssertNoThreadSuspension("DoCallCommon");
ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
- CREATE_SHADOW_FRAME(num_regs, &shadow_frame, called_method, 0);
+ CREATE_SHADOW_FRAME(num_regs, &shadow_frame, called_method, /* dex pc */ 0);
ShadowFrame* new_shadow_frame = shadow_frame_unique_ptr.get();
// Initialize new shadow frame by copying the registers from the callee shadow frame.
@@ -690,9 +725,9 @@ static inline bool DoCallCommon(ArtMethod* called_method,
// Do the call now.
if (LIKELY(Runtime::Current()->IsStarted())) {
if (NeedsInterpreter(self, new_shadow_frame)) {
- artInterpreterToInterpreterBridge(self, code_item, new_shadow_frame, result);
+ ArtInterpreterToInterpreterBridge(self, code_item, new_shadow_frame, result);
} else {
- artInterpreterToCompiledCodeBridge(self, code_item, new_shadow_frame, result);
+ ArtInterpreterToCompiledCodeBridge(self, code_item, new_shadow_frame, result);
}
} else {
UnstartedRuntime::Invoke(self, code_item, new_shadow_frame, result, first_dest_reg);
@@ -742,7 +777,7 @@ static inline bool DoCallCommon(ArtMethod* called_method,
template<bool is_range, bool do_assignability_check>
bool DoLambdaCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_frame,
- const Instruction* inst, uint16_t inst_data, JValue* result) {
+ const Instruction* inst, uint16_t inst_data ATTRIBUTE_UNUSED, JValue* result) {
const uint4_t num_additional_registers = inst->VRegB_25x();
// Argument word count.
const uint16_t number_of_inputs = num_additional_registers + kLambdaVirtualRegisterWidth;
@@ -757,7 +792,6 @@ bool DoLambdaCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_fr
vregC = inst->VRegC_3rc();
} else {
// TODO(iam): See if it's possible to remove inst_data dependency from 35x to avoid this path
- UNUSED(inst_data);
inst->GetAllArgs25x(arg);
}
@@ -773,7 +807,8 @@ template<bool is_range, bool do_assignability_check>
bool DoCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_frame,
const Instruction* inst, uint16_t inst_data, JValue* result) {
// Argument word count.
- const uint16_t number_of_inputs = (is_range) ? inst->VRegA_3rc(inst_data) : inst->VRegA_35c(inst_data);
+ const uint16_t number_of_inputs =
+ (is_range) ? inst->VRegA_3rc(inst_data) : inst->VRegA_35c(inst_data);
// TODO: find a cleaner way to separate non-range and range information without duplicating
// code.
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index f57bddbb4f..8c495fc7bb 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -45,6 +45,7 @@
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
+#include "stack.h"
#include "thread.h"
#include "well_known_classes.h"
@@ -79,12 +80,28 @@ extern JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item,
void ThrowNullPointerExceptionFromInterpreter()
SHARED_REQUIRES(Locks::mutator_lock_);
-static inline void DoMonitorEnter(Thread* self, Object* ref) NO_THREAD_SAFETY_ANALYSIS {
- ref->MonitorEnter(self);
+template <bool kMonitorCounting>
+static inline void DoMonitorEnter(Thread* self,
+ ShadowFrame* frame,
+ Object* ref)
+ NO_THREAD_SAFETY_ANALYSIS
+ REQUIRES(!Roles::uninterruptible_) {
+ StackHandleScope<1> hs(self);
+ Handle<Object> h_ref(hs.NewHandle(ref));
+ h_ref->MonitorEnter(self);
+ frame->GetLockCountData().AddMonitor<kMonitorCounting>(self, h_ref.Get());
}
-static inline void DoMonitorExit(Thread* self, Object* ref) NO_THREAD_SAFETY_ANALYSIS {
- ref->MonitorExit(self);
+template <bool kMonitorCounting>
+static inline void DoMonitorExit(Thread* self,
+ ShadowFrame* frame,
+ Object* ref)
+ NO_THREAD_SAFETY_ANALYSIS
+ REQUIRES(!Roles::uninterruptible_) {
+ StackHandleScope<1> hs(self);
+ Handle<Object> h_ref(hs.NewHandle(ref));
+ h_ref->MonitorExit(self);
+ frame->GetLockCountData().RemoveMonitorOrThrow<kMonitorCounting>(self, h_ref.Get());
}
void AbortTransactionF(Thread* self, const char* fmt, ...)
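The templated DoMonitorEnter/DoMonitorExit above thread a shadow frame through so lock acquisitions can be recorded per frame and checked at method exit (structured locking). A minimal standalone sketch of that bookkeeping idea, with an illustrative LockBook type rather than ART's LockCountData:

// Per-frame monitor bookkeeping: record each enter, require a matching exit,
// and verify everything is released when the frame returns.
#include <algorithm>
#include <iostream>
#include <vector>

class LockBook {
 public:
  void AddMonitor(void* obj) { held_.push_back(obj); }

  // Returns false (ART would throw) if the object was never locked by this frame.
  bool RemoveMonitorOrFail(void* obj) {
    auto it = std::find(held_.begin(), held_.end(), obj);
    if (it == held_.end()) {
      return false;
    }
    held_.erase(it);
    return true;
  }

  // Checked when the frame returns: every monitor-enter must be balanced.
  bool AllMonitorsReleased() const { return held_.empty(); }

 private:
  std::vector<void*> held_;  // objects locked by this frame, one entry per enter
};

int main() {
  int object;  // stand-in for a synchronized-on object
  LockBook frame_locks;
  frame_locks.AddMonitor(&object);           // monitor-enter
  frame_locks.RemoveMonitorOrFail(&object);  // monitor-exit
  std::cout << std::boolalpha << frame_locks.AllMonitorsReleased() << "\n";  // true
  return 0;
}
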
diff --git a/runtime/interpreter/interpreter_goto_table_impl.cc b/runtime/interpreter/interpreter_goto_table_impl.cc
index 9677d79de3..9766299ae0 100644
--- a/runtime/interpreter/interpreter_goto_table_impl.cc
+++ b/runtime/interpreter/interpreter_goto_table_impl.cc
@@ -19,6 +19,7 @@
#include "base/stl_util.h" // MakeUnique
+#include "experimental_flags.h"
#include "interpreter_common.h"
#include "safe_math.h"
@@ -83,12 +84,17 @@ namespace interpreter {
#define HANDLE_EXPERIMENTAL_INSTRUCTION_START(opcode) \
HANDLE_INSTRUCTION_START(opcode); \
DCHECK(inst->IsExperimental()); \
- if (Runtime::Current()->AreExperimentalLambdasEnabled()) {
+ if (Runtime::Current()->AreExperimentalFlagsEnabled(ExperimentalFlags::kLambdas)) {
#define HANDLE_EXPERIMENTAL_INSTRUCTION_END() \
} else { \
UnexpectedOpcode(inst, shadow_frame); \
} HANDLE_INSTRUCTION_END();
+#define HANDLE_MONITOR_CHECKS() \
+ if (!shadow_frame.GetLockCountData(). \
+ CheckAllMonitorsReleasedOrThrow<do_assignability_check>(self)) { \
+ HANDLE_PENDING_EXCEPTION(); \
+ }
/**
* Interpreter based on computed goto tables.
@@ -275,6 +281,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(RETURN_VOID_NO_BARRIER) {
JValue result;
self->AllowThreadSuspension();
+ HANDLE_MONITOR_CHECKS();
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
@@ -289,6 +296,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
QuasiAtomic::ThreadFenceForConstructor();
JValue result;
self->AllowThreadSuspension();
+ HANDLE_MONITOR_CHECKS();
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
@@ -304,6 +312,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
result.SetJ(0);
result.SetI(shadow_frame.GetVReg(inst->VRegA_11x(inst_data)));
self->AllowThreadSuspension();
+ HANDLE_MONITOR_CHECKS();
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
@@ -318,6 +327,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
JValue result;
result.SetJ(shadow_frame.GetVRegLong(inst->VRegA_11x(inst_data)));
self->AllowThreadSuspension();
+ HANDLE_MONITOR_CHECKS();
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
@@ -331,6 +341,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(RETURN_OBJECT) {
JValue result;
self->AllowThreadSuspension();
+ HANDLE_MONITOR_CHECKS();
const uint8_t vreg_index = inst->VRegA_11x(inst_data);
Object* obj_result = shadow_frame.GetVRegReference(vreg_index);
if (do_assignability_check && obj_result != nullptr) {
@@ -468,7 +479,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
- DoMonitorEnter(self, obj);
+ DoMonitorEnter<do_access_check>(self, &shadow_frame, obj);
POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), 1);
}
}
@@ -480,7 +491,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
- DoMonitorExit(self, obj);
+ DoMonitorExit<do_access_check>(self, &shadow_frame, obj);
POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), 1);
}
}
@@ -2544,6 +2555,8 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
uint32_t found_dex_pc = FindNextInstructionFollowingException(self, shadow_frame, dex_pc,
instrumentation);
if (found_dex_pc == DexFile::kDexNoIndex) {
+ // Structured locking is to be enforced for abnormal termination, too.
+ shadow_frame.GetLockCountData().CheckAllMonitorsReleasedOrThrow<do_assignability_check>(self);
return JValue(); /* Handled in caller. */
} else {
int32_t displacement = static_cast<int32_t>(found_dex_pc) - static_cast<int32_t>(dex_pc);
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index 083dfb5267..bf95a0e46f 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -15,6 +15,7 @@
*/
#include "base/stl_util.h" // MakeUnique
+#include "experimental_flags.h"
#include "interpreter_common.h"
#include "safe_math.h"
@@ -31,6 +32,9 @@ namespace interpreter {
inst->GetDexPc(insns), \
instrumentation); \
if (found_dex_pc == DexFile::kDexNoIndex) { \
+ /* Structured locking is to be enforced for abnormal termination, too. */ \
+ shadow_frame.GetLockCountData(). \
+ CheckAllMonitorsReleasedOrThrow<do_assignability_check>(self); \
return JValue(); /* Handled in caller. */ \
} else { \
int32_t displacement = static_cast<int32_t>(found_dex_pc) - static_cast<int32_t>(dex_pc); \
@@ -47,6 +51,12 @@ namespace interpreter {
} \
} while (false)
+#define HANDLE_MONITOR_CHECKS() \
+ if (!shadow_frame.GetLockCountData(). \
+ CheckAllMonitorsReleasedOrThrow<do_assignability_check>(self)) { \
+ HANDLE_PENDING_EXCEPTION(); \
+ }
+
// Code to run before each dex instruction.
#define PREAMBLE() \
do { \
@@ -58,7 +68,7 @@ namespace interpreter {
static bool IsExperimentalInstructionEnabled(const Instruction *inst) {
DCHECK(inst->IsExperimental());
- return Runtime::Current()->AreExperimentalLambdasEnabled();
+ return Runtime::Current()->AreExperimentalFlagsEnabled(ExperimentalFlags::kLambdas);
}
template<bool do_access_check, bool transaction_active>
@@ -182,6 +192,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
PREAMBLE();
JValue result;
self->AllowThreadSuspension();
+ HANDLE_MONITOR_CHECKS();
if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
shadow_frame.GetMethod(), inst->GetDexPc(insns),
@@ -194,6 +205,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
QuasiAtomic::ThreadFenceForConstructor();
JValue result;
self->AllowThreadSuspension();
+ HANDLE_MONITOR_CHECKS();
if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
shadow_frame.GetMethod(), inst->GetDexPc(insns),
@@ -207,6 +219,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
result.SetJ(0);
result.SetI(shadow_frame.GetVReg(inst->VRegA_11x(inst_data)));
self->AllowThreadSuspension();
+ HANDLE_MONITOR_CHECKS();
if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
shadow_frame.GetMethod(), inst->GetDexPc(insns),
@@ -219,6 +232,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
JValue result;
result.SetJ(shadow_frame.GetVRegLong(inst->VRegA_11x(inst_data)));
self->AllowThreadSuspension();
+ HANDLE_MONITOR_CHECKS();
if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
shadow_frame.GetMethod(), inst->GetDexPc(insns),
@@ -230,6 +244,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
PREAMBLE();
JValue result;
self->AllowThreadSuspension();
+ HANDLE_MONITOR_CHECKS();
const size_t ref_idx = inst->VRegA_11x(inst_data);
Object* obj_result = shadow_frame.GetVRegReference(ref_idx);
if (do_assignability_check && obj_result != nullptr) {
@@ -366,7 +381,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
- DoMonitorEnter(self, obj);
+ DoMonitorEnter<do_assignability_check>(self, &shadow_frame, obj);
POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_1xx);
}
break;
@@ -378,7 +393,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
- DoMonitorExit(self, obj);
+ DoMonitorExit<do_assignability_check>(self, &shadow_frame, obj);
POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_1xx);
}
break;
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index c559389dba..92b6e4fe0d 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -1099,7 +1099,7 @@ void UnstartedRuntime::Invoke(Thread* self, const DexFile::CodeItem* code_item,
(*iter->second)(self, shadow_frame, result, arg_offset);
} else {
// Not special, continue with regular interpreter execution.
- artInterpreterToInterpreterBridge(self, code_item, shadow_frame, result);
+ ArtInterpreterToInterpreterBridge(self, code_item, shadow_frame, result);
}
}
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index 0a4d6e3d9e..5427a5812a 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -1072,9 +1072,8 @@ static JdwpError TR_CurrentContendedMonitor(JdwpState*, Request* request, Expand
return WriteTaggedObject(reply, contended_monitor);
}
-static JdwpError TR_Interrupt(JdwpState*, Request* request, ExpandBuf* reply)
+static JdwpError TR_Interrupt(JdwpState*, Request* request, ExpandBuf* reply ATTRIBUTE_UNUSED)
SHARED_REQUIRES(Locks::mutator_lock_) {
- UNUSED(reply);
ObjectId thread_id = request->ReadThreadId();
return Dbg::Interrupt(thread_id);
}
diff --git a/runtime/jni_env_ext.cc b/runtime/jni_env_ext.cc
index 4104d7a0e8..dab10403af 100644
--- a/runtime/jni_env_ext.cc
+++ b/runtime/jni_env_ext.cc
@@ -93,8 +93,7 @@ void JNIEnvExt::DumpReferenceTables(std::ostream& os) {
monitors.Dump(os);
}
-void JNIEnvExt::PushFrame(int capacity) {
- UNUSED(capacity); // cpplint gets confused with (int) and thinks its a cast.
+void JNIEnvExt::PushFrame(int capacity ATTRIBUTE_UNUSED) {
// TODO: take 'capacity' into account.
stacked_local_ref_cookies.push_back(local_ref_cookie);
local_ref_cookie = locals.GetSegmentState();
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 6bc18291cb..234a733967 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -1743,8 +1743,9 @@ class JNI {
return static_cast<jchar*>(s->GetValue());
}
- static void ReleaseStringCritical(JNIEnv* env, jstring java_string, const jchar* chars) {
- UNUSED(chars);
+ static void ReleaseStringCritical(JNIEnv* env,
+ jstring java_string,
+ const jchar* chars ATTRIBUTE_UNUSED) {
CHECK_NON_NULL_ARGUMENT_RETURN_VOID(java_string);
ScopedObjectAccess soa(env);
gc::Heap* heap = Runtime::Current()->GetHeap();
diff --git a/runtime/jvalue.h b/runtime/jvalue.h
index 6a6d1986dc..7b91b0b2b6 100644
--- a/runtime/jvalue.h
+++ b/runtime/jvalue.h
@@ -32,7 +32,7 @@ union PACKED(4) JValue {
int8_t GetB() const { return b; }
void SetB(int8_t new_b) {
- i = ((static_cast<int32_t>(new_b) << 24) >> 24); // Sign-extend.
+ j = ((static_cast<int64_t>(new_b) << 56) >> 56); // Sign-extend to 64 bits.
}
uint16_t GetC() const { return c; }
@@ -45,7 +45,9 @@ union PACKED(4) JValue {
void SetF(float new_f) { f = new_f; }
int32_t GetI() const { return i; }
- void SetI(int32_t new_i) { i = new_i; }
+ void SetI(int32_t new_i) {
+ j = ((static_cast<int64_t>(new_i) << 32) >> 32); // Sign-extend to 64 bits.
+ }
int64_t GetJ() const { return j; }
void SetJ(int64_t new_j) { j = new_j; }
@@ -55,7 +57,7 @@ union PACKED(4) JValue {
int16_t GetS() const { return s; }
void SetS(int16_t new_s) {
- i = ((static_cast<int32_t>(new_s) << 16) >> 16); // Sign-extend.
+ j = ((static_cast<int64_t>(new_s) << 48) >> 48); // Sign-extend to 64 bits.
}
uint8_t GetZ() const { return z; }
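The JValue setters above now write narrow values through the 64-bit member so the upper bits of the union slot are always defined. A standalone illustration of the shift-based sign extension, using a made-up Value union rather than ART's JValue:

// Writing a narrow value through the widest union member keeps bits 8..63 defined.
#include <cstdint>
#include <iostream>

union Value {
  int8_t b;
  int32_t i;
  int64_t j;
};

int main() {
  Value v;
  int8_t byte_val = -1;
  // Shift left then arithmetic-shift right to sign-extend into all 64 bits.
  v.j = (static_cast<int64_t>(byte_val) << 56) >> 56;
  std::cout << v.j << "\n";  // -1: the full 64-bit slot holds the extended value
  // Writing only v.b instead would leave the upper bytes of the slot stale.
  return 0;
}
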
diff --git a/runtime/leb128.h b/runtime/leb128.h
index baf9da28f0..74934aebf0 100644
--- a/runtime/leb128.h
+++ b/runtime/leb128.h
@@ -127,8 +127,9 @@ static inline uint8_t* EncodeUnsignedLeb128(uint8_t* dest, uint32_t value) {
return dest;
}
-template<typename Allocator>
-static inline void EncodeUnsignedLeb128(std::vector<uint8_t, Allocator>* dest, uint32_t value) {
+template <typename Vector>
+static inline void EncodeUnsignedLeb128(Vector* dest, uint32_t value) {
+ static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type");
uint8_t out = value & 0x7f;
value >>= 7;
while (value != 0) {
@@ -165,8 +166,9 @@ static inline uint8_t* EncodeSignedLeb128(uint8_t* dest, int32_t value) {
return dest;
}
-template<typename Allocator>
-static inline void EncodeSignedLeb128(std::vector<uint8_t, Allocator>* dest, int32_t value) {
+template<typename Vector>
+static inline void EncodeSignedLeb128(Vector* dest, int32_t value) {
+ static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type");
uint32_t extra_bits = static_cast<uint32_t>(value ^ (value >> 31)) >> 6;
uint8_t out = value & 0x7f;
while (extra_bits != 0u) {
@@ -179,10 +181,12 @@ static inline void EncodeSignedLeb128(std::vector<uint8_t, Allocator>* dest, int
}
// An encoder that pushes int32_t/uint32_t data onto the given std::vector.
-template <typename Allocator = std::allocator<uint8_t>>
+template <typename Vector = std::vector<uint8_t>>
class Leb128Encoder {
+ static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type");
+
public:
- explicit Leb128Encoder(std::vector<uint8_t, Allocator>* data) : data_(data) {
+ explicit Leb128Encoder(Vector* data) : data_(data) {
DCHECK(data != nullptr);
}
@@ -212,27 +216,29 @@ class Leb128Encoder {
}
}
- const std::vector<uint8_t, Allocator>& GetData() const {
+ const Vector& GetData() const {
return *data_;
}
protected:
- std::vector<uint8_t, Allocator>* const data_;
+ Vector* const data_;
private:
DISALLOW_COPY_AND_ASSIGN(Leb128Encoder);
};
// An encoder with an API similar to vector<uint32_t> where the data is captured in ULEB128 format.
-template <typename Allocator = std::allocator<uint8_t>>
-class Leb128EncodingVector FINAL : private std::vector<uint8_t, Allocator>,
- public Leb128Encoder<Allocator> {
+template <typename Vector = std::vector<uint8_t>>
+class Leb128EncodingVector FINAL : private Vector,
+ public Leb128Encoder<Vector> {
+ static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type");
+
public:
- Leb128EncodingVector() : Leb128Encoder<Allocator>(this) { }
+ Leb128EncodingVector() : Leb128Encoder<Vector>(this) { }
- explicit Leb128EncodingVector(const Allocator& alloc)
- : std::vector<uint8_t, Allocator>(alloc),
- Leb128Encoder<Allocator>(this) { }
+ explicit Leb128EncodingVector(const typename Vector::allocator_type& alloc)
+ : Vector(alloc),
+ Leb128Encoder<Vector>(this) { }
private:
DISALLOW_COPY_AND_ASSIGN(Leb128EncodingVector);
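The leb128.h change generalizes the encoders from std::vector-with-allocator to any byte container. A self-contained ULEB128 encode in the same shape, checked against the classic 624485 -> 0xE5 0x8E 0x26 example:

// ULEB128: emit 7 bits per byte, setting the high bit while more bytes follow.
#include <cassert>
#include <cstdint>
#include <type_traits>
#include <vector>

template <typename Vector>
void EncodeUnsignedLeb128(Vector* dest, uint32_t value) {
  static_assert(std::is_same<typename Vector::value_type, uint8_t>::value,
                "Invalid value type");
  uint8_t out = value & 0x7f;
  value >>= 7;
  while (value != 0) {
    dest->push_back(out | 0x80);  // more bytes follow
    out = value & 0x7f;
    value >>= 7;
  }
  dest->push_back(out);
}

int main() {
  std::vector<uint8_t> buf;
  EncodeUnsignedLeb128(&buf, 624485u);
  assert(buf.size() == 3 && buf[0] == 0xE5 && buf[1] == 0x8E && buf[2] == 0x26);
  return 0;
}
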
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index 2a019c5bae..2d3581da8f 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -134,11 +134,25 @@ static uintptr_t GenerateNextMemPos() {
uintptr_t MemMap::next_mem_pos_ = GenerateNextMemPos();
#endif
-// Return true if the address range is contained in a single /proc/self/map entry.
-static bool ContainedWithinExistingMap(uint8_t* ptr, size_t size,
- std::string* error_msg) {
+// Return true if the address range is contained in a single memory map by either reading
+// the maps_ variable or the /proc/self/map entry.
+bool MemMap::ContainedWithinExistingMap(uint8_t* ptr, size_t size, std::string* error_msg) {
uintptr_t begin = reinterpret_cast<uintptr_t>(ptr);
uintptr_t end = begin + size;
+
+ // There is a suspicion that BacktraceMap::Create is occasionally missing maps. TODO: Investigate
+ // further.
+ {
+ MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
+ for (auto& pair : *maps_) {
+ MemMap* const map = pair.second;
+ if (begin >= reinterpret_cast<uintptr_t>(map->Begin()) &&
+ end <= reinterpret_cast<uintptr_t>(map->End())) {
+ return true;
+ }
+ }
+ }
+
std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid(), true));
if (map.get() == nullptr) {
*error_msg = StringPrintf("Failed to build process map");
diff --git a/runtime/mem_map.h b/runtime/mem_map.h
index 196a7f6292..7c11cebcef 100644
--- a/runtime/mem_map.h
+++ b/runtime/mem_map.h
@@ -161,6 +161,8 @@ class MemMap {
REQUIRES(Locks::mem_maps_lock_);
static MemMap* GetLargestMemMapAt(void* address)
REQUIRES(Locks::mem_maps_lock_);
+ static bool ContainedWithinExistingMap(uint8_t* ptr, size_t size, std::string* error_msg)
+ REQUIRES(!Locks::mem_maps_lock_);
const std::string name_;
uint8_t* const begin_; // Start of data.
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index 3d540297e5..ec7d758ebb 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -100,9 +100,8 @@ class SetLengthVisitor {
explicit SetLengthVisitor(int32_t length) : length_(length) {
}
- void operator()(Object* obj, size_t usable_size) const
+ void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
SHARED_REQUIRES(Locks::mutator_lock_) {
- UNUSED(usable_size);
// Avoid AsArray as object is not yet in live bitmap or allocation stack.
Array* array = down_cast<Array*>(obj);
// DCHECK(array->IsArrayInstance());
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 93f2aea38e..a528c3b890 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -392,7 +392,8 @@ inline ArtMethod* Class::FindVirtualMethodForInterface(ArtMethod* method, size_t
}
inline ArtMethod* Class::FindVirtualMethodForVirtual(ArtMethod* method, size_t pointer_size) {
- DCHECK(!method->GetDeclaringClass()->IsInterface() || method->IsMiranda());
+ // Only miranda or default methods may come from interfaces and be used as a virtual.
+ DCHECK(!method->GetDeclaringClass()->IsInterface() || method->IsDefault() || method->IsMiranda());
// The argument method may come from a super class.
// Use the index to a potentially overridden one for this instance's class.
return GetVTableEntry(method->GetMethodIndex(), pointer_size);
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 2668b3db9c..8219d69b6e 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -30,6 +30,7 @@
#include "primitive.h"
#include "read_barrier_option.h"
#include "stride_iterator.h"
+#include "thread.h"
#include "utils.h"
#ifndef IMT_SIZE
@@ -229,6 +230,18 @@ class MANAGED Class FINAL : public Object {
return (GetAccessFlags() & kAccClassIsFinalizable) != 0;
}
+ ALWAYS_INLINE void SetRecursivelyInitialized() SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK_EQ(GetLockOwnerThreadId(), Thread::Current()->GetThreadId());
+ uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_));
+ SetAccessFlags(flags | kAccRecursivelyInitialized);
+ }
+
+ ALWAYS_INLINE void SetHasDefaultMethods() SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK_EQ(GetLockOwnerThreadId(), Thread::Current()->GetThreadId());
+ uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_));
+ SetAccessFlags(flags | kAccHasDefaultMethod);
+ }
+
ALWAYS_INLINE void SetFinalizable() SHARED_REQUIRES(Locks::mutator_lock_) {
uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_));
SetAccessFlags(flags | kAccClassIsFinalizable);
@@ -860,6 +873,14 @@ class MANAGED Class FINAL : public Object {
ArtMethod* FindClassInitializer(size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool HasDefaultMethods() SHARED_REQUIRES(Locks::mutator_lock_) {
+ return (GetAccessFlags() & kAccHasDefaultMethod) != 0;
+ }
+
+ bool HasBeenRecursivelyInitialized() SHARED_REQUIRES(Locks::mutator_lock_) {
+ return (GetAccessFlags() & kAccRecursivelyInitialized) != 0;
+ }
+
ALWAYS_INLINE int32_t GetIfTableCount() SHARED_REQUIRES(Locks::mutator_lock_);
ALWAYS_INLINE IfTable* GetIfTable() SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/mirror/dex_cache_test.cc b/runtime/mirror/dex_cache_test.cc
index 8fb860fa6b..48f2ca59e8 100644
--- a/runtime/mirror/dex_cache_test.cc
+++ b/runtime/mirror/dex_cache_test.cc
@@ -20,9 +20,8 @@
#include "class_linker.h"
#include "common_runtime_test.h"
-#include "gc/heap.h"
-#include "mirror/object_array-inl.h"
-#include "mirror/object-inl.h"
+#include "linear_alloc.h"
+#include "mirror/class_loader-inl.h"
#include "handle_scope-inl.h"
#include "scoped_thread_state_change.h"
@@ -36,7 +35,9 @@ TEST_F(DexCacheTest, Open) {
StackHandleScope<1> hs(soa.Self());
ASSERT_TRUE(java_lang_dex_file_ != nullptr);
Handle<DexCache> dex_cache(
- hs.NewHandle(class_linker_->AllocDexCache(soa.Self(), *java_lang_dex_file_)));
+ hs.NewHandle(class_linker_->AllocDexCache(soa.Self(),
+ *java_lang_dex_file_,
+ Runtime::Current()->GetLinearAlloc())));
ASSERT_TRUE(dex_cache.Get() != nullptr);
EXPECT_EQ(java_lang_dex_file_->NumStringIds(), dex_cache->NumStrings());
@@ -45,5 +46,21 @@ TEST_F(DexCacheTest, Open) {
EXPECT_EQ(java_lang_dex_file_->NumFieldIds(), dex_cache->NumResolvedFields());
}
+TEST_F(DexCacheTest, LinearAlloc) {
+ ScopedObjectAccess soa(Thread::Current());
+ jobject jclass_loader(LoadDex("Main"));
+ ASSERT_TRUE(jclass_loader != nullptr);
+ Runtime* const runtime = Runtime::Current();
+ ClassLinker* const class_linker = runtime->GetClassLinker();
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
+ soa.Decode<mirror::ClassLoader*>(jclass_loader)));
+ mirror::Class* klass = class_linker->FindClass(soa.Self(), "LMain;", class_loader);
+ ASSERT_TRUE(klass != nullptr);
+ LinearAlloc* const linear_alloc = klass->GetClassLoader()->GetAllocator();
+ EXPECT_NE(linear_alloc, runtime->GetLinearAlloc());
+ EXPECT_TRUE(linear_alloc->Contains(klass->GetDexCache()->GetResolvedMethods()));
+}
+
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 50490bbcae..f75b8aeef4 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -137,9 +137,13 @@ class MANAGED LOCKABLE Object {
SHARED_REQUIRES(Locks::mutator_lock_);
uint32_t GetLockOwnerThreadId();
- mirror::Object* MonitorEnter(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_)
- EXCLUSIVE_LOCK_FUNCTION();
- bool MonitorExit(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_)
+ mirror::Object* MonitorEnter(Thread* self)
+ EXCLUSIVE_LOCK_FUNCTION()
+ REQUIRES(!Roles::uninterruptible_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ bool MonitorExit(Thread* self)
+ REQUIRES(!Roles::uninterruptible_)
+ SHARED_REQUIRES(Locks::mutator_lock_)
UNLOCK_FUNCTION();
void Notify(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
void NotifyAll(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/modifiers.h b/runtime/modifiers.h
index f7ab10be9d..116cbe9254 100644
--- a/runtime/modifiers.h
+++ b/runtime/modifiers.h
@@ -49,8 +49,13 @@ static constexpr uint32_t kAccPreverified = 0x00080000; // class (runt
// method (dex only)
static constexpr uint32_t kAccFastNative = 0x00080000; // method (dex only)
static constexpr uint32_t kAccMiranda = 0x00200000; // method (dex only)
+static constexpr uint32_t kAccDefault = 0x00400000; // method (runtime)
// Special runtime-only flags.
+// Interface and all its super-interfaces with default methods have been recursively initialized.
+static constexpr uint32_t kAccRecursivelyInitialized = 0x20000000;
+// Interface declares some default method.
+static constexpr uint32_t kAccHasDefaultMethod = 0x40000000;
// class/ancestor overrides finalize()
static constexpr uint32_t kAccClassIsFinalizable = 0x80000000;
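The two new runtime-only bits share the 32-bit access_flags_ word with the dex-defined modifiers; the mirror::Class setters earlier in this change OR the bit into the stored field and the getters test it with a mask. A minimal standalone sketch of that pattern follows (flag names and values are copied from the hunks above; the holder class itself is purely illustrative):

    #include <cstdint>

    // Runtime-only access flag bits, matching the values added in modifiers.h.
    static constexpr uint32_t kAccRecursivelyInitialized = 0x20000000;
    static constexpr uint32_t kAccHasDefaultMethod       = 0x40000000;

    // Illustrative stand-in for mirror::Class's access_flags_ handling.
    class FlagsHolder {
     public:
      void SetRecursivelyInitialized() { access_flags_ |= kAccRecursivelyInitialized; }
      void SetHasDefaultMethods() { access_flags_ |= kAccHasDefaultMethod; }
      bool HasBeenRecursivelyInitialized() const {
        return (access_flags_ & kAccRecursivelyInitialized) != 0;
      }
      bool HasDefaultMethods() const { return (access_flags_ & kAccHasDefaultMethod) != 0; }
     private:
      uint32_t access_flags_ = 0;
    };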
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index fa5841882e..81e7e6d675 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -274,7 +274,7 @@ void Monitor::Lock(Thread* self) {
}
if (sample_percent != 0 && (static_cast<uint32_t>(rand() % 100) < sample_percent)) {
const char* owners_filename;
- uint32_t owners_line_number;
+ int32_t owners_line_number;
TranslateLocation(owners_method, owners_dex_pc, &owners_filename, &owners_line_number);
if (wait_ms > kLongWaitMs && owners_method != nullptr) {
LOG(WARNING) << "Long monitor contention event with owner method="
@@ -696,6 +696,7 @@ static mirror::Object* FakeUnlock(mirror::Object* obj)
mirror::Object* Monitor::MonitorEnter(Thread* self, mirror::Object* obj) {
DCHECK(self != nullptr);
DCHECK(obj != nullptr);
+ self->AssertThreadSuspensionIsAllowable();
obj = FakeLock(obj);
uint32_t thread_id = self->GetThreadId();
size_t contention_count = 0;
@@ -771,6 +772,7 @@ mirror::Object* Monitor::MonitorEnter(Thread* self, mirror::Object* obj) {
bool Monitor::MonitorExit(Thread* self, mirror::Object* obj) {
DCHECK(self != nullptr);
DCHECK(obj != nullptr);
+ self->AssertThreadSuspensionIsAllowable();
obj = FakeUnlock(obj);
StackHandleScope<1> hs(self);
Handle<mirror::Object> h_obj(hs.NewHandle(obj));
@@ -1084,7 +1086,7 @@ bool Monitor::IsLocked() SHARED_REQUIRES(Locks::mutator_lock_) {
}
void Monitor::TranslateLocation(ArtMethod* method, uint32_t dex_pc,
- const char** source_file, uint32_t* line_number) const {
+ const char** source_file, int32_t* line_number) const {
// If method is null, location is unknown
if (method == nullptr) {
*source_file = "";
diff --git a/runtime/monitor.h b/runtime/monitor.h
index 8cd93c69d7..707d0f112c 100644
--- a/runtime/monitor.h
+++ b/runtime/monitor.h
@@ -65,12 +65,16 @@ class Monitor {
// NO_THREAD_SAFETY_ANALYSIS for mon->Lock.
static mirror::Object* MonitorEnter(Thread* thread, mirror::Object* obj)
EXCLUSIVE_LOCK_FUNCTION(obj)
- SHARED_REQUIRES(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS;
+ NO_THREAD_SAFETY_ANALYSIS
+ REQUIRES(!Roles::uninterruptible_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// NO_THREAD_SAFETY_ANALYSIS for mon->Unlock.
static bool MonitorExit(Thread* thread, mirror::Object* obj)
+ NO_THREAD_SAFETY_ANALYSIS
+ REQUIRES(!Roles::uninterruptible_)
SHARED_REQUIRES(Locks::mutator_lock_)
- UNLOCK_FUNCTION(obj) NO_THREAD_SAFETY_ANALYSIS;
+ UNLOCK_FUNCTION(obj);
static void Notify(Thread* self, mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) {
DoNotify(self, obj, false);
@@ -179,7 +183,7 @@ class Monitor {
NO_THREAD_SAFETY_ANALYSIS; // For m->Install(self)
void LogContentionEvent(Thread* self, uint32_t wait_ms, uint32_t sample_percent,
- const char* owner_filename, uint32_t owner_line_number)
+ const char* owner_filename, int32_t owner_line_number)
SHARED_REQUIRES(Locks::mutator_lock_);
static void FailedUnlock(mirror::Object* obj, Thread* expected_owner, Thread* found_owner,
@@ -231,7 +235,7 @@ class Monitor {
// Translates the provided method and pc into its declaring class' source file and line number.
void TranslateLocation(ArtMethod* method, uint32_t pc,
- const char** source_file, uint32_t* line_number) const
+ const char** source_file, int32_t* line_number) const
SHARED_REQUIRES(Locks::mutator_lock_);
uint32_t GetOwnerThreadId() REQUIRES(!monitor_lock_);
diff --git a/runtime/monitor_android.cc b/runtime/monitor_android.cc
index efe2e823d9..82ef2d841a 100644
--- a/runtime/monitor_android.cc
+++ b/runtime/monitor_android.cc
@@ -50,7 +50,7 @@ static char* EventLogWriteString(char* dst, const char* value, size_t len) {
}
void Monitor::LogContentionEvent(Thread* self, uint32_t wait_ms, uint32_t sample_percent,
- const char* owner_filename, uint32_t owner_line_number) {
+ const char* owner_filename, int32_t owner_line_number) {
// Emit the event list length, 1 byte.
char eventBuffer[174];
char* cp = eventBuffer;
@@ -80,7 +80,7 @@ void Monitor::LogContentionEvent(Thread* self, uint32_t wait_ms, uint32_t sample
uint32_t pc;
ArtMethod* m = self->GetCurrentMethod(&pc);
const char* filename;
- uint32_t line_number;
+ int32_t line_number;
TranslateLocation(m, pc, &filename, &line_number);
cp = EventLogWriteString(cp, filename, strlen(filename));
diff --git a/runtime/monitor_linux.cc b/runtime/monitor_linux.cc
index 856ebe45f9..1c77ac0eb3 100644
--- a/runtime/monitor_linux.cc
+++ b/runtime/monitor_linux.cc
@@ -18,7 +18,7 @@
namespace art {
-void Monitor::LogContentionEvent(Thread*, uint32_t, uint32_t, const char*, uint32_t) {
+void Monitor::LogContentionEvent(Thread*, uint32_t, uint32_t, const char*, int32_t) {
}
} // namespace art
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index 4aebc2c35f..4eea3f39f7 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -27,6 +27,7 @@
#include "mirror/object-inl.h"
#include "mirror/string.h"
#include "oat_file_assistant.h"
+#include "oat_file_manager.h"
#include "os.h"
#include "profiler.h"
#include "runtime.h"
@@ -39,13 +40,16 @@
namespace art {
-static std::unique_ptr<std::vector<const DexFile*>>
-ConvertJavaArrayToNative(JNIEnv* env, jobject arrayObject) {
+static bool ConvertJavaArrayToDexFiles(
+ JNIEnv* env,
+ jobject arrayObject,
+ /*out*/ std::vector<const DexFile*>& dex_files,
+ /*out*/ const OatFile*& oat_file) {
jarray array = reinterpret_cast<jarray>(arrayObject);
jsize array_size = env->GetArrayLength(array);
if (env->ExceptionCheck() == JNI_TRUE) {
- return std::unique_ptr<std::vector<const DexFile*>>();
+ return false;
}
// TODO: Optimize. On 32bit we can use an int array.
@@ -53,27 +57,24 @@ ConvertJavaArrayToNative(JNIEnv* env, jobject arrayObject) {
jlong* long_data = env->GetLongArrayElements(reinterpret_cast<jlongArray>(array),
&is_long_data_copied);
if (env->ExceptionCheck() == JNI_TRUE) {
- return std::unique_ptr<std::vector<const DexFile*>>();
+ return false;
}
- std::unique_ptr<std::vector<const DexFile*>> ret(new std::vector<const DexFile*>());
- ret->reserve(array_size);
- for (jsize i = 0; i < array_size; ++i) {
- ret->push_back(reinterpret_cast<const DexFile*>(static_cast<uintptr_t>(*(long_data + i))));
+ oat_file = reinterpret_cast<const OatFile*>(static_cast<uintptr_t>(long_data[kOatFileIndex]));
+ dex_files.reserve(array_size - 1);
+ for (jsize i = kDexFileIndexStart; i < array_size; ++i) {
+ dex_files.push_back(reinterpret_cast<const DexFile*>(static_cast<uintptr_t>(long_data[i])));
}
env->ReleaseLongArrayElements(reinterpret_cast<jlongArray>(array), long_data, JNI_ABORT);
- if (env->ExceptionCheck() == JNI_TRUE) {
- return std::unique_ptr<std::vector<const DexFile*>>();
- }
-
- return ret;
+ return env->ExceptionCheck() != JNI_TRUE;
}
-static jlongArray ConvertNativeToJavaArray(JNIEnv* env,
- std::vector<std::unique_ptr<const DexFile>>& vec) {
- size_t vec_size = vec.size();
- jlongArray long_array = env->NewLongArray(static_cast<jsize>(vec_size));
+static jlongArray ConvertDexFilesToJavaArray(JNIEnv* env,
+ const OatFile* oat_file,
+ std::vector<std::unique_ptr<const DexFile>>& vec) {
+ // Add one for the oat file.
+ jlongArray long_array = env->NewLongArray(static_cast<jsize>(kDexFileIndexStart + vec.size()));
if (env->ExceptionCheck() == JNI_TRUE) {
return nullptr;
}
@@ -84,10 +85,9 @@ static jlongArray ConvertNativeToJavaArray(JNIEnv* env,
return nullptr;
}
- jlong* tmp = long_data;
- for (auto& dex_file : vec) {
- *tmp = reinterpret_cast<uintptr_t>(dex_file.get());
- tmp++;
+ long_data[kOatFileIndex] = reinterpret_cast<uintptr_t>(oat_file);
+ for (size_t i = 0; i < vec.size(); ++i) {
+ long_data[kDexFileIndexStart + i] = reinterpret_cast<uintptr_t>(vec[i].get());
}
env->ReleaseLongArrayElements(long_array, long_data, 0);
@@ -160,14 +160,19 @@ static jobject DexFile_openDexFileNative(
return 0;
}
- ClassLinker* linker = Runtime::Current()->GetClassLinker();
+ Runtime* const runtime = Runtime::Current();
+ ClassLinker* linker = runtime->GetClassLinker();
std::vector<std::unique_ptr<const DexFile>> dex_files;
std::vector<std::string> error_msgs;
+ const OatFile* oat_file = nullptr;
- dex_files = linker->OpenDexFilesFromOat(sourceName.c_str(), outputName.c_str(), &error_msgs);
+ dex_files = runtime->GetOatFileManager().OpenDexFilesFromOat(sourceName.c_str(),
+ outputName.c_str(),
+ /*out*/ &oat_file,
+ /*out*/ &error_msgs);
if (!dex_files.empty()) {
- jlongArray array = ConvertNativeToJavaArray(env, dex_files);
+ jlongArray array = ConvertDexFilesToJavaArray(env, oat_file, dex_files);
if (array == nullptr) {
ScopedObjectAccess soa(env);
for (auto& dex_file : dex_files) {
@@ -193,43 +198,55 @@ static jobject DexFile_openDexFileNative(
}
static jboolean DexFile_closeDexFile(JNIEnv* env, jclass, jobject cookie) {
- ScopedObjectAccess soa(env);
- mirror::Object* dex_files_object = soa.Decode<mirror::Object*>(cookie);
- if (dex_files_object == nullptr) {
- ThrowNullPointerException("cookie == null");
+ std::vector<const DexFile*> dex_files;
+ const OatFile* oat_file;
+ if (!ConvertJavaArrayToDexFiles(env, cookie, dex_files, oat_file)) {
+ Thread::Current()->AssertPendingException();
return JNI_FALSE;
}
- mirror::LongArray* dex_files = dex_files_object->AsLongArray();
-
- // Delete dex files associated with this dalvik.system.DexFile since there should not be running
- // code using it. dex_files is a vector due to multidex.
- ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
+ Runtime* const runtime = Runtime::Current();
bool all_deleted = true;
- for (int32_t i = 0, count = dex_files->GetLength(); i < count; ++i) {
- auto* dex_file = reinterpret_cast<DexFile*>(dex_files->Get(i));
- if (dex_file == nullptr) {
- continue;
- }
- // Only delete the dex file if the dex cache is not found to prevent runtime crashes if there
- // are calls to DexFile.close while the ART DexFile is still in use.
- if (class_linker->FindDexCache(soa.Self(), *dex_file, true) == nullptr) {
- // Clear the element in the array so that we can call close again.
- dex_files->Set(i, 0);
- delete dex_file;
- } else {
- all_deleted = false;
+ {
+ ScopedObjectAccess soa(env);
+ mirror::Object* dex_files_object = soa.Decode<mirror::Object*>(cookie);
+ mirror::LongArray* long_dex_files = dex_files_object->AsLongArray();
+ // Delete the dex files associated with this dalvik.system.DexFile since there should be no
+ // running code still using them. dex_files is a vector due to multidex.
+ ClassLinker* const class_linker = runtime->GetClassLinker();
+ int32_t i = kDexFileIndexStart; // Oat file is at index 0.
+ for (const DexFile* dex_file : dex_files) {
+ if (dex_file != nullptr) {
+ // Only delete the dex file if the dex cache is not found to prevent runtime crashes if there
+ // are calls to DexFile.close while the ART DexFile is still in use.
+ if (class_linker->FindDexCache(soa.Self(), *dex_file, true) == nullptr) {
+ // Clear the element in the array so that we can call close again.
+ long_dex_files->Set(i, 0);
+ delete dex_file;
+ } else {
+ all_deleted = false;
+ }
+ }
+ ++i;
}
}
- // TODO: Also unmap the OatFile for this dalvik.system.DexFile.
-
+ // oat_file can be null if we are running without dex2oat.
+ if (all_deleted && oat_file != nullptr) {
+ // If all of the dex files are no longer in use we can unmap the corresponding oat file.
+ VLOG(class_linker) << "Unregistering " << oat_file;
+ runtime->GetOatFileManager().UnRegisterAndDeleteOatFile(oat_file);
+ }
return all_deleted ? JNI_TRUE : JNI_FALSE;
}
-static jclass DexFile_defineClassNative(JNIEnv* env, jclass, jstring javaName, jobject javaLoader,
+static jclass DexFile_defineClassNative(JNIEnv* env,
+ jclass,
+ jstring javaName,
+ jobject javaLoader,
jobject cookie) {
- std::unique_ptr<std::vector<const DexFile*>> dex_files = ConvertJavaArrayToNative(env, cookie);
- if (dex_files.get() == nullptr) {
+ std::vector<const DexFile*> dex_files;
+ const OatFile* oat_file;
+ if (!ConvertJavaArrayToDexFiles(env, cookie, /*out*/ dex_files, /*out*/ oat_file)) {
VLOG(class_linker) << "Failed to find dex_file";
DCHECK(env->ExceptionCheck());
return nullptr;
@@ -242,17 +259,23 @@ static jclass DexFile_defineClassNative(JNIEnv* env, jclass, jstring javaName, j
}
const std::string descriptor(DotToDescriptor(class_name.c_str()));
const size_t hash(ComputeModifiedUtf8Hash(descriptor.c_str()));
- for (auto& dex_file : *dex_files) {
+ for (auto& dex_file : dex_files) {
const DexFile::ClassDef* dex_class_def = dex_file->FindClassDef(descriptor.c_str(), hash);
if (dex_class_def != nullptr) {
ScopedObjectAccess soa(env);
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- class_linker->RegisterDexFile(*dex_file);
StackHandleScope<1> hs(soa.Self());
Handle<mirror::ClassLoader> class_loader(
hs.NewHandle(soa.Decode<mirror::ClassLoader*>(javaLoader)));
- mirror::Class* result = class_linker->DefineClass(soa.Self(), descriptor.c_str(), hash,
- class_loader, *dex_file, *dex_class_def);
+ class_linker->RegisterDexFile(
+ *dex_file,
+ class_linker->GetOrCreateAllocatorForClassLoader(class_loader.Get()));
+ mirror::Class* result = class_linker->DefineClass(soa.Self(),
+ descriptor.c_str(),
+ hash,
+ class_loader,
+ *dex_file,
+ *dex_class_def);
if (result != nullptr) {
VLOG(class_linker) << "DexFile_defineClassNative returning " << result
<< " for " << class_name.c_str();
@@ -273,8 +296,9 @@ struct CharPointerComparator {
// Note: this can be an expensive call, as we sort out duplicates in MultiDex files.
static jobjectArray DexFile_getClassNameList(JNIEnv* env, jclass, jobject cookie) {
- std::unique_ptr<std::vector<const DexFile*>> dex_files = ConvertJavaArrayToNative(env, cookie);
- if (dex_files.get() == nullptr) {
+ const OatFile* oat_file = nullptr;
+ std::vector<const DexFile*> dex_files;
+ if (!ConvertJavaArrayToDexFiles(env, cookie, /*out*/ dex_files, /*out*/ oat_file)) {
DCHECK(env->ExceptionCheck());
return nullptr;
}
@@ -282,7 +306,7 @@ static jobjectArray DexFile_getClassNameList(JNIEnv* env, jclass, jobject cookie
// Push all class descriptors into a set. Use set instead of unordered_set as we want to
// retrieve all in the end.
std::set<const char*, CharPointerComparator> descriptors;
- for (auto& dex_file : *dex_files) {
+ for (auto& dex_file : dex_files) {
for (size_t i = 0; i < dex_file->NumClassDefs(); ++i) {
const DexFile::ClassDef& class_def = dex_file->GetClassDef(i);
const char* descriptor = dex_file->GetClassDescriptor(class_def);
@@ -291,7 +315,8 @@ static jobjectArray DexFile_getClassNameList(JNIEnv* env, jclass, jobject cookie
}
// Now create output array and copy the set into it.
- jobjectArray result = env->NewObjectArray(descriptors.size(), WellKnownClasses::java_lang_String,
+ jobjectArray result = env->NewObjectArray(descriptors.size(),
+ WellKnownClasses::java_lang_String,
nullptr);
if (result != nullptr) {
auto it = descriptors.begin();
@@ -309,9 +334,11 @@ static jobjectArray DexFile_getClassNameList(JNIEnv* env, jclass, jobject cookie
return result;
}
-static jint GetDexOptNeeded(JNIEnv* env, const char* filename,
- const char* pkgname, const char* instruction_set, const jboolean defer) {
-
+static jint GetDexOptNeeded(JNIEnv* env,
+ const char* filename,
+ const char* pkgname,
+ const char* instruction_set,
+ const jboolean defer) {
if ((filename == nullptr) || !OS::FileExists(filename)) {
LOG(ERROR) << "DexFile_getDexOptNeeded file '" << filename << "' does not exist";
ScopedLocalRef<jclass> fnfe(env, env->FindClass("java/io/FileNotFoundException"));
@@ -361,8 +388,12 @@ static jint GetDexOptNeeded(JNIEnv* env, const char* filename,
return oat_file_assistant.GetDexOptNeeded();
}
-static jint DexFile_getDexOptNeeded(JNIEnv* env, jclass, jstring javaFilename,
- jstring javaPkgname, jstring javaInstructionSet, jboolean defer) {
+static jint DexFile_getDexOptNeeded(JNIEnv* env,
+ jclass,
+ jstring javaFilename,
+ jstring javaPkgname,
+ jstring javaInstructionSet,
+ jboolean defer) {
ScopedUtfChars filename(env, javaFilename);
if (env->ExceptionCheck()) {
return 0;
@@ -375,8 +406,11 @@ static jint DexFile_getDexOptNeeded(JNIEnv* env, jclass, jstring javaFilename,
return 0;
}
- return GetDexOptNeeded(env, filename.c_str(), pkgname.c_str(),
- instruction_set.c_str(), defer);
+ return GetDexOptNeeded(env,
+ filename.c_str(),
+ pkgname.c_str(),
+ instruction_set.c_str(),
+ defer);
}
// public API, null pkgname
diff --git a/runtime/native/dalvik_system_DexFile.h b/runtime/native/dalvik_system_DexFile.h
index 7585ab972c..77d219dfad 100644
--- a/runtime/native/dalvik_system_DexFile.h
+++ b/runtime/native/dalvik_system_DexFile.h
@@ -18,9 +18,13 @@
#define ART_RUNTIME_NATIVE_DALVIK_SYSTEM_DEXFILE_H_
#include <jni.h>
+#include <unistd.h>
namespace art {
+constexpr size_t kOatFileIndex = 0;
+constexpr size_t kDexFileIndexStart = 1;
+
class DexFile;
void register_dalvik_system_DexFile(JNIEnv* env);
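With kOatFileIndex and kDexFileIndexStart, the native cookie handed back to dalvik.system.DexFile is a long array whose slot 0 holds the OatFile pointer and whose remaining slots hold the DexFile pointers, which is exactly what ConvertDexFilesToJavaArray and ConvertJavaArrayToDexFiles encode and decode above. A minimal sketch of that layout, using a plain std::vector<int64_t> in place of the JNI long array (OatFile and DexFile here are empty placeholders, not the real ART types):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    constexpr size_t kOatFileIndex = 0;
    constexpr size_t kDexFileIndexStart = 1;

    struct OatFile {};  // placeholder for art::OatFile
    struct DexFile {};  // placeholder for art::DexFile

    // Pack: slot 0 is the oat file, slots 1..N are the dex files.
    std::vector<int64_t> MakeCookie(const OatFile* oat,
                                    const std::vector<const DexFile*>& dex_files) {
      std::vector<int64_t> cookie(kDexFileIndexStart + dex_files.size());
      cookie[kOatFileIndex] = static_cast<int64_t>(reinterpret_cast<uintptr_t>(oat));
      for (size_t i = 0; i < dex_files.size(); ++i) {
        cookie[kDexFileIndexStart + i] =
            static_cast<int64_t>(reinterpret_cast<uintptr_t>(dex_files[i]));
      }
      return cookie;
    }

    // Unpack: the reverse of MakeCookie.
    void ReadCookie(const std::vector<int64_t>& cookie,
                    const OatFile** oat,
                    std::vector<const DexFile*>* dex_files) {
      *oat = reinterpret_cast<const OatFile*>(static_cast<uintptr_t>(cookie[kOatFileIndex]));
      for (size_t i = kDexFileIndexStart; i < cookie.size(); ++i) {
        dex_files->push_back(
            reinterpret_cast<const DexFile*>(static_cast<uintptr_t>(cookie[i])));
      }
    }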
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 4f957233c4..4c5dc3ad25 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -497,7 +497,8 @@ static void VMRuntime_preloadDexCaches(JNIEnv* env, jobject) {
const DexFile* dex_file = boot_class_path[i];
CHECK(dex_file != nullptr);
StackHandleScope<1> hs(soa.Self());
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(linker->RegisterDexFile(*dex_file)));
+ Handle<mirror::DexCache> dex_cache(
+ hs.NewHandle(linker->RegisterDexFile(*dex_file, runtime->GetLinearAlloc())));
if (kPreloadDexCachesStrings) {
for (size_t j = 0; j < dex_cache->NumStrings(); j++) {
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index 5da15df25b..3a73900efa 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -522,6 +522,10 @@ static jobjectArray Class_getDeclaredClasses(JNIEnv* env, jobject javaThis) {
}
if (classes == nullptr) {
// Return an empty array instead of a null pointer.
+ if (soa.Self()->IsExceptionPending()) {
+ // Pending exception from GetDeclaredClasses.
+ return nullptr;
+ }
mirror::Class* class_class = mirror::Class::GetJavaLangClass();
mirror::Class* class_array_class =
Runtime::Current()->GetClassLinker()->FindArrayClass(soa.Self(), &class_class);
diff --git a/runtime/native/java_lang_reflect_Constructor.cc b/runtime/native/java_lang_reflect_Constructor.cc
index e1e9ceb8c6..45b948408e 100644
--- a/runtime/native/java_lang_reflect_Constructor.cc
+++ b/runtime/native/java_lang_reflect_Constructor.cc
@@ -103,10 +103,17 @@ static jobject Constructor_newInstance(JNIEnv* env, jobject javaMethod, jobjectA
// If caller is null, then we called from JNI, just avoid the check since JNI avoids most
// access checks anyways. TODO: Investigate if this the correct behavior.
if (caller != nullptr && !caller->CanAccess(c.Get())) {
- soa.Self()->ThrowNewExceptionF(
- "Ljava/lang/IllegalAccessException;", "%s is not accessible from %s",
- PrettyClass(c.Get()).c_str(), PrettyClass(caller).c_str());
- return nullptr;
+ if (PrettyDescriptor(c.Get()) == "dalvik.system.DexPathList$Element") {
+ // b/20699073.
+ LOG(WARNING) << "The dalvik.system.DexPathList$Element constructor is not accessible by "
+ "default. This is a temporary workaround for backwards compatibility "
+ "with class-loader hacks. Please update your application.";
+ } else {
+ soa.Self()->ThrowNewExceptionF(
+ "Ljava/lang/IllegalAccessException;", "%s is not accessible from %s",
+ PrettyClass(c.Get()).c_str(), PrettyClass(caller).c_str());
+ return nullptr;
+ }
}
}
if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(soa.Self(), c, true, true)) {
diff --git a/runtime/oat.cc b/runtime/oat.cc
index 5725b6ff6c..5625499848 100644
--- a/runtime/oat.cc
+++ b/runtime/oat.cc
@@ -134,6 +134,9 @@ bool OatHeader::IsValid() const {
if (!IsAligned<kPageSize>(image_patch_delta_)) {
return false;
}
+ if (!IsValidInstructionSet(instruction_set_)) {
+ return false;
+ }
return true;
}
@@ -156,6 +159,9 @@ std::string OatHeader::GetValidationErrorMessage() const {
if (!IsAligned<kPageSize>(image_patch_delta_)) {
return "Image patch delta not page-aligned.";
}
+ if (!IsValidInstructionSet(instruction_set_)) {
+ return StringPrintf("Invalid instruction set, %d.", static_cast<int>(instruction_set_));
+ }
return "";
}
diff --git a/runtime/oat.h b/runtime/oat.h
index 24acbc8c4a..2aa5783bde 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,7 +32,7 @@ class InstructionSetFeatures;
class PACKED(4) OatHeader {
public:
static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
- static constexpr uint8_t kOatVersion[] = { '0', '7', '1', '\0' };
+ static constexpr uint8_t kOatVersion[] = { '0', '7', '2', '\0' };
static constexpr const char* kImageLocationKey = "image-location";
static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
diff --git a/runtime/oat_file-inl.h b/runtime/oat_file-inl.h
index 5df652579f..f7913e177a 100644
--- a/runtime/oat_file-inl.h
+++ b/runtime/oat_file-inl.h
@@ -22,7 +22,7 @@
namespace art {
inline const OatQuickMethodHeader* OatFile::OatMethod::GetOatQuickMethodHeader() const {
- const void* code = ArtMethod::EntryPointToCodePointer(GetOatPointer<const void*>(code_offset_));
+ const void* code = EntryPointToCodePointer(GetOatPointer<const void*>(code_offset_));
if (code == nullptr) {
return nullptr;
}
@@ -47,7 +47,7 @@ inline uint32_t OatFile::OatMethod::GetQuickCodeSizeOffset() const {
}
inline size_t OatFile::OatMethod::GetFrameSizeInBytes() const {
- const void* code = ArtMethod::EntryPointToCodePointer(GetQuickCode());
+ const void* code = EntryPointToCodePointer(GetQuickCode());
if (code == nullptr) {
return 0u;
}
@@ -55,7 +55,7 @@ inline size_t OatFile::OatMethod::GetFrameSizeInBytes() const {
}
inline uint32_t OatFile::OatMethod::GetCoreSpillMask() const {
- const void* code = ArtMethod::EntryPointToCodePointer(GetQuickCode());
+ const void* code = EntryPointToCodePointer(GetQuickCode());
if (code == nullptr) {
return 0u;
}
@@ -63,7 +63,7 @@ inline uint32_t OatFile::OatMethod::GetCoreSpillMask() const {
}
inline uint32_t OatFile::OatMethod::GetFpSpillMask() const {
- const void* code = ArtMethod::EntryPointToCodePointer(GetQuickCode());
+ const void* code = EntryPointToCodePointer(GetQuickCode());
if (code == nullptr) {
return 0u;
}
@@ -71,7 +71,7 @@ inline uint32_t OatFile::OatMethod::GetFpSpillMask() const {
}
inline const uint8_t* OatFile::OatMethod::GetGcMap() const {
- const void* code = ArtMethod::EntryPointToCodePointer(GetOatPointer<const void*>(code_offset_));
+ const void* code = EntryPointToCodePointer(GetOatPointer<const void*>(code_offset_));
if (code == nullptr) {
return nullptr;
}
@@ -122,7 +122,7 @@ inline uint32_t OatFile::OatMethod::GetVmapTableOffsetOffset() const {
}
inline const uint8_t* OatFile::OatMethod::GetMappingTable() const {
- const void* code = ArtMethod::EntryPointToCodePointer(GetOatPointer<const void*>(code_offset_));
+ const void* code = EntryPointToCodePointer(GetOatPointer<const void*>(code_offset_));
if (code == nullptr) {
return nullptr;
}
@@ -134,7 +134,7 @@ inline const uint8_t* OatFile::OatMethod::GetMappingTable() const {
}
inline const uint8_t* OatFile::OatMethod::GetVmapTable() const {
- const void* code = ArtMethod::EntryPointToCodePointer(GetOatPointer<const void*>(code_offset_));
+ const void* code = EntryPointToCodePointer(GetOatPointer<const void*>(code_offset_));
if (code == nullptr) {
return nullptr;
}
@@ -146,7 +146,7 @@ inline const uint8_t* OatFile::OatMethod::GetVmapTable() const {
}
inline uint32_t OatFile::OatMethod::GetQuickCodeSize() const {
- const void* code = ArtMethod::EntryPointToCodePointer(GetOatPointer<const void*>(code_offset_));
+ const void* code = EntryPointToCodePointer(GetOatPointer<const void*>(code_offset_));
if (code == nullptr) {
return 0u;
}
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index a4a159e0da..e861921130 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -42,9 +42,11 @@
#include "mirror/class.h"
#include "mirror/object-inl.h"
#include "oat_file-inl.h"
+#include "oat_file_manager.h"
#include "os.h"
#include "runtime.h"
#include "utils.h"
+#include "utils/dex_cache_arrays_layout-inl.h"
#include "vmap_table.h"
namespace art {
@@ -115,15 +117,35 @@ OatFile* OatFile::Open(const std::string& filename,
// TODO: Also try when not executable? The issue here could be re-mapping as writable (as
// !executable is a sign that we may want to patch), which may not be allowed for
// various reasons.
- if (kUseDlopen && (kIsTargetBuild || kUseDlopenOnHost) && executable) {
- // Try to use dlopen. This may fail for various reasons, outlined below. We try dlopen, as
- // this will register the oat file with the linker and allows libunwind to find our info.
- ret.reset(OpenDlopen(filename, location, requested_base, abs_dex_location, error_msg));
- if (ret.get() != nullptr) {
- return ret.release();
+ // dlopen always returns the same library if it is already opened on the host. For this reason
+ // we only use dlopen if we are on the target or do not already have the oat file opened. Having
+ // the same library loaded multiple times at different addresses is required for class unloading
+ // and for having the dex cache arrays in the .bss section.
+ Runtime* const runtime = Runtime::Current();
+ OatFileManager* const manager = (runtime != nullptr) ? &runtime->GetOatFileManager() : nullptr;
+ if (kUseDlopen && executable) {
+ bool success = kIsTargetBuild;
+ bool reserved_location = false;
+ // Manager may be null if we are running without a runtime.
+ if (!success && kUseDlopenOnHost && manager != nullptr) {
+ // RegisterOatFileLocation returns false if we are not the first caller to register that
+ // location.
+ reserved_location = manager->RegisterOatFileLocation(location);
+ success = reserved_location;
}
- if (kPrintDlOpenErrorMessage) {
- LOG(ERROR) << "Failed to dlopen: " << *error_msg;
+ if (success) {
+ // Try to use dlopen. This may fail for various reasons, outlined below. We try dlopen, as
+ // this will register the oat file with the linker and allows libunwind to find our info.
+ ret.reset(OpenDlopen(filename, location, requested_base, abs_dex_location, error_msg));
+ if (reserved_location) {
+ manager->UnRegisterOatFileLocation(location);
+ }
+ if (ret != nullptr) {
+ return ret.release();
+ }
+ if (kPrintDlOpenErrorMessage) {
+ LOG(ERROR) << "Failed to dlopen: " << *error_msg;
+ }
}
}
@@ -204,6 +226,10 @@ OatFile::OatFile(const std::string& location, bool is_executable)
is_executable_(is_executable), dlopen_handle_(nullptr),
secondary_lookup_lock_("OatFile secondary lookup lock", kOatFileSecondaryLookupLock) {
CHECK(!location_.empty());
+ Runtime* const runtime = Runtime::Current();
+ if (runtime != nullptr && !runtime->IsAotCompiler()) {
+ runtime->GetOatFileManager().RegisterOatFileLocation(location);
+ }
}
OatFile::~OatFile() {
@@ -211,6 +237,10 @@ OatFile::~OatFile() {
if (dlopen_handle_ != nullptr) {
dlclose(dlopen_handle_);
}
+ Runtime* const runtime = Runtime::Current();
+ if (runtime != nullptr && !runtime->IsAotCompiler()) {
+ runtime->GetOatFileManager().UnRegisterOatFileLocation(location_);
+ }
}
bool OatFile::Dlopen(const std::string& elf_filename, uint8_t* requested_base,
@@ -218,10 +248,7 @@ bool OatFile::Dlopen(const std::string& elf_filename, uint8_t* requested_base,
#ifdef __APPLE__
// The dl_iterate_phdr syscall is missing. There is similar API on OSX,
// but let's fallback to the custom loading code for the time being.
- UNUSED(elf_filename);
- UNUSED(requested_base);
- UNUSED(abs_dex_location);
- UNUSED(error_msg);
+ UNUSED(elf_filename, requested_base, abs_dex_location, error_msg);
return false;
#else
{
@@ -403,6 +430,8 @@ bool OatFile::Setup(const char* abs_dex_location, std::string* error_msg) {
return false;
}
+ size_t pointer_size = GetInstructionSetPointerSize(GetOatHeader().GetInstructionSet());
+ const uint8_t* dex_cache_arrays = bss_begin_;
uint32_t dex_file_count = GetOatHeader().GetDexFileCount();
oat_dex_files_storage_.reserve(dex_file_count);
for (size_t i = 0; i < dex_file_count; i++) {
@@ -484,6 +513,22 @@ bool OatFile::Setup(const char* abs_dex_location, std::string* error_msg) {
return false;
}
+ const uint8_t* current_dex_cache_arrays = nullptr;
+ if (dex_cache_arrays != nullptr) {
+ DexCacheArraysLayout layout(pointer_size, *header);
+ if (layout.Size() != 0u) {
+ if (static_cast<size_t>(bss_end_ - dex_cache_arrays) < layout.Size()) {
+ *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zd for '%s' with "
+ "truncated dex cache arrays, %zd < %zd.",
+ GetLocation().c_str(), i, dex_file_location.c_str(),
+ static_cast<size_t>(bss_end_ - dex_cache_arrays), layout.Size());
+ return false;
+ }
+ current_dex_cache_arrays = dex_cache_arrays;
+ dex_cache_arrays += layout.Size();
+ }
+ }
+
std::string canonical_location = DexFile::GetDexCanonicalLocation(dex_file_location.c_str());
// Create the OatDexFile and add it to the owning container.
@@ -492,7 +537,8 @@ bool OatFile::Setup(const char* abs_dex_location, std::string* error_msg) {
canonical_location,
dex_file_checksum,
dex_file_pointer,
- methods_offsets_pointer);
+ methods_offsets_pointer,
+ current_dex_cache_arrays);
oat_dex_files_storage_.push_back(oat_dex_file);
// Add the location and canonical location (if different) to the oat_dex_files_ table.
@@ -503,6 +549,15 @@ bool OatFile::Setup(const char* abs_dex_location, std::string* error_msg) {
oat_dex_files_.Put(canonical_key, oat_dex_file);
}
}
+
+ if (dex_cache_arrays != bss_end_) {
+ // We expect the bss section to be either empty (dex_cache_arrays and bss_end_
+ // both null) or contain just the dex cache arrays and nothing else.
+ *error_msg = StringPrintf("In oat file '%s' found unexpected bss size bigger by %zd bytes.",
+ GetLocation().c_str(),
+ static_cast<size_t>(bss_end_ - dex_cache_arrays));
+ return false;
+ }
return true;
}
@@ -605,13 +660,15 @@ OatFile::OatDexFile::OatDexFile(const OatFile* oat_file,
const std::string& canonical_dex_file_location,
uint32_t dex_file_location_checksum,
const uint8_t* dex_file_pointer,
- const uint32_t* oat_class_offsets_pointer)
+ const uint32_t* oat_class_offsets_pointer,
+ const uint8_t* dex_cache_arrays)
: oat_file_(oat_file),
dex_file_location_(dex_file_location),
canonical_dex_file_location_(canonical_dex_file_location),
dex_file_location_checksum_(dex_file_location_checksum),
dex_file_pointer_(dex_file_pointer),
- oat_class_offsets_pointer_(oat_class_offsets_pointer) {}
+ oat_class_offsets_pointer_(oat_class_offsets_pointer),
+ dex_cache_arrays_(dex_cache_arrays) {}
OatFile::OatDexFile::~OatDexFile() {}
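The Setup() change above walks the .bss range once, handing each OatDexFile a slice whose size comes from DexCacheArraysLayout and requiring that the slices consume the section exactly. A simplified, self-contained sketch of that carving loop, with plain byte sizes standing in for DexCacheArraysLayout::Size():

    #include <cstddef>
    #include <cstdint>
    #include <string>
    #include <vector>

    // Carve per-dex-file regions out of [bss_begin, bss_end). Returns false and
    // fills error if a slice would overrun the section or if bytes are left over.
    bool CarveDexCacheArrays(const uint8_t* bss_begin,
                             const uint8_t* bss_end,
                             const std::vector<size_t>& layout_sizes,  // one per dex file
                             std::vector<const uint8_t*>* regions,
                             std::string* error) {
      const uint8_t* cursor = bss_begin;
      for (size_t size : layout_sizes) {
        if (size == 0u) {
          regions->push_back(nullptr);  // no dex cache arrays for this dex file
          continue;
        }
        if (static_cast<size_t>(bss_end - cursor) < size) {
          *error = "truncated dex cache arrays";
          return false;
        }
        regions->push_back(cursor);
        cursor += size;
      }
      if (cursor != bss_end) {
        *error = "unexpected extra bytes in .bss";
        return false;
      }
      return true;
    }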
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index 27f8677f03..34f014123b 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -29,6 +29,7 @@
#include "mirror/class.h"
#include "oat.h"
#include "os.h"
+#include "utils.h"
namespace art {
@@ -395,6 +396,10 @@ class OatDexFile FINAL {
// Returns the offset to the OatClass information. Most callers should use GetOatClass.
uint32_t GetOatClassOffset(uint16_t class_def_index) const;
+ const uint8_t* GetDexCacheArrays() const {
+ return dex_cache_arrays_;
+ }
+
~OatDexFile();
private:
@@ -403,7 +408,8 @@ class OatDexFile FINAL {
const std::string& canonical_dex_file_location,
uint32_t dex_file_checksum,
const uint8_t* dex_file_pointer,
- const uint32_t* oat_class_offsets_pointer);
+ const uint32_t* oat_class_offsets_pointer,
+ const uint8_t* dex_cache_arrays);
const OatFile* const oat_file_;
const std::string dex_file_location_;
@@ -411,6 +417,7 @@ class OatDexFile FINAL {
const uint32_t dex_file_location_checksum_;
const uint8_t* const dex_file_pointer_;
const uint32_t* const oat_class_offsets_pointer_;
+ const uint8_t* const dex_cache_arrays_;
friend class OatFile;
DISALLOW_COPY_AND_ASSIGN(OatDexFile);
diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc
index 20347a9063..2c81eddf39 100644
--- a/runtime/oat_file_assistant_test.cc
+++ b/runtime/oat_file_assistant_test.cc
@@ -31,6 +31,7 @@
#include "compiler_callbacks.h"
#include "gc/space/image_space.h"
#include "mem_map.h"
+#include "oat_file_manager.h"
#include "os.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
@@ -953,18 +954,21 @@ class RaceGenerateTask : public Task {
loaded_oat_file_(nullptr)
{}
- void Run(Thread* self) {
- UNUSED(self);
-
+ void Run(Thread* self ATTRIBUTE_UNUSED) {
// Load the dex files, and save a pointer to the loaded oat file, so that
// we can verify only one oat file was loaded for the dex location.
- ClassLinker* linker = Runtime::Current()->GetClassLinker();
std::vector<std::unique_ptr<const DexFile>> dex_files;
std::vector<std::string> error_msgs;
- dex_files = linker->OpenDexFilesFromOat(dex_location_.c_str(), oat_location_.c_str(), &error_msgs);
+ const OatFile* oat_file = nullptr;
+ dex_files = Runtime::Current()->GetOatFileManager().OpenDexFilesFromOat(
+ dex_location_.c_str(),
+ oat_location_.c_str(),
+ &oat_file,
+ &error_msgs);
CHECK(!dex_files.empty()) << Join(error_msgs, '\n');
CHECK(dex_files[0]->GetOatDexFile() != nullptr) << dex_files[0]->GetLocation();
loaded_oat_file_ = dex_files[0]->GetOatDexFile()->GetOatFile();
+ CHECK_EQ(loaded_oat_file_, oat_file);
}
const OatFile* GetLoadedOatFile() const {
@@ -980,8 +984,9 @@ class RaceGenerateTask : public Task {
// Test the case where multiple processes race to generate an oat file.
// This simulates multiple processes using multiple threads.
//
-// We want only one Oat file to be loaded when there is a race to load, to
-// avoid using up the virtual memory address space.
+// We want unique Oat files to be loaded even when there is a race to load.
+// TODO: The test case no longer tests locking the way it was intended since we now get multiple
+// copies of the same Oat files mapped at different addresses.
TEST_F(OatFileAssistantTest, RaceToGenerate) {
std::string dex_location = GetScratchDir() + "/RaceToGenerate.jar";
std::string oat_location = GetOdexDir() + "/RaceToGenerate.oat";
@@ -1002,10 +1007,12 @@ TEST_F(OatFileAssistantTest, RaceToGenerate) {
thread_pool.StartWorkers(self);
thread_pool.Wait(self, true, false);
- // Verify every task got the same pointer.
- const OatFile* expected = tasks[0]->GetLoadedOatFile();
+ // Verify every task got a unique oat file.
+ std::set<const OatFile*> oat_files;
for (auto& task : tasks) {
- EXPECT_EQ(expected, task->GetLoadedOatFile());
+ const OatFile* oat_file = task->GetLoadedOatFile();
+ EXPECT_TRUE(oat_files.find(oat_file) == oat_files.end());
+ oat_files.insert(oat_file);
}
}
diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc
new file mode 100644
index 0000000000..9eee156bb0
--- /dev/null
+++ b/runtime/oat_file_manager.cc
@@ -0,0 +1,394 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "oat_file_manager.h"
+
+#include <memory>
+#include <queue>
+#include <vector>
+
+#include "base/logging.h"
+#include "base/stl_util.h"
+#include "dex_file-inl.h"
+#include "gc/space/image_space.h"
+#include "oat_file_assistant.h"
+#include "thread-inl.h"
+
+namespace art {
+
+// For b/21333911.
+// Only enabled for debug builds to prevent bit rot; the check is too expensive to run in
+// normal builds.
+static constexpr bool kDuplicateClassesCheck = kIsDebugBuild;
+
+const OatFile* OatFileManager::RegisterOatFile(std::unique_ptr<const OatFile> oat_file) {
+ WriterMutexLock mu(Thread::Current(), *Locks::oat_file_manager_lock_);
+ DCHECK(oat_file != nullptr);
+ if (kIsDebugBuild) {
+ CHECK(oat_files_.find(oat_file) == oat_files_.end());
+ for (const std::unique_ptr<const OatFile>& existing : oat_files_) {
+ CHECK_NE(oat_file.get(), existing.get()) << oat_file->GetLocation();
+ // Check that we don't have an oat file with the same address. Copies of the same oat file
+ // should be loaded at different addresses.
+ CHECK_NE(oat_file->Begin(), existing->Begin()) << "Oat file already mapped at that location";
+ }
+ }
+ have_non_pic_oat_file_ = have_non_pic_oat_file_ || !oat_file->IsPic();
+ const OatFile* ret = oat_file.get();
+ oat_files_.insert(std::move(oat_file));
+ return ret;
+}
+
+void OatFileManager::UnRegisterAndDeleteOatFile(const OatFile* oat_file) {
+ WriterMutexLock mu(Thread::Current(), *Locks::oat_file_manager_lock_);
+ DCHECK(oat_file != nullptr);
+ std::unique_ptr<const OatFile> compare(oat_file);
+ auto it = oat_files_.find(compare);
+ CHECK(it != oat_files_.end());
+ oat_files_.erase(it);
+ compare.release();
+}
+
+const OatFile* OatFileManager::FindOpenedOatFileFromOatLocation(const std::string& oat_location)
+ const {
+ ReaderMutexLock mu(Thread::Current(), *Locks::oat_file_manager_lock_);
+ return FindOpenedOatFileFromOatLocationLocked(oat_location);
+}
+
+const OatFile* OatFileManager::FindOpenedOatFileFromOatLocationLocked(
+ const std::string& oat_location) const {
+ for (const std::unique_ptr<const OatFile>& oat_file : oat_files_) {
+ if (oat_file->GetLocation() == oat_location) {
+ return oat_file.get();
+ }
+ }
+ return nullptr;
+}
+
+const OatFile* OatFileManager::GetBootOatFile() const {
+ gc::space::ImageSpace* image_space = Runtime::Current()->GetHeap()->GetImageSpace();
+ if (image_space == nullptr) {
+ return nullptr;
+ }
+ return image_space->GetOatFile();
+}
+
+const OatFile* OatFileManager::GetPrimaryOatFile() const {
+ ReaderMutexLock mu(Thread::Current(), *Locks::oat_file_manager_lock_);
+ const OatFile* boot_oat_file = GetBootOatFile();
+ if (boot_oat_file != nullptr) {
+ for (const std::unique_ptr<const OatFile>& oat_file : oat_files_) {
+ if (oat_file.get() != boot_oat_file) {
+ return oat_file.get();
+ }
+ }
+ }
+ return nullptr;
+}
+
+OatFileManager::~OatFileManager() {
+ // Explicitly clear oat_files_ since the OatFile destructor calls back into OatFileManager for
+ // UnRegisterOatFileLocation.
+ oat_files_.clear();
+}
+
+const OatFile* OatFileManager::RegisterImageOatFile(gc::space::ImageSpace* space) {
+ return RegisterOatFile(space->ReleaseOatFile());
+}
+
+class DexFileAndClassPair : ValueObject {
+ public:
+ DexFileAndClassPair(const DexFile* dex_file, size_t current_class_index, bool from_loaded_oat)
+ : cached_descriptor_(GetClassDescriptor(dex_file, current_class_index)),
+ dex_file_(dex_file),
+ current_class_index_(current_class_index),
+ from_loaded_oat_(from_loaded_oat) {}
+
+ DexFileAndClassPair(const DexFileAndClassPair& rhs) = default;
+
+ DexFileAndClassPair& operator=(const DexFileAndClassPair& rhs) = default;
+
+ const char* GetCachedDescriptor() const {
+ return cached_descriptor_;
+ }
+
+ bool operator<(const DexFileAndClassPair& rhs) const {
+ const int cmp = strcmp(cached_descriptor_, rhs.cached_descriptor_);
+ if (cmp != 0) {
+ // Note that the order must be reversed. We want to iterate over the classes in dex files.
+ // They are sorted lexicographically. Thus, the priority-queue must be a min-queue.
+ return cmp > 0;
+ }
+ return dex_file_ < rhs.dex_file_;
+ }
+
+ bool DexFileHasMoreClasses() const {
+ return current_class_index_ + 1 < dex_file_->NumClassDefs();
+ }
+
+ void Next() {
+ ++current_class_index_;
+ cached_descriptor_ = GetClassDescriptor(dex_file_.get(), current_class_index_);
+ }
+
+ size_t GetCurrentClassIndex() const {
+ return current_class_index_;
+ }
+
+ bool FromLoadedOat() const {
+ return from_loaded_oat_;
+ }
+
+ const DexFile* GetDexFile() const {
+ return dex_file_.get();
+ }
+
+ private:
+ static const char* GetClassDescriptor(const DexFile* dex_file, size_t index) {
+ DCHECK(IsUint<16>(index));
+ const DexFile::ClassDef& class_def = dex_file->GetClassDef(static_cast<uint16_t>(index));
+ return dex_file->StringByTypeIdx(class_def.class_idx_);
+ }
+
+ const char* cached_descriptor_;
+ std::shared_ptr<const DexFile> dex_file_;
+ size_t current_class_index_;
+ bool from_loaded_oat_; // We only need to compare mismatches between what we load now
+ // and what was loaded before. Any old duplicates must have been
+ // OK, and any new "internal" duplicates are as well (they must
+ // be from multidex, which resolves correctly).
+};
+
+static void AddDexFilesFromOat(const OatFile* oat_file,
+ bool already_loaded,
+ /*out*/std::priority_queue<DexFileAndClassPair>* heap) {
+ for (const OatDexFile* oat_dex_file : oat_file->GetOatDexFiles()) {
+ std::string error;
+ std::unique_ptr<const DexFile> dex_file = oat_dex_file->OpenDexFile(&error);
+ if (dex_file == nullptr) {
+ LOG(WARNING) << "Could not create dex file from oat file: " << error;
+ } else if (dex_file->NumClassDefs() > 0U) {
+ heap->emplace(dex_file.release(), /*current_class_index*/0U, already_loaded);
+ }
+ }
+}
+
+static void AddNext(/*inout*/DexFileAndClassPair* original,
+ /*inout*/std::priority_queue<DexFileAndClassPair>* heap) {
+ if (original->DexFileHasMoreClasses()) {
+ original->Next();
+ heap->push(std::move(*original));
+ }
+}
+
+// Check for class-def collisions in dex files.
+//
+// This works by maintaining a heap with one class from each dex file, sorted by the class
+// descriptor. Then a dex-file/class pair is continually removed from the heap and compared
+// against the following top element. If the descriptor is the same, it is now checked whether
+// the two elements agree on whether their dex file was from an already-loaded oat-file or the
+// new oat file. Any disagreement indicates a collision.
+bool OatFileManager::HasCollisions(const OatFile* oat_file,
+ std::string* error_msg /*out*/) const {
+ DCHECK(oat_file != nullptr);
+ DCHECK(error_msg != nullptr);
+ if (!kDuplicateClassesCheck) {
+ return false;
+ }
+
+ // Dex files are registered late - once a class is actually being loaded. We have to compare
+ // against the open oat files. Take the oat_file_manager_lock_ that protects oat_files_ accesses.
+ ReaderMutexLock mu(Thread::Current(), *Locks::oat_file_manager_lock_);
+
+ std::priority_queue<DexFileAndClassPair> queue;
+
+ // Add dex files from already loaded oat files, but skip boot.
+ const OatFile* boot_oat = GetBootOatFile();
+ // The same OatFile can be loaded multiple times at different addresses. In this case, we don't
+ // need to check both against each other since they would have resolved the same way at compile
+ // time.
+ std::unordered_set<std::string> unique_locations;
+ for (const std::unique_ptr<const OatFile>& loaded_oat_file : oat_files_) {
+ DCHECK_NE(loaded_oat_file.get(), oat_file);
+ const std::string& location = loaded_oat_file->GetLocation();
+ if (loaded_oat_file.get() != boot_oat &&
+ location != oat_file->GetLocation() &&
+ unique_locations.find(location) == unique_locations.end()) {
+ unique_locations.insert(location);
+ AddDexFilesFromOat(loaded_oat_file.get(), /*already_loaded*/true, &queue);
+ }
+ }
+
+ if (queue.empty()) {
+ // No other oat files, return early.
+ return false;
+ }
+
+ // Add dex files from the oat file to check.
+ AddDexFilesFromOat(oat_file, /*already_loaded*/false, &queue);
+
+ // Now drain the queue.
+ while (!queue.empty()) {
+ // Modifying the top element is only safe if we pop right after.
+ DexFileAndClassPair compare_pop(queue.top());
+ queue.pop();
+
+ // Compare against the following elements.
+ while (!queue.empty()) {
+ DexFileAndClassPair top(queue.top());
+
+ if (strcmp(compare_pop.GetCachedDescriptor(), top.GetCachedDescriptor()) == 0) {
+ // Same descriptor. Check whether it's crossing old-oat-files to new-oat-files.
+ if (compare_pop.FromLoadedOat() != top.FromLoadedOat()) {
+ *error_msg =
+ StringPrintf("Found duplicated class when checking oat files: '%s' in %s and %s",
+ compare_pop.GetCachedDescriptor(),
+ compare_pop.GetDexFile()->GetLocation().c_str(),
+ top.GetDexFile()->GetLocation().c_str());
+ return true;
+ }
+ queue.pop();
+ AddNext(&top, &queue);
+ } else {
+ // Something else. Done here.
+ break;
+ }
+ }
+ AddNext(&compare_pop, &queue);
+ }
+
+ return false;
+}
+
+std::vector<std::unique_ptr<const DexFile>> OatFileManager::OpenDexFilesFromOat(
+ const char* dex_location,
+ const char* oat_location,
+ const OatFile** out_oat_file,
+ std::vector<std::string>* error_msgs) {
+ CHECK(dex_location != nullptr);
+ CHECK(error_msgs != nullptr);
+
+ // Verify we aren't holding the mutator lock, which could starve GC if we
+ // have to generate or relocate an oat file.
+ Locks::mutator_lock_->AssertNotHeld(Thread::Current());
+
+ OatFileAssistant oat_file_assistant(dex_location,
+ oat_location,
+ kRuntimeISA,
+ !Runtime::Current()->IsAotCompiler());
+
+ // Lock the target oat location to avoid races generating and loading the
+ // oat file.
+ std::string error_msg;
+ if (!oat_file_assistant.Lock(/*out*/&error_msg)) {
+ // Don't worry too much if this fails. If it does fail, it's unlikely we
+ // can generate an oat file anyway.
+ VLOG(class_linker) << "OatFileAssistant::Lock: " << error_msg;
+ }
+
+ const OatFile* source_oat_file = nullptr;
+
+ // Update the oat file on disk if we can. This may fail, but that's okay.
+ // Best effort is all that matters here.
+ if (!oat_file_assistant.MakeUpToDate(/*out*/&error_msg)) {
+ LOG(WARNING) << error_msg;
+ }
+
+ // Get the oat file on disk.
+ std::unique_ptr<const OatFile> oat_file(oat_file_assistant.GetBestOatFile().release());
+ if (oat_file != nullptr) {
+ // Take the file only if it has no collisions, or we must take it because of preopting.
+ bool accept_oat_file = !HasCollisions(oat_file.get(), /*out*/ &error_msg);
+ if (!accept_oat_file) {
+ // Failed the collision check. Print warning.
+ if (Runtime::Current()->IsDexFileFallbackEnabled()) {
+ LOG(WARNING) << "Found duplicate classes, falling back to interpreter mode for "
+ << dex_location;
+ } else {
+ LOG(WARNING) << "Found duplicate classes, dex-file-fallback disabled, will be failing to "
+ " load classes for " << dex_location;
+ }
+ LOG(WARNING) << error_msg;
+
+ // However, if the app was part of /system and preopted, there is no original dex file
+ // available. In that case grudgingly accept the oat file.
+ if (!DexFile::MaybeDex(dex_location)) {
+ accept_oat_file = true;
+ LOG(WARNING) << "Dex location " << dex_location << " does not seem to include dex file. "
+ << "Allow oat file use. This is potentially dangerous.";
+ }
+ }
+
+ if (accept_oat_file) {
+ VLOG(class_linker) << "Registering " << oat_file->GetLocation();
+ source_oat_file = RegisterOatFile(std::move(oat_file));
+ *out_oat_file = source_oat_file;
+ }
+ }
+
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
+
+ // Load the dex files from the oat file.
+ if (source_oat_file != nullptr) {
+ dex_files = oat_file_assistant.LoadDexFiles(*source_oat_file, dex_location);
+ if (dex_files.empty()) {
+ error_msgs->push_back("Failed to open dex files from " + source_oat_file->GetLocation());
+ }
+ }
+
+ // Fall back to running out of the original dex file if we couldn't load any
+ // dex_files from the oat file.
+ if (dex_files.empty()) {
+ if (oat_file_assistant.HasOriginalDexFiles()) {
+ if (Runtime::Current()->IsDexFileFallbackEnabled()) {
+ if (!DexFile::Open(dex_location, dex_location, /*out*/ &error_msg, &dex_files)) {
+ LOG(WARNING) << error_msg;
+ error_msgs->push_back("Failed to open dex files from " + std::string(dex_location));
+ }
+ } else {
+ error_msgs->push_back("Fallback mode disabled, skipping dex files.");
+ }
+ } else {
+ error_msgs->push_back("No original dex files found for dex location "
+ + std::string(dex_location));
+ }
+ }
+ return dex_files;
+}
+
+bool OatFileManager::RegisterOatFileLocation(const std::string& oat_location) {
+ WriterMutexLock mu(Thread::Current(), *Locks::oat_file_count_lock_);
+ auto it = oat_file_count_.find(oat_location);
+ if (it != oat_file_count_.end()) {
+ ++it->second;
+ return false;
+ }
+ oat_file_count_.insert(std::pair<std::string, size_t>(oat_location, 1u));
+ return true;
+}
+
+void OatFileManager::UnRegisterOatFileLocation(const std::string& oat_location) {
+ WriterMutexLock mu(Thread::Current(), *Locks::oat_file_count_lock_);
+ auto it = oat_file_count_.find(oat_location);
+ if (it != oat_file_count_.end()) {
+ --it->second;
+ if (it->second == 0) {
+ oat_file_count_.erase(it);
+ }
+ }
+}
+
+} // namespace art
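HasCollisions() above drains a priority queue that holds one (descriptor, dex file) cursor per dex file; because std::priority_queue is a max-heap, DexFileAndClassPair reverses the strcmp result so the lexicographically smallest descriptor surfaces first. A stripped-down sketch of the same drain-and-compare idea over plain strings, with one entry per class and a bool marking whether it came from an already-loaded oat file:

    #include <queue>
    #include <string>
    #include <vector>

    struct Entry {
      std::string descriptor;
      bool from_loaded_oat;
      // Reversed comparison: std::priority_queue is a max-heap, so "greater"
      // ordering here makes the smallest descriptor pop first (a min-queue).
      bool operator<(const Entry& rhs) const { return descriptor > rhs.descriptor; }
    };

    // Returns true if the same descriptor appears both in an already-loaded oat
    // file and in the newly opened one.
    bool HasCollisions(const std::vector<Entry>& entries) {
      std::priority_queue<Entry> queue(entries.begin(), entries.end());
      while (!queue.empty()) {
        Entry current = queue.top();
        queue.pop();
        while (!queue.empty() && queue.top().descriptor == current.descriptor) {
          if (queue.top().from_loaded_oat != current.from_loaded_oat) {
            return true;  // same class defined across old and new oat files
          }
          queue.pop();
        }
      }
      return false;
    }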
diff --git a/runtime/oat_file_manager.h b/runtime/oat_file_manager.h
new file mode 100644
index 0000000000..af7efb4262
--- /dev/null
+++ b/runtime/oat_file_manager.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_OAT_FILE_MANAGER_H_
+#define ART_RUNTIME_OAT_FILE_MANAGER_H_
+
+#include <memory>
+#include <set>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+#include "base/macros.h"
+#include "base/mutex.h"
+
+namespace art {
+
+namespace gc {
+namespace space {
+class ImageSpace;
+} // namespace space
+} // namespace gc
+
+class DexFile;
+class OatFile;
+
+// Class for dealing with oat file management.
+//
+// This class knows about all the loaded oat files and provides utility functions. The oat file
+// pointers returned from functions are always valid.
+class OatFileManager {
+ public:
+ OatFileManager() : have_non_pic_oat_file_(false) {}
+ ~OatFileManager();
+
+ // Adds an oat file to the internal accounting; aborts (std::abort) if an oat file with the same
+ // base address is already registered. Returns the raw pointer taken from oat_file.
+ const OatFile* RegisterOatFile(std::unique_ptr<const OatFile> oat_file)
+ REQUIRES(!Locks::oat_file_manager_lock_);
+
+ void UnRegisterAndDeleteOatFile(const OatFile* oat_file)
+ REQUIRES(!Locks::oat_file_manager_lock_);
+
+ // Finds the first opened oat file with the same location; returns null if there is none.
+ const OatFile* FindOpenedOatFileFromOatLocation(const std::string& oat_location) const
+ REQUIRES(!Locks::oat_file_manager_lock_);
+
+ // Attempts to reserve a location; returns false if it is already reserved or already in use by
+ // an oat file.
+ bool RegisterOatFileLocation(const std::string& oat_location)
+ REQUIRES(!Locks::oat_file_count_lock_);
+
+ // Unreserves an oat file location; should only be used for error cases, since RegisterOatFile
+ // will remove the reserved location.
+ void UnRegisterOatFileLocation(const std::string& oat_location)
+ REQUIRES(!Locks::oat_file_count_lock_);
+
+ // Returns true if we have a non-PIC oat file.
+ bool HaveNonPicOatFile() const {
+ return have_non_pic_oat_file_;
+ }
+
+ // Returns the boot image oat file.
+ const OatFile* GetBootOatFile() const;
+
+ // Returns the first non-image oat file in the class path.
+ const OatFile* GetPrimaryOatFile() const REQUIRES(!Locks::oat_file_manager_lock_);
+
+ // Returns the oat file for an image and registers it. Takes ownership of the image space's
+ // underlying oat file.
+ const OatFile* RegisterImageOatFile(gc::space::ImageSpace* space)
+ REQUIRES(!Locks::oat_file_manager_lock_);
+
+ // Finds or creates the oat file holding dex_location. Then loads and returns
+ // all corresponding dex files (there may be more than one dex file loaded
+ // in the case of multidex).
+ // This may return the original, unquickened dex files if the oat file could
+ // not be generated.
+ //
+ // Returns an empty vector if the dex files could not be loaded. In this
+ // case, there will be at least one error message returned describing why the
+ // dex files could not be loaded. The 'error_msgs' argument must not be
+ // null, regardless of whether there is an error or not.
+ //
+ // This method should not be called with the mutator_lock_ held, because it
+ // could end up starving GC if we need to generate or relocate any oat
+ // files.
+ std::vector<std::unique_ptr<const DexFile>> OpenDexFilesFromOat(
+ const char* dex_location,
+ const char* oat_location,
+ /*out*/ const OatFile** out_oat_file,
+ /*out*/ std::vector<std::string>* error_msgs)
+ REQUIRES(!Locks::oat_file_manager_lock_, !Locks::mutator_lock_);
+
+ private:
+ // Check for duplicate class definitions of the given oat file against all open oat files.
+ // Return true if there are any class definition collisions in the oat_file.
+ bool HasCollisions(const OatFile* oat_file, /*out*/std::string* error_msg) const
+ REQUIRES(!Locks::oat_file_manager_lock_);
+
+ const OatFile* FindOpenedOatFileFromOatLocationLocked(const std::string& oat_location) const
+ REQUIRES(Locks::oat_file_manager_lock_);
+
+ std::set<std::unique_ptr<const OatFile>> oat_files_ GUARDED_BY(Locks::oat_file_manager_lock_);
+ std::unordered_map<std::string, size_t> oat_file_count_ GUARDED_BY(Locks::oat_file_count_lock_);
+ bool have_non_pic_oat_file_;
+ DISALLOW_COPY_AND_ASSIGN(OatFileManager);
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_OAT_FILE_MANAGER_H_
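RegisterOatFileLocation and UnRegisterOatFileLocation implement a simple reference count per oat location: the first registration of a location returns true (the caller has reserved it), later ones bump the count and return false, and unregistration drops the entry once the count reaches zero. A self-contained sketch of that bookkeeping, with a std::mutex standing in for Locks::oat_file_count_lock_:

    #include <cstddef>
    #include <mutex>
    #include <string>
    #include <unordered_map>

    class OatLocationRegistry {
     public:
      // Returns true only for the first registration of a location.
      bool Register(const std::string& location) {
        std::lock_guard<std::mutex> lock(mutex_);
        auto it = counts_.find(location);
        if (it != counts_.end()) {
          ++it->second;
          return false;
        }
        counts_.emplace(location, 1u);
        return true;
      }

      // Decrements the count and erases the entry once it reaches zero.
      void Unregister(const std::string& location) {
        std::lock_guard<std::mutex> lock(mutex_);
        auto it = counts_.find(location);
        if (it != counts_.end() && --it->second == 0u) {
          counts_.erase(it);
        }
      }

     private:
      std::mutex mutex_;
      std::unordered_map<std::string, size_t> counts_;
    };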
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 50e2053c73..ae16c7f373 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -269,10 +269,10 @@ std::unique_ptr<RuntimeParser> ParsedOptions::MakeParser(bool ignore_unrecognize
.Define("-Xfingerprint:_")
.WithType<std::string>()
.IntoKey(M::Fingerprint)
- .Define({"-Xexperimental-lambdas", "-Xnoexperimental-lambdas"})
- .WithType<bool>()
- .WithValues({true, false})
- .IntoKey(M::ExperimentalLambdas)
+ .Define("-Xexperimental:_")
+ .WithType<ExperimentalFlags>()
+ .AppendValues()
+ .IntoKey(M::Experimental)
.Ignore({
"-ea", "-da", "-enableassertions", "-disableassertions", "--runtime-arg", "-esa",
"-dsa", "-enablesystemassertions", "-disablesystemassertions", "-Xrs", "-Xint:_",
@@ -557,7 +557,14 @@ bool ParsedOptions::Parse(const RuntimeOptions& options, bool ignore_unrecognize
args.Set(M::HeapGrowthLimit, args.GetOrDefault(M::MemoryMaximumSize));
}
- if (args.GetOrDefault(M::ExperimentalLambdas)) {
+ if (args.GetOrDefault(M::Experimental) & ExperimentalFlags::kDefaultMethods) {
+ LOG(WARNING) << "Default method support has been enabled. The verifier will be less strict "
+ << "in some cases. All existing invoke opcodes have an unstable updated "
+ << "specification and are nearly guaranteed to change over time. Do not attempt "
+ << "to write shipping code against the invoke opcodes with this flag.";
+ }
+
+ if (args.GetOrDefault(M::Experimental) & ExperimentalFlags::kLambdas) {
LOG(WARNING) << "Experimental lambdas have been enabled. All lambda opcodes have "
<< "an unstable specification and are nearly guaranteed to change over time. "
<< "Do not attempt to write shipping code against these opcodes.";
@@ -682,8 +689,8 @@ void ParsedOptions::Usage(const char* fmt, ...) {
UsageMessage(stream, " -X[no]image-dex2oat (Whether to create and use a boot image)\n");
UsageMessage(stream, " -Xno-dex-file-fallback "
"(Don't fall back to dex files without oat files)\n");
- UsageMessage(stream, " -X[no]experimental-lambdas\n"
- " (Enable new experimental dalvik opcodes, off by default)\n");
+ UsageMessage(stream, " -Xexperimental:{lambdas,default-methods} "
+ "(Enable new experimental dalvik opcodes and semantics, off by default)\n");
UsageMessage(stream, "\n");
UsageMessage(stream, "The following previously supported Dalvik options are ignored:\n");
diff --git a/runtime/quick/inline_method_analyser.h b/runtime/quick/inline_method_analyser.h
index 64c2249925..837662d879 100644
--- a/runtime/quick/inline_method_analyser.h
+++ b/runtime/quick/inline_method_analyser.h
@@ -74,6 +74,7 @@ enum InlineMethodOpcode : uint16_t {
kIntrinsicUnsafeGet,
kIntrinsicUnsafePut,
kIntrinsicSystemArrayCopyCharArray,
+ kIntrinsicSystemArrayCopy,
kInlineOpNop,
kInlineOpReturnArg,
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 63f43cf3b2..7ba19ab8d6 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -17,6 +17,7 @@
#include "quick_exception_handler.h"
#include "arch/context.h"
+#include "art_code.h"
#include "art_method-inl.h"
#include "dex_instruction.h"
#include "entrypoints/entrypoint_utils.h"
@@ -26,6 +27,7 @@
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/throwable.h"
+#include "stack_map.h"
#include "verifier/method_verifier.h"
namespace art {
@@ -99,7 +101,7 @@ class CatchBlockStackVisitor FINAL : public StackVisitor {
exception_handler_->SetHandlerMethod(method);
exception_handler_->SetHandlerDexPc(found_dex_pc);
exception_handler_->SetHandlerQuickFramePc(
- method->ToNativeQuickPc(found_dex_pc, /* is_catch_handler */ true));
+ GetCurrentCode().ToNativeQuickPc(found_dex_pc, /* is_catch_handler */ true));
exception_handler_->SetHandlerQuickFrame(GetCurrentQuickFrame());
return false; // End stack walk.
} else if (UNLIKELY(GetThread()->HasDebuggerShadowFrames())) {
@@ -159,7 +161,7 @@ void QuickExceptionHandler::FindCatch(mirror::Throwable* exception) {
// If the handler is in optimized code, we need to set the catch environment.
if (*handler_quick_frame_ != nullptr &&
handler_method_ != nullptr &&
- handler_method_->IsOptimized(sizeof(void*))) {
+ ArtCode(handler_quick_frame_).IsOptimized(sizeof(void*))) {
SetCatchEnvironmentForOptimizedHandler(&visitor);
}
}
@@ -200,14 +202,14 @@ static VRegKind ToVRegKind(DexRegisterLocation::Kind kind) {
void QuickExceptionHandler::SetCatchEnvironmentForOptimizedHandler(StackVisitor* stack_visitor) {
DCHECK(!is_deoptimization_);
DCHECK(*handler_quick_frame_ != nullptr) << "Method should not be called on upcall exceptions";
- DCHECK(handler_method_ != nullptr && handler_method_->IsOptimized(sizeof(void*)));
+ DCHECK(handler_method_ != nullptr && ArtCode(handler_quick_frame_).IsOptimized(sizeof(void*)));
if (kDebugExceptionDelivery) {
self_->DumpStack(LOG(INFO) << "Setting catch phis: ");
}
const size_t number_of_vregs = handler_method_->GetCodeItem()->registers_size_;
- CodeInfo code_info = handler_method_->GetOptimizedCodeInfo();
+ CodeInfo code_info = ArtCode(handler_quick_frame_).GetOptimizedCodeInfo();
StackMapEncoding encoding = code_info.ExtractEncoding();
// Find stack map of the throwing instruction.
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 1f447d076b..6c459a3950 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -64,6 +64,7 @@
#include "debugger.h"
#include "elf_file.h"
#include "entrypoints/runtime_asm_entrypoints.h"
+#include "experimental_flags.h"
#include "fault_handler.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/heap.h"
@@ -115,6 +116,7 @@
#include "native/sun_misc_Unsafe.h"
#include "native_bridge_art_interface.h"
#include "oat_file.h"
+#include "oat_file_manager.h"
#include "os.h"
#include "parsed_options.h"
#include "profiler.h"
@@ -138,6 +140,12 @@ namespace art {
// If a signal isn't handled properly, enable a handler that attempts to dump the Java stack.
static constexpr bool kEnableJavaStackTraceHandler = false;
+// Tuned by compiling GmsCore under perf and measuring time spent in DescriptorEquals for class
+// linking.
+static constexpr double kLowMemoryMinLoadFactor = 0.5;
+static constexpr double kLowMemoryMaxLoadFactor = 0.8;
+static constexpr double kNormalMinLoadFactor = 0.4;
+static constexpr double kNormalMaxLoadFactor = 0.7;
Runtime* Runtime::instance_ = nullptr;
struct TraceConfig {
@@ -198,7 +206,9 @@ Runtime::Runtime()
no_sig_chain_(false),
is_native_bridge_loaded_(false),
zygote_max_failed_boots_(0),
- experimental_lambdas_(false) {
+ experimental_flags_(ExperimentalFlags::kNone),
+ oat_file_manager_(nullptr),
+ is_low_memory_mode_(false) {
CheckAsmSupportOffsetsAndSizes();
std::fill(callee_save_methods_, callee_save_methods_ + arraysize(callee_save_methods_), 0u);
}
@@ -284,6 +294,7 @@ Runtime::~Runtime() {
delete heap_;
delete intern_table_;
delete java_vm_;
+ delete oat_file_manager_;
Thread::Shutdown();
QuasiAtomic::Shutdown();
verifier::MethodVerifier::Shutdown();
@@ -698,7 +709,7 @@ bool Runtime::IsShuttingDown(Thread* self) {
}
bool Runtime::IsDebuggable() const {
- const OatFile* oat_file = GetClassLinker()->GetPrimaryOatFile();
+ const OatFile* oat_file = GetOatFileManager().GetPrimaryOatFile();
return oat_file != nullptr && oat_file->IsDebuggable();
}
@@ -756,9 +767,9 @@ static bool OpenDexFilesFromImage(const std::string& image_location,
if (elf_file.get() == nullptr) {
return false;
}
- std::unique_ptr<OatFile> oat_file(OatFile::OpenWithElfFile(elf_file.release(), oat_location,
- nullptr, &error_msg));
- if (oat_file.get() == nullptr) {
+ std::unique_ptr<const OatFile> oat_file(
+ OatFile::OpenWithElfFile(elf_file.release(), oat_location, nullptr, &error_msg));
+ if (oat_file == nullptr) {
LOG(INFO) << "Unable to use '" << oat_filename << "' because " << error_msg;
return false;
}
@@ -775,7 +786,7 @@ static bool OpenDexFilesFromImage(const std::string& image_location,
dex_files->push_back(std::move(dex_file));
}
}
- Runtime::Current()->GetClassLinker()->RegisterOatFile(oat_file.release());
+ Runtime::Current()->GetOatFileManager().RegisterOatFile(std::move(oat_file));
return true;
}
@@ -831,6 +842,8 @@ bool Runtime::Init(const RuntimeOptions& raw_options, bool ignore_unrecognized)
QuasiAtomic::Startup();
+ oat_file_manager_ = new OatFileManager;
+
Monitor::Init(runtime_options.GetOrDefault(Opt::LockProfThreshold),
runtime_options.GetOrDefault(Opt::HookIsSensitiveThread));
@@ -880,7 +893,8 @@ bool Runtime::Init(const RuntimeOptions& raw_options, bool ignore_unrecognized)
}
zygote_max_failed_boots_ = runtime_options.GetOrDefault(Opt::ZygoteMaxFailedBoots);
- experimental_lambdas_ = runtime_options.GetOrDefault(Opt::ExperimentalLambdas);
+ experimental_flags_ = runtime_options.GetOrDefault(Opt::Experimental);
+ is_low_memory_mode_ = runtime_options.Exists(Opt::LowMemoryMode);
XGcOption xgc_option = runtime_options.GetOrDefault(Opt::GcOption);
ATRACE_BEGIN("CreateHeap");
@@ -1426,6 +1440,7 @@ void Runtime::VisitConcurrentRoots(RootVisitor* visitor, VisitRootFlags flags) {
// Guaranteed to have no new roots in the constant roots.
VisitConstantRoots(visitor);
}
+ Dbg::VisitRoots(visitor);
}
void Runtime::VisitTransactionRoots(RootVisitor* visitor) {
@@ -1798,4 +1813,12 @@ LinearAlloc* Runtime::CreateLinearAlloc() {
: new LinearAlloc(arena_pool_.get());
}
+double Runtime::GetHashTableMinLoadFactor() const {
+ return is_low_memory_mode_ ? kLowMemoryMinLoadFactor : kNormalMinLoadFactor;
+}
+
+double Runtime::GetHashTableMaxLoadFactor() const {
+ return is_low_memory_mode_ ? kLowMemoryMaxLoadFactor : kNormalMaxLoadFactor;
+}
+
} // namespace art
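Editor's note: the low-memory load factors trade CPU (more probing, hence more DescriptorEquals work per lookup) for a smaller hash-table footprint. The standalone sketch below shows how a min/max load-factor pair is typically consumed when sizing a resizable table; it is illustrative only and is not ART's HashSet implementation.

#include <cstddef>
#include <iostream>

// Illustrative only: pick a capacity so the table lands between the two bounds.
size_t CapacityFor(size_t num_elements, double min_lf, double max_lf) {
  // Aim roughly midway between the bounds so neither threshold is hit immediately.
  double target_lf = (min_lf + max_lf) / 2.0;
  return static_cast<size_t>(num_elements / target_lf) + 1;
}

int main() {
  constexpr double kLowMemoryMinLoadFactor = 0.5, kLowMemoryMaxLoadFactor = 0.8;
  constexpr double kNormalMinLoadFactor = 0.4, kNormalMaxLoadFactor = 0.7;
  std::cout << "low-memory capacity for 1000 elements: "
            << CapacityFor(1000, kLowMemoryMinLoadFactor, kLowMemoryMaxLoadFactor) << "\n"
            << "normal capacity for 1000 elements:     "
            << CapacityFor(1000, kNormalMinLoadFactor, kNormalMaxLoadFactor) << "\n";
  return 0;
}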
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 6154c34ec5..7b1fdb21c4 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -28,6 +28,7 @@
#include "arch/instruction_set.h"
#include "base/macros.h"
+#include "experimental_flags.h"
#include "gc_root.h"
#include "instrumentation.h"
#include "jobject_comparator.h"
@@ -82,6 +83,7 @@ class LinearAlloc;
class MonitorList;
class MonitorPool;
class NullPointerHandler;
+class OatFileManager;
class SignalCatcher;
class StackOverflowHandler;
class SuspensionHandler;
@@ -531,8 +533,8 @@ class Runtime {
return zygote_max_failed_boots_;
}
- bool AreExperimentalLambdasEnabled() const {
- return experimental_lambdas_;
+ bool AreExperimentalFlagsEnabled(ExperimentalFlags flags) {
+ return (experimental_flags_ & flags) != ExperimentalFlags::kNone;
}
lambda::BoxTable* GetLambdaBoxTable() const {
@@ -573,6 +575,14 @@ class Runtime {
// Create a normal LinearAlloc or low 4gb version if we are 64 bit AOT compiler.
LinearAlloc* CreateLinearAlloc();
+ OatFileManager& GetOatFileManager() const {
+ DCHECK(oat_file_manager_ != nullptr);
+ return *oat_file_manager_;
+ }
+
+ double GetHashTableMinLoadFactor() const;
+ double GetHashTableMaxLoadFactor() const;
+
private:
static void InitPlatformSignalHandlers();
@@ -763,13 +773,19 @@ class Runtime {
// eventually publish them as public-usable opcodes, but they aren't ready yet.
//
// Experimental opcodes should not be used by other production code.
- bool experimental_lambdas_;
+ ExperimentalFlags experimental_flags_;
MethodRefToStringInitRegMap method_ref_string_init_reg_map_;
// Contains the build fingerprint, if given as a parameter.
std::string fingerprint_;
+ // Oat file manager, keeps track of what oat files are open.
+ OatFileManager* oat_file_manager_;
+
+ // Whether or not we are on a low RAM device.
+ bool is_low_memory_mode_;
+
DISALLOW_COPY_AND_ASSIGN(Runtime);
};
std::ostream& operator<<(std::ostream& os, const Runtime::CalleeSaveType& rhs);
diff --git a/runtime/runtime_linux.cc b/runtime/runtime_linux.cc
index f0b3c4e4cb..44a13c9020 100644
--- a/runtime/runtime_linux.cc
+++ b/runtime/runtime_linux.cc
@@ -41,7 +41,7 @@ struct Backtrace {
public:
explicit Backtrace(void* raw_context) : raw_context_(raw_context) {}
void Dump(std::ostream& os) const {
- DumpNativeStack(os, GetTid(), "\t", nullptr, raw_context_);
+ DumpNativeStack(os, GetTid(), "\t", nullptr, nullptr, raw_context_);
}
private:
// Stores the context of the signal that was unexpected and will terminate the runtime. The
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index d88e84b602..7b5bc1ad9a 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -114,7 +114,7 @@ RUNTIME_OPTIONS_KEY (unsigned int, ZygoteMaxFailedBoots, 10)
RUNTIME_OPTIONS_KEY (Unit, NoDexFileFallback)
RUNTIME_OPTIONS_KEY (std::string, CpuAbiList)
RUNTIME_OPTIONS_KEY (std::string, Fingerprint)
-RUNTIME_OPTIONS_KEY (bool, ExperimentalLambdas, false) // -X[no]experimental-lambdas
+RUNTIME_OPTIONS_KEY (ExperimentalFlags, Experimental, ExperimentalFlags::kNone) // -Xexperimental:{, lambdas, default-methods}
// Not parse-able from command line, but can be provided explicitly.
// (Do not add anything here that is defined in ParsedOptions::MakeParser)
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 1d21a6494a..d8d916c7ee 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -17,6 +17,7 @@
#include "stack.h"
#include "arch/context.h"
+#include "art_code.h"
#include "art_method-inl.h"
#include "base/hex_dump.h"
#include "entrypoints/entrypoint_utils-inl.h"
@@ -110,9 +111,9 @@ StackVisitor::StackVisitor(Thread* thread,
}
InlineInfo StackVisitor::GetCurrentInlineInfo() const {
- ArtMethod* outer_method = GetOuterMethod();
- uint32_t native_pc_offset = outer_method->NativeQuickPcOffset(cur_quick_frame_pc_);
- CodeInfo code_info = outer_method->GetOptimizedCodeInfo();
+ ArtCode outer_code = GetCurrentCode();
+ uint32_t native_pc_offset = outer_code.NativeQuickPcOffset(cur_quick_frame_pc_);
+ CodeInfo code_info = outer_code.GetOptimizedCodeInfo();
StackMapEncoding encoding = code_info.ExtractEncoding();
StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
DCHECK(stack_map.IsValid());
@@ -142,7 +143,7 @@ uint32_t StackVisitor::GetDexPc(bool abort_on_failure) const {
size_t depth_in_stack_map = current_inlining_depth_ - 1;
return GetCurrentInlineInfo().GetDexPcAtDepth(depth_in_stack_map);
} else {
- return GetMethod()->ToDexPc(cur_quick_frame_pc_, abort_on_failure);
+ return GetCurrentCode().ToDexPc(cur_quick_frame_pc_, abort_on_failure);
}
} else {
return 0;
@@ -160,7 +161,8 @@ mirror::Object* StackVisitor::GetThisObject() const {
} else if (m->IsNative()) {
if (cur_quick_frame_ != nullptr) {
HandleScope* hs = reinterpret_cast<HandleScope*>(
- reinterpret_cast<char*>(cur_quick_frame_) + m->GetHandleScopeOffset().SizeValue());
+ reinterpret_cast<char*>(cur_quick_frame_) +
+ GetCurrentCode().GetHandleScopeOffset().SizeValue());
return hs->GetReference(0);
} else {
return cur_shadow_frame_->GetVRegReference(0);
@@ -190,7 +192,7 @@ mirror::Object* StackVisitor::GetThisObject() const {
size_t StackVisitor::GetNativePcOffset() const {
DCHECK(!IsShadowFrame());
- return GetMethod()->NativeQuickPcOffset(cur_quick_frame_pc_);
+ return GetCurrentCode().NativeQuickPcOffset(cur_quick_frame_pc_);
}
bool StackVisitor::IsReferenceVReg(ArtMethod* m, uint16_t vreg) {
@@ -199,10 +201,10 @@ bool StackVisitor::IsReferenceVReg(ArtMethod* m, uint16_t vreg) {
if (m->IsNative() || m->IsRuntimeMethod() || m->IsProxyMethod()) {
return false;
}
- if (GetOuterMethod()->IsOptimized(sizeof(void*))) {
+ if (GetCurrentCode().IsOptimized(sizeof(void*))) {
return true; // TODO: Implement.
}
- const uint8_t* native_gc_map = m->GetNativeGcMap(sizeof(void*));
+ const uint8_t* native_gc_map = GetCurrentCode().GetNativeGcMap(sizeof(void*));
CHECK(native_gc_map != nullptr) << PrettyMethod(m);
const DexFile::CodeItem* code_item = m->GetCodeItem();
// Can't be null or how would we compile its instructions?
@@ -211,9 +213,7 @@ bool StackVisitor::IsReferenceVReg(ArtMethod* m, uint16_t vreg) {
size_t num_regs = std::min(map.RegWidth() * 8, static_cast<size_t>(code_item->registers_size_));
const uint8_t* reg_bitmap = nullptr;
if (num_regs > 0) {
- Runtime* runtime = Runtime::Current();
- const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(m, sizeof(void*));
- uintptr_t native_pc_offset = m->NativeQuickPcOffset(GetCurrentQuickFramePc(), entry_point);
+ uintptr_t native_pc_offset = GetCurrentCode().NativeQuickPcOffset(GetCurrentQuickFramePc());
reg_bitmap = map.FindBitMap(native_pc_offset);
DCHECK(reg_bitmap != nullptr);
}
@@ -252,7 +252,7 @@ bool StackVisitor::GetVReg(ArtMethod* m, uint16_t vreg, VRegKind kind, uint32_t*
if (GetVRegFromDebuggerShadowFrame(vreg, kind, val)) {
return true;
}
- if (GetOuterMethod()->IsOptimized(sizeof(void*))) {
+ if (GetCurrentCode().IsOptimized(sizeof(void*))) {
return GetVRegFromOptimizedCode(m, vreg, kind, val);
} else {
return GetVRegFromQuickCode(m, vreg, kind, val);
@@ -266,10 +266,9 @@ bool StackVisitor::GetVReg(ArtMethod* m, uint16_t vreg, VRegKind kind, uint32_t*
bool StackVisitor::GetVRegFromQuickCode(ArtMethod* m, uint16_t vreg, VRegKind kind,
uint32_t* val) const {
- const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
- DCHECK(code_pointer != nullptr);
- const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*)));
- QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
+ DCHECK_EQ(m, GetMethod());
+ const VmapTable vmap_table(GetCurrentCode().GetVmapTable(sizeof(void*)));
+ QuickMethodFrameInfo frame_info = GetCurrentCode().GetQuickFrameInfo();
uint32_t vmap_offset;
// TODO: IsInContext stops before spotting floating point registers.
if (vmap_table.IsInContext(vreg, kind, &vmap_offset)) {
@@ -289,19 +288,16 @@ bool StackVisitor::GetVRegFromQuickCode(ArtMethod* m, uint16_t vreg, VRegKind ki
bool StackVisitor::GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKind kind,
uint32_t* val) const {
- ArtMethod* outer_method = GetOuterMethod();
- const void* code_pointer = outer_method->GetQuickOatCodePointer(sizeof(void*));
- DCHECK(code_pointer != nullptr);
DCHECK_EQ(m, GetMethod());
const DexFile::CodeItem* code_item = m->GetCodeItem();
DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be null or how would we compile
// its instructions?
uint16_t number_of_dex_registers = code_item->registers_size_;
DCHECK_LT(vreg, code_item->registers_size_);
- CodeInfo code_info = outer_method->GetOptimizedCodeInfo();
+ CodeInfo code_info = GetCurrentCode().GetOptimizedCodeInfo();
StackMapEncoding encoding = code_info.ExtractEncoding();
- uint32_t native_pc_offset = outer_method->NativeQuickPcOffset(cur_quick_frame_pc_);
+ uint32_t native_pc_offset = GetCurrentCode().NativeQuickPcOffset(cur_quick_frame_pc_);
StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
DCHECK(stack_map.IsValid());
size_t depth_in_stack_map = current_inlining_depth_ - 1;
@@ -406,7 +402,7 @@ bool StackVisitor::GetVRegPair(ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
if (cur_quick_frame_ != nullptr) {
DCHECK(context_ != nullptr); // You can't reliably read registers without a context.
DCHECK(m == GetMethod());
- if (GetOuterMethod()->IsOptimized(sizeof(void*))) {
+ if (GetCurrentCode().IsOptimized(sizeof(void*))) {
return GetVRegPairFromOptimizedCode(m, vreg, kind_lo, kind_hi, val);
} else {
return GetVRegPairFromQuickCode(m, vreg, kind_lo, kind_hi, val);
@@ -420,10 +416,9 @@ bool StackVisitor::GetVRegPair(ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
bool StackVisitor::GetVRegPairFromQuickCode(ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
VRegKind kind_hi, uint64_t* val) const {
- const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
- DCHECK(code_pointer != nullptr);
- const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*)));
- QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
+ DCHECK_EQ(m, GetMethod());
+ const VmapTable vmap_table(GetCurrentCode().GetVmapTable(sizeof(void*)));
+ QuickMethodFrameInfo frame_info = GetCurrentCode().GetQuickFrameInfo();
uint32_t vmap_offset_lo, vmap_offset_hi;
// TODO: IsInContext stops before spotting floating point registers.
if (vmap_table.IsInContext(vreg, kind_lo, &vmap_offset_lo) &&
@@ -482,7 +477,7 @@ bool StackVisitor::SetVReg(ArtMethod* m, uint16_t vreg, uint32_t new_value,
if (cur_quick_frame_ != nullptr) {
DCHECK(context_ != nullptr); // You can't reliably write registers without a context.
DCHECK(m == GetMethod());
- if (GetOuterMethod()->IsOptimized(sizeof(void*))) {
+ if (GetCurrentCode().IsOptimized(sizeof(void*))) {
return false;
} else {
return SetVRegFromQuickCode(m, vreg, new_value, kind);
@@ -497,10 +492,8 @@ bool StackVisitor::SetVRegFromQuickCode(ArtMethod* m, uint16_t vreg, uint32_t ne
VRegKind kind) {
DCHECK(context_ != nullptr); // You can't reliably write registers without a context.
DCHECK(m == GetMethod());
- const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
- DCHECK(code_pointer != nullptr);
- const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*)));
- QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
+ const VmapTable vmap_table(GetCurrentCode().GetVmapTable(sizeof(void*)));
+ QuickMethodFrameInfo frame_info = GetCurrentCode().GetQuickFrameInfo();
uint32_t vmap_offset;
// TODO: IsInContext stops before spotting floating point registers.
if (vmap_table.IsInContext(vreg, kind, &vmap_offset)) {
@@ -591,7 +584,7 @@ bool StackVisitor::SetVRegPair(ArtMethod* m, uint16_t vreg, uint64_t new_value,
if (cur_quick_frame_ != nullptr) {
DCHECK(context_ != nullptr); // You can't reliably write registers without a context.
DCHECK(m == GetMethod());
- if (GetOuterMethod()->IsOptimized(sizeof(void*))) {
+ if (GetCurrentCode().IsOptimized(sizeof(void*))) {
return false;
} else {
return SetVRegPairFromQuickCode(m, vreg, new_value, kind_lo, kind_hi);
@@ -605,10 +598,9 @@ bool StackVisitor::SetVRegPair(ArtMethod* m, uint16_t vreg, uint64_t new_value,
bool StackVisitor::SetVRegPairFromQuickCode(
ArtMethod* m, uint16_t vreg, uint64_t new_value, VRegKind kind_lo, VRegKind kind_hi) {
- const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
- DCHECK(code_pointer != nullptr);
- const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*)));
- QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
+ DCHECK_EQ(m, GetMethod());
+ const VmapTable vmap_table(GetCurrentCode().GetVmapTable(sizeof(void*)));
+ QuickMethodFrameInfo frame_info = GetCurrentCode().GetQuickFrameInfo();
uint32_t vmap_offset_lo, vmap_offset_hi;
// TODO: IsInContext stops before spotting floating point registers.
if (vmap_table.IsInContext(vreg, kind_lo, &vmap_offset_lo) &&
@@ -725,14 +717,14 @@ void StackVisitor::SetFPR(uint32_t reg, uintptr_t value) {
uintptr_t StackVisitor::GetReturnPc() const {
uint8_t* sp = reinterpret_cast<uint8_t*>(GetCurrentQuickFrame());
DCHECK(sp != nullptr);
- uint8_t* pc_addr = sp + GetOuterMethod()->GetReturnPcOffset().SizeValue();
+ uint8_t* pc_addr = sp + GetCurrentCode().GetReturnPcOffset().SizeValue();
return *reinterpret_cast<uintptr_t*>(pc_addr);
}
void StackVisitor::SetReturnPc(uintptr_t new_ret_pc) {
uint8_t* sp = reinterpret_cast<uint8_t*>(GetCurrentQuickFrame());
CHECK(sp != nullptr);
- uint8_t* pc_addr = sp + GetOuterMethod()->GetReturnPcOffset().SizeValue();
+ uint8_t* pc_addr = sp + GetCurrentCode().GetReturnPcOffset().SizeValue();
*reinterpret_cast<uintptr_t*>(pc_addr) = new_ret_pc;
}
@@ -867,9 +859,9 @@ void StackVisitor::SanityCheckFrame() const {
}
}
if (cur_quick_frame_ != nullptr) {
- method->AssertPcIsWithinQuickCode(cur_quick_frame_pc_);
+ GetCurrentCode().AssertPcIsWithinQuickCode(cur_quick_frame_pc_);
// Frame sanity.
- size_t frame_size = method->GetFrameSizeInBytes();
+ size_t frame_size = GetCurrentCode().GetFrameSizeInBytes();
CHECK_NE(frame_size, 0u);
// A rough guess at an upper size we expect to see for a frame.
// 256 registers
@@ -880,7 +872,7 @@ void StackVisitor::SanityCheckFrame() const {
// const size_t kMaxExpectedFrameSize = (256 + 2 + 3 + 3) * sizeof(word);
const size_t kMaxExpectedFrameSize = 2 * KB;
CHECK_LE(frame_size, kMaxExpectedFrameSize);
- size_t return_pc_offset = method->GetReturnPcOffset().SizeValue();
+ size_t return_pc_offset = GetCurrentCode().GetReturnPcOffset().SizeValue();
CHECK_LT(return_pc_offset, frame_size);
}
}
@@ -891,6 +883,7 @@ void StackVisitor::WalkStack(bool include_transitions) {
CHECK_EQ(cur_depth_, 0U);
bool exit_stubs_installed = Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled();
uint32_t instrumentation_stack_depth = 0;
+ size_t inlined_frames_count = 0;
for (const ManagedStack* current_fragment = thread_->GetManagedStack();
current_fragment != nullptr; current_fragment = current_fragment->GetLink()) {
@@ -906,10 +899,10 @@ void StackVisitor::WalkStack(bool include_transitions) {
SanityCheckFrame();
if ((walk_kind_ == StackWalkKind::kIncludeInlinedFrames)
- && method->IsOptimized(sizeof(void*))) {
- CodeInfo code_info = method->GetOptimizedCodeInfo();
+ && GetCurrentCode().IsOptimized(sizeof(void*))) {
+ CodeInfo code_info = GetCurrentCode().GetOptimizedCodeInfo();
StackMapEncoding encoding = code_info.ExtractEncoding();
- uint32_t native_pc_offset = method->NativeQuickPcOffset(cur_quick_frame_pc_);
+ uint32_t native_pc_offset = GetCurrentCode().NativeQuickPcOffset(cur_quick_frame_pc_);
StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
if (stack_map.IsValid() && stack_map.HasInlineInfo(encoding)) {
InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
@@ -922,6 +915,7 @@ void StackVisitor::WalkStack(bool include_transitions) {
return;
}
cur_depth_++;
+ inlined_frames_count++;
}
}
}
@@ -934,9 +928,9 @@ void StackVisitor::WalkStack(bool include_transitions) {
if (context_ != nullptr) {
context_->FillCalleeSaves(*this);
}
- size_t frame_size = method->GetFrameSizeInBytes();
+ size_t frame_size = GetCurrentCode().GetFrameSizeInBytes();
// Compute PC for next stack frame from return PC.
- size_t return_pc_offset = method->GetReturnPcOffset(frame_size).SizeValue();
+ size_t return_pc_offset = GetCurrentCode().GetReturnPcOffset().SizeValue();
uint8_t* return_pc_addr = reinterpret_cast<uint8_t*>(cur_quick_frame_) + return_pc_offset;
uintptr_t return_pc = *reinterpret_cast<uintptr_t*>(return_pc_addr);
if (UNLIKELY(exit_stubs_installed)) {
@@ -952,27 +946,32 @@ void StackVisitor::WalkStack(bool include_transitions) {
ArtMethod* callee = Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs);
CHECK_EQ(GetMethod(), callee) << "Expected: " << PrettyMethod(callee) << " Found: "
<< PrettyMethod(GetMethod());
- } else if (instrumentation_frame.method_ != GetMethod()) {
- LOG(FATAL) << "Expected: " << PrettyMethod(instrumentation_frame.method_)
- << " Found: " << PrettyMethod(GetMethod());
+ } else {
+ CHECK_EQ(instrumentation_frame.method_, GetMethod())
+ << "Expected: " << PrettyMethod(instrumentation_frame.method_)
+ << " Found: " << PrettyMethod(GetMethod());
}
if (num_frames_ != 0) {
// Check agreement of frame Ids only if num_frames_ is computed to avoid infinite
// recursion.
- CHECK(instrumentation_frame.frame_id_ == GetFrameId())
- << "Expected: " << instrumentation_frame.frame_id_
- << " Found: " << GetFrameId();
+ size_t frame_id = instrumentation::Instrumentation::ComputeFrameId(
+ thread_,
+ cur_depth_,
+ inlined_frames_count);
+ CHECK_EQ(instrumentation_frame.frame_id_, frame_id);
}
return_pc = instrumentation_frame.return_pc_;
}
}
+ ArtCode code = GetCurrentCode();
+
cur_quick_frame_pc_ = return_pc;
uint8_t* next_frame = reinterpret_cast<uint8_t*>(cur_quick_frame_) + frame_size;
cur_quick_frame_ = reinterpret_cast<ArtMethod**>(next_frame);
if (kDebugStackWalk) {
LOG(INFO) << PrettyMethod(method) << "@" << method << " size=" << frame_size
- << " optimized=" << method->IsOptimized(sizeof(void*))
+ << " optimized=" << code.IsOptimized(sizeof(void*))
<< " native=" << method->IsNative()
<< " entrypoints=" << method->GetEntryPointFromQuickCompiledCode()
<< "," << method->GetEntryPointFromJni()
@@ -1051,4 +1050,87 @@ int StackVisitor::GetVRegOffsetFromQuickCode(const DexFile::CodeItem* code_item,
}
}
+void LockCountData::AddMonitorInternal(Thread* self, mirror::Object* obj) {
+ if (obj == nullptr) {
+ return;
+ }
+
+ // If there's an error during enter, we won't have locked the monitor. So check there's no
+ // exception.
+ if (self->IsExceptionPending()) {
+ return;
+ }
+
+ if (monitors_ == nullptr) {
+ monitors_.reset(new std::vector<mirror::Object*>());
+ }
+ monitors_->push_back(obj);
+}
+
+void LockCountData::RemoveMonitorInternal(Thread* self, const mirror::Object* obj) {
+ if (obj == nullptr) {
+ return;
+ }
+ bool found_object = false;
+ if (monitors_ != nullptr) {
+ // We need to remove one pointer to obj, as duplicates are used for counting recursive locks.
+ // We arbitrarily choose the first one.
+ auto it = std::find(monitors_->begin(), monitors_->end(), obj);
+ if (it != monitors_->end()) {
+ monitors_->erase(it);
+ found_object = true;
+ }
+ }
+ if (!found_object) {
+ // The object wasn't found. Time for an IllegalMonitorStateException.
+ // The order here isn't fully clear. Assume that any other pending exception is swallowed.
+ // TODO: Maybe make already pending exception a suppressed exception.
+ self->ClearException();
+ self->ThrowNewExceptionF("Ljava/lang/IllegalMonitorStateException;",
+ "did not lock monitor on object of type '%s' before unlocking",
+ PrettyTypeOf(const_cast<mirror::Object*>(obj)).c_str());
+ }
+}
+
+// Helper to unlock a monitor. Must be NO_THREAD_SAFETY_ANALYSIS, as we can't statically show
+// that the object was locked.
+void MonitorExitHelper(Thread* self, mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS {
+ DCHECK(self != nullptr);
+ DCHECK(obj != nullptr);
+ obj->MonitorExit(self);
+}
+
+bool LockCountData::CheckAllMonitorsReleasedInternal(Thread* self) {
+ DCHECK(self != nullptr);
+ if (monitors_ != nullptr) {
+ if (!monitors_->empty()) {
+ // There may be an exception pending, if the method is terminating abruptly. Clear it.
+ // TODO: Should we add this as a suppressed exception?
+ self->ClearException();
+
+ // OK, there are monitors that are still locked. To enforce structured locking (and avoid
+ // deadlocks) we unlock all of them before we raise the IllegalMonitorState exception.
+ for (mirror::Object* obj : *monitors_) {
+ MonitorExitHelper(self, obj);
+ // If this raised an exception, ignore. TODO: Should we add this as suppressed
+ // exceptions?
+ if (self->IsExceptionPending()) {
+ self->ClearException();
+ }
+ }
+ // Raise an exception, just give the first object as the sample.
+ mirror::Object* first = (*monitors_)[0];
+ self->ThrowNewExceptionF("Ljava/lang/IllegalMonitorStateException;",
+ "did not unlock monitor on object of type '%s'",
+ PrettyTypeOf(first).c_str());
+
+ // To make sure this path is not triggered again, clean out the monitors.
+ monitors_->clear();
+
+ return false;
+ }
+ }
+ return true;
+}
+
} // namespace art
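Editor's note: the structured-locking bookkeeping above relies on nothing more than a vector in which the same object pointer may appear several times, once per recursive acquisition. The standalone sketch below demonstrates that counting idea with an illustrative Obj stand-in (the real code stores mirror::Object* and raises IllegalMonitorStateException instead of asserting).

#include <algorithm>
#include <cassert>
#include <vector>

struct Obj {};  // illustrative stand-in for a locked-on object

int main() {
  std::vector<Obj*> monitors;   // duplicates count recursive acquisitions
  Obj a, b;

  monitors.push_back(&a);       // monitor-enter a
  monitors.push_back(&a);       // monitor-enter a again (recursive)
  monitors.push_back(&b);       // monitor-enter b

  // monitor-exit a: remove one (arbitrary) occurrence, as RemoveMonitorInternal does.
  auto it = std::find(monitors.begin(), monitors.end(), &a);
  assert(it != monitors.end());
  monitors.erase(it);

  // One lock on a and one on b are still outstanding, so an abrupt method exit here
  // would trip the equivalent of CheckAllMonitorsReleasedInternal.
  assert(monitors.size() == 2);
  return 0;
}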
diff --git a/runtime/stack.h b/runtime/stack.h
index 31acf0eb64..3e0566d2f0 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -20,7 +20,10 @@
#include <stdint.h>
#include <string>
+#include "art_code.h"
#include "arch/instruction_set.h"
+#include "base/macros.h"
+#include "base/mutex.h"
#include "dex_file.h"
#include "gc_root.h"
#include "mirror/object_reference.h"
@@ -66,6 +69,72 @@ class MANAGED StackReference : public mirror::CompressedReference<MirrorType> {
struct ShadowFrameDeleter;
using ShadowFrameAllocaUniquePtr = std::unique_ptr<ShadowFrame, ShadowFrameDeleter>;
+// Counting locks by storing object pointers into a vector. Duplicate entries mark recursive locks.
+// The vector will be visited with the ShadowFrame during GC (so all the locked-on objects are
+// thread roots).
+// Note: implementation is split so that the call sites may be optimized to no-ops in case no
+// lock counting is necessary. The actual implementation is in the cc file to avoid
+// dependencies.
+class LockCountData {
+ public:
+ // Add the given object to the list of monitors, that is, objects that have been locked. This
+ // will not throw (but will be skipped if there is an exception pending on entry).
+ template <bool kLockCounting>
+ void AddMonitor(Thread* self, mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(self != nullptr);
+ if (!kLockCounting) {
+ return;
+ }
+ AddMonitorInternal(self, obj);
+ }
+
+ // Try to remove the given object from the monitor list, indicating an unlock operation.
+ // This will throw an IllegalMonitorStateException (clearing any already pending exception) if
+ // there was no lock recorded for the object.
+ template <bool kLockCounting>
+ void RemoveMonitorOrThrow(Thread* self,
+ const mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(self != nullptr);
+ if (!kLockCounting) {
+ return;
+ }
+ RemoveMonitorInternal(self, obj);
+ }
+
+ // Check whether all acquired monitors have been released. This will potentially throw an
+ // IllegalMonitorStateException, clearing any already pending exception. Returns true if the
+ // check shows that everything is OK with respect to lock counting, false otherwise.
+ template <bool kLockCounting>
+ bool CheckAllMonitorsReleasedOrThrow(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(self != nullptr);
+ if (!kLockCounting) {
+ return true;
+ }
+ return CheckAllMonitorsReleasedInternal(self);
+ }
+
+ template <typename T, typename... Args>
+ void VisitMonitors(T visitor, Args&&... args) SHARED_REQUIRES(Locks::mutator_lock_) {
+ if (monitors_ != nullptr) {
+ // Visitors may change the Object*. Be careful with the foreach loop.
+ for (mirror::Object*& obj : *monitors_) {
+ visitor(/* inout */ &obj, std::forward<Args>(args)...);
+ }
+ }
+ }
+
+ private:
+ // Internal implementations.
+ void AddMonitorInternal(Thread* self, mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
+ void RemoveMonitorInternal(Thread* self, const mirror::Object* obj)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ bool CheckAllMonitorsReleasedInternal(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
+
+ // Stores references to the locked-on objects. As noted, this should be visited during thread
+ // marking.
+ std::unique_ptr<std::vector<mirror::Object*>> monitors_;
+};
+
// ShadowFrame has 2 possible layouts:
// - interpreter - separate VRegs and reference arrays. References are in the reference array.
// - JNI - just VRegs, but where every VReg holds a reference.
@@ -272,6 +341,10 @@ class ShadowFrame {
}
}
+ LockCountData& GetLockCountData() {
+ return lock_count_data_;
+ }
+
static size_t LinkOffset() {
return OFFSETOF_MEMBER(ShadowFrame, link_);
}
@@ -330,6 +403,7 @@ class ShadowFrame {
ShadowFrame* link_;
ArtMethod* method_;
uint32_t dex_pc_;
+ LockCountData lock_count_data_; // This may contain GC roots when lock counting is active.
// This is a two-part array:
// - [0..number_of_vregs) holds the raw virtual registers, and each element here is always 4
@@ -644,6 +718,10 @@ class StackVisitor {
return cur_shadow_frame_;
}
+ bool IsCurrentFrameInInterpreter() const {
+ return cur_shadow_frame_ != nullptr;
+ }
+
HandleScope* GetCurrentHandleScope(size_t pointer_size) const {
ArtMethod** sp = GetCurrentQuickFrame();
// Skip ArtMethod*; handle scope comes next;
@@ -657,6 +735,8 @@ class StackVisitor {
static void DescribeStack(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_);
+ ArtCode GetCurrentCode() const { return ArtCode(cur_quick_frame_); }
+
private:
// Private constructor known in the case that num_frames_ has already been computed.
StackVisitor(Thread* thread, Context* context, StackWalkKind walk_kind, size_t num_frames)
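Editor's note: the kLockCounting template parameter in LockCountData is what lets call sites that never count locks compile down to nothing, as the class comment promises. The standalone sketch below shows the same compile-time gating pattern with illustrative names; it is not the ART code.

#include <iostream>

struct Tracker {
  int events = 0;

  template <bool kEnabled>
  void Record() {
    if (!kEnabled) {
      return;            // dead-code eliminated in the <false> instantiation
    }
    RecordInternal();    // out-of-line work only reached in the <true> instantiation
  }

 private:
  void RecordInternal() { ++events; }
};

int main() {
  Tracker t;
  t.Record<false>();  // compiles to a no-op, mirroring AddMonitor<false>(...)
  t.Record<true>();   // does the real work
  std::cout << t.events << "\n";  // prints 1
  return 0;
}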
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 65f71efc06..8e0c288185 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -32,6 +32,7 @@
#include <sstream>
#include "arch/context.h"
+#include "art_code.h"
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/bit_utils.h"
@@ -66,6 +67,7 @@
#include "ScopedLocalRef.h"
#include "ScopedUtfChars.h"
#include "stack.h"
+#include "stack_map.h"
#include "thread_list.h"
#include "thread-inl.h"
#include "utils.h"
@@ -106,19 +108,17 @@ static void UnimplementedEntryPoint() {
UNIMPLEMENTED(FATAL);
}
-void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
- QuickEntryPoints* qpoints);
+void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints);
void Thread::InitTlsEntryPoints() {
// Insert a placeholder so we can easily tell if we call an unimplemented entry point.
- uintptr_t* begin = reinterpret_cast<uintptr_t*>(&tlsPtr_.interpreter_entrypoints);
+ uintptr_t* begin = reinterpret_cast<uintptr_t*>(&tlsPtr_.jni_entrypoints);
uintptr_t* end = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(&tlsPtr_.quick_entrypoints) +
sizeof(tlsPtr_.quick_entrypoints));
for (uintptr_t* it = begin; it != end; ++it) {
*it = reinterpret_cast<uintptr_t>(UnimplementedEntryPoint);
}
- InitEntryPoints(&tlsPtr_.interpreter_entrypoints, &tlsPtr_.jni_entrypoints,
- &tlsPtr_.quick_entrypoints);
+ InitEntryPoints(&tlsPtr_.jni_entrypoints, &tlsPtr_.quick_entrypoints);
}
void Thread::InitStringEntryPoints() {
@@ -732,6 +732,18 @@ Thread* Thread::Attach(const char* thread_name, bool as_daemon, jobject thread_g
// a native peer!
if (create_peer) {
self->CreatePeer(thread_name, as_daemon, thread_group);
+ if (self->IsExceptionPending()) {
+ // We cannot keep the exception around, as we're deleting self. Try to be helpful and log it.
+ {
+ ScopedObjectAccess soa(self);
+ LOG(ERROR) << "Exception creating thread peer:";
+ LOG(ERROR) << self->GetException()->Dump();
+ self->ClearException();
+ }
+ runtime->GetThreadList()->Unregister(self);
+ // Unregister deletes self, no need to do this here.
+ return nullptr;
+ }
} else {
// These aren't necessary, but they improve diagnostics for unit tests & command-line tools.
if (thread_name != nullptr) {
@@ -790,7 +802,9 @@ void Thread::CreatePeer(const char* name, bool as_daemon, jobject thread_group)
WellKnownClasses::java_lang_Thread,
WellKnownClasses::java_lang_Thread_init,
thread_group, thread_name.get(), thread_priority, thread_is_daemon);
- AssertNoPendingException();
+ if (IsExceptionPending()) {
+ return;
+ }
Thread* self = this;
DCHECK_EQ(self, Thread::Current());
@@ -1481,7 +1495,9 @@ void Thread::DumpStack(std::ostream& os) const {
// If we're currently in native code, dump that stack before dumping the managed stack.
if (dump_for_abort || ShouldShowNativeStack(this)) {
DumpKernelStack(os, GetTid(), " kernel: ", false);
- DumpNativeStack(os, GetTid(), " native: ", GetCurrentMethod(nullptr, !dump_for_abort));
+ ArtMethod* method = GetCurrentMethod(nullptr, !dump_for_abort);
+ ArtCode art_code(method);
+ DumpNativeStack(os, GetTid(), " native: ", method, &art_code);
}
DumpJavaStack(os);
} else {
@@ -1538,6 +1554,7 @@ void Thread::FinishStartup() {
// Finish attaching the main thread.
ScopedObjectAccess soa(Thread::Current());
Thread::Current()->CreatePeer("main", false, runtime->GetMainThreadGroup());
+ Thread::Current()->AssertNoPendingException();
Runtime::Current()->GetClassLinker()->RunRootClinits();
}
@@ -2367,15 +2384,6 @@ void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset) {
DO_THREAD_OFFSET(ThreadSuspendTriggerOffset<ptr_size>(), "suspend_trigger")
#undef DO_THREAD_OFFSET
-#define INTERPRETER_ENTRY_POINT_INFO(x) \
- if (INTERPRETER_ENTRYPOINT_OFFSET(ptr_size, x).Uint32Value() == offset) { \
- os << #x; \
- return; \
- }
- INTERPRETER_ENTRY_POINT_INFO(pInterpreterToInterpreterBridge)
- INTERPRETER_ENTRY_POINT_INFO(pInterpreterToCompiledCodeBridge)
-#undef INTERPRETER_ENTRY_POINT_INFO
-
#define JNI_ENTRY_POINT_INFO(x) \
if (JNI_ENTRYPOINT_OFFSET(ptr_size, x).Uint32Value() == offset) { \
os << #x; \
@@ -2647,7 +2655,7 @@ class ReferenceMapVisitor : public StackVisitor {
} else {
// Java method.
// Portable path use DexGcMap and store in Method.native_gc_map_.
- const uint8_t* gc_map = m->GetNativeGcMap(sizeof(void*));
+ const uint8_t* gc_map = GetCurrentCode().GetNativeGcMap(sizeof(void*));
CHECK(gc_map != nullptr) << PrettyMethod(m);
verifier::DexPcToReferenceMap dex_gc_map(gc_map);
uint32_t dex_pc = shadow_frame->GetDexPC();
@@ -2667,6 +2675,8 @@ class ReferenceMapVisitor : public StackVisitor {
}
}
}
+ // Mark lock count map required for structured locking checks.
+ shadow_frame->GetLockCountData().VisitMonitors(visitor_, -1, this);
}
private:
@@ -2692,13 +2702,11 @@ class ReferenceMapVisitor : public StackVisitor {
// Process register map (which native and runtime methods don't have)
if (!m->IsNative() && !m->IsRuntimeMethod() && !m->IsProxyMethod()) {
- if (m->IsOptimized(sizeof(void*))) {
+ if (GetCurrentCode().IsOptimized(sizeof(void*))) {
auto* vreg_base = reinterpret_cast<StackReference<mirror::Object>*>(
reinterpret_cast<uintptr_t>(cur_quick_frame));
- Runtime* runtime = Runtime::Current();
- const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(m, sizeof(void*));
- uintptr_t native_pc_offset = m->NativeQuickPcOffset(GetCurrentQuickFramePc(), entry_point);
- CodeInfo code_info = m->GetOptimizedCodeInfo();
+ uintptr_t native_pc_offset = GetCurrentCode().NativeQuickPcOffset(GetCurrentQuickFramePc());
+ CodeInfo code_info = GetCurrentCode().GetOptimizedCodeInfo();
StackMapEncoding encoding = code_info.ExtractEncoding();
StackMap map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
DCHECK(map.IsValid());
@@ -2728,7 +2736,7 @@ class ReferenceMapVisitor : public StackVisitor {
}
}
} else {
- const uint8_t* native_gc_map = m->GetNativeGcMap(sizeof(void*));
+ const uint8_t* native_gc_map = GetCurrentCode().GetNativeGcMap(sizeof(void*));
CHECK(native_gc_map != nullptr) << PrettyMethod(m);
const DexFile::CodeItem* code_item = m->GetCodeItem();
// Can't be null or how would we compile its instructions?
@@ -2736,14 +2744,12 @@ class ReferenceMapVisitor : public StackVisitor {
NativePcOffsetToReferenceMap map(native_gc_map);
size_t num_regs = map.RegWidth() * 8;
if (num_regs > 0) {
- Runtime* runtime = Runtime::Current();
- const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(m, sizeof(void*));
- uintptr_t native_pc_offset = m->NativeQuickPcOffset(GetCurrentQuickFramePc(), entry_point);
+ uintptr_t native_pc_offset =
+ GetCurrentCode().NativeQuickPcOffset(GetCurrentQuickFramePc());
const uint8_t* reg_bitmap = map.FindBitMap(native_pc_offset);
DCHECK(reg_bitmap != nullptr);
- const void* code_pointer = ArtMethod::EntryPointToCodePointer(entry_point);
- const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*)));
- QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
+ const VmapTable vmap_table(GetCurrentCode().GetVmapTable(sizeof(void*)));
+ QuickMethodFrameInfo frame_info = GetCurrentCode().GetQuickFrameInfo();
// For all dex registers in the bitmap
DCHECK(cur_quick_frame != nullptr);
for (size_t reg = 0; reg < num_regs; ++reg) {
diff --git a/runtime/thread.h b/runtime/thread.h
index d262c62224..8f3461acdf 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -30,7 +30,6 @@
#include "atomic.h"
#include "base/macros.h"
#include "base/mutex.h"
-#include "entrypoints/interpreter/interpreter_entrypoints.h"
#include "entrypoints/jni/jni_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "globals.h"
@@ -580,12 +579,6 @@ class Thread {
}
template<size_t pointer_size>
- static ThreadOffset<pointer_size> InterpreterEntryPointOffset(size_t interp_entrypoint_offset) {
- return ThreadOffsetFromTlsPtr<pointer_size>(
- OFFSETOF_MEMBER(tls_ptr_sized_values, interpreter_entrypoints) + interp_entrypoint_offset);
- }
-
- template<size_t pointer_size>
static ThreadOffset<pointer_size> JniEntryPointOffset(size_t jni_entrypoint_offset) {
return ThreadOffsetFromTlsPtr<pointer_size>(
OFFSETOF_MEMBER(tls_ptr_sized_values, jni_entrypoints) + jni_entrypoint_offset);
@@ -633,6 +626,24 @@ class Thread {
return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, thread_local_objects));
}
+ template<size_t pointer_size>
+ static ThreadOffset<pointer_size> RosAllocRunsOffset() {
+ return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
+ rosalloc_runs));
+ }
+
+ template<size_t pointer_size>
+ static ThreadOffset<pointer_size> ThreadLocalAllocStackTopOffset() {
+ return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
+ thread_local_alloc_stack_top));
+ }
+
+ template<size_t pointer_size>
+ static ThreadOffset<pointer_size> ThreadLocalAllocStackEndOffset() {
+ return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
+ thread_local_alloc_stack_end));
+ }
+
// Size of stack less any space reserved for stack overflow
size_t GetStackSize() const {
return tlsPtr_.stack_size - (tlsPtr_.stack_end - tlsPtr_.stack_begin);
@@ -1329,7 +1340,6 @@ class Thread {
// Entrypoint function pointers.
// TODO: move this to more of a global offset table model to avoid per-thread duplication.
- InterpreterEntryPoints interpreter_entrypoints;
JniEntryPoints jni_entrypoints;
QuickEntryPoints quick_entrypoints;
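Editor's note: the new RosAllocRuns/ThreadLocalAllocStack accessors follow the existing pattern of computing the byte offset of a tls_ptr_sized_values member with OFFSETOF_MEMBER, so assembly stubs can address it relative to the Thread pointer. The sketch below shows that offset arithmetic over an illustrative struct; it is not the real Thread layout.

#include <cstddef>
#include <cstdint>
#include <iostream>

// Illustrative stand-in for tls_ptr_sized_values; the real layout is in thread.h.
struct TlsPtrValues {
  void* jni_entrypoints;
  void* quick_entrypoints;
  void* thread_local_alloc_stack_top;
  void* thread_local_alloc_stack_end;
};

int main() {
  // The generated accessors boil down to offsetof()-style arithmetic like this.
  size_t top_offset = offsetof(TlsPtrValues, thread_local_alloc_stack_top);
  TlsPtrValues tls = {};
  uint8_t* base = reinterpret_cast<uint8_t*>(&tls);
  // An assembly stub would load from [thread_register + top_offset].
  void** top_slot = reinterpret_cast<void**>(base + top_offset);
  std::cout << "top offset = " << top_offset
            << ", slot = " << static_cast<void*>(top_slot) << "\n";
  return 0;
}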
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 745aa6386e..ab342aa882 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -752,26 +752,31 @@ void Trace::FinishTracing() {
}
}
-void Trace::DexPcMoved(Thread* thread, mirror::Object* this_object,
- ArtMethod* method, uint32_t new_dex_pc) {
- UNUSED(thread, this_object, method, new_dex_pc);
+void Trace::DexPcMoved(Thread* thread ATTRIBUTE_UNUSED,
+ mirror::Object* this_object ATTRIBUTE_UNUSED,
+ ArtMethod* method,
+ uint32_t new_dex_pc) {
// We're not recorded to listen to this kind of event, so complain.
LOG(ERROR) << "Unexpected dex PC event in tracing " << PrettyMethod(method) << " " << new_dex_pc;
}
-void Trace::FieldRead(Thread* thread, mirror::Object* this_object,
- ArtMethod* method, uint32_t dex_pc, ArtField* field)
+void Trace::FieldRead(Thread* thread ATTRIBUTE_UNUSED,
+ mirror::Object* this_object ATTRIBUTE_UNUSED,
+ ArtMethod* method,
+ uint32_t dex_pc,
+ ArtField* field ATTRIBUTE_UNUSED)
SHARED_REQUIRES(Locks::mutator_lock_) {
- UNUSED(thread, this_object, method, dex_pc, field);
// We're not recorded to listen to this kind of event, so complain.
LOG(ERROR) << "Unexpected field read event in tracing " << PrettyMethod(method) << " " << dex_pc;
}
-void Trace::FieldWritten(Thread* thread, mirror::Object* this_object,
- ArtMethod* method, uint32_t dex_pc, ArtField* field,
- const JValue& field_value)
+void Trace::FieldWritten(Thread* thread ATTRIBUTE_UNUSED,
+ mirror::Object* this_object ATTRIBUTE_UNUSED,
+ ArtMethod* method,
+ uint32_t dex_pc,
+ ArtField* field ATTRIBUTE_UNUSED,
+ const JValue& field_value ATTRIBUTE_UNUSED)
SHARED_REQUIRES(Locks::mutator_lock_) {
- UNUSED(thread, this_object, method, dex_pc, field, field_value);
// We're not recorded to listen to this kind of event, so complain.
LOG(ERROR) << "Unexpected field write event in tracing " << PrettyMethod(method) << " " << dex_pc;
}
@@ -804,9 +809,9 @@ void Trace::MethodUnwind(Thread* thread, mirror::Object* this_object ATTRIBUTE_U
thread_clock_diff, wall_clock_diff);
}
-void Trace::ExceptionCaught(Thread* thread, mirror::Throwable* exception_object)
+void Trace::ExceptionCaught(Thread* thread ATTRIBUTE_UNUSED,
+ mirror::Throwable* exception_object ATTRIBUTE_UNUSED)
SHARED_REQUIRES(Locks::mutator_lock_) {
- UNUSED(thread, exception_object);
LOG(ERROR) << "Unexpected exception caught event in tracing";
}
diff --git a/runtime/utils.cc b/runtime/utils.cc
index 27daceaaf2..40cd6d340c 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -25,6 +25,7 @@
#include <unistd.h>
#include <memory>
+#include "art_code.h"
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/stl_util.h"
@@ -1092,7 +1093,7 @@ static void Addr2line(const std::string& map_src, uintptr_t offset, std::ostream
#endif
void DumpNativeStack(std::ostream& os, pid_t tid, const char* prefix,
- ArtMethod* current_method, void* ucontext_ptr) {
+ ArtMethod* current_method, ArtCode* current_code, void* ucontext_ptr) {
#if __linux__
// b/18119146
if (RUNNING_ON_MEMORY_TOOL != 0) {
@@ -1148,8 +1149,8 @@ void DumpNativeStack(std::ostream& os, pid_t tid, const char* prefix,
try_addr2line = true;
} else if (
current_method != nullptr && Locks::mutator_lock_->IsSharedHeld(Thread::Current()) &&
- current_method->PcIsWithinQuickCode(it->pc)) {
- const void* start_of_code = current_method->GetEntryPointFromQuickCompiledCode();
+ current_code->PcIsWithinQuickCode(it->pc)) {
+ const void* start_of_code = current_code->GetQuickOatEntryPoint(sizeof(void*));
os << JniLongName(current_method) << "+"
<< (it->pc - reinterpret_cast<uintptr_t>(start_of_code));
} else {
@@ -1163,7 +1164,7 @@ void DumpNativeStack(std::ostream& os, pid_t tid, const char* prefix,
}
}
#else
- UNUSED(os, tid, prefix, current_method, ucontext_ptr);
+ UNUSED(os, tid, prefix, current_method, current_code, ucontext_ptr);
#endif
}
diff --git a/runtime/utils.h b/runtime/utils.h
index 3e618247d0..457d43f312 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -33,6 +33,7 @@
namespace art {
+class ArtCode;
class ArtField;
class ArtMethod;
class DexFile;
@@ -221,7 +222,7 @@ void SetThreadName(const char* thread_name);
// Dumps the native stack for thread 'tid' to 'os'.
void DumpNativeStack(std::ostream& os, pid_t tid, const char* prefix = "",
- ArtMethod* current_method = nullptr, void* ucontext = nullptr)
+ ArtMethod* current_method = nullptr, ArtCode* current_code = nullptr, void* ucontext = nullptr)
NO_THREAD_SAFETY_ANALYSIS;
// Dumps the kernel stack for thread 'tid' to 'os'. Note that this is only available on linux-x86.
@@ -271,23 +272,21 @@ bool Exec(std::vector<std::string>& arg_vector, std::string* error_msg);
class VoidFunctor {
public:
template <typename A>
- inline void operator() (A a) const {
- UNUSED(a);
+ inline void operator() (A a ATTRIBUTE_UNUSED) const {
}
template <typename A, typename B>
- inline void operator() (A a, B b) const {
- UNUSED(a, b);
+ inline void operator() (A a ATTRIBUTE_UNUSED, B b ATTRIBUTE_UNUSED) const {
}
template <typename A, typename B, typename C>
- inline void operator() (A a, B b, C c) const {
- UNUSED(a, b, c);
+ inline void operator() (A a ATTRIBUTE_UNUSED, B b ATTRIBUTE_UNUSED, C c ATTRIBUTE_UNUSED) const {
}
};
-template <typename Alloc>
-void Push32(std::vector<uint8_t, Alloc>* buf, int32_t data) {
+template <typename Vector>
+void Push32(Vector* buf, int32_t data) {
+ static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type");
buf->push_back(data & 0xff);
buf->push_back((data >> 8) & 0xff);
buf->push_back((data >> 16) & 0xff);
@@ -305,6 +304,14 @@ static inline constexpr bool ValidPointerSize(size_t pointer_size) {
void DumpMethodCFG(ArtMethod* method, std::ostream& os) SHARED_REQUIRES(Locks::mutator_lock_);
void DumpMethodCFG(const DexFile* dex_file, uint32_t dex_method_idx, std::ostream& os);
+static inline const void* EntryPointToCodePointer(const void* entry_point) {
+ uintptr_t code = reinterpret_cast<uintptr_t>(entry_point);
+ // TODO: Make this Thumb2 specific. It is benign on other architectures as code is always at
+ // least 2 byte aligned.
+ code &= ~0x1;
+ return reinterpret_cast<const void*>(code);
+}
+
} // namespace art
#endif // ART_RUNTIME_UTILS_H_
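Editor's note: Push32 above now accepts any vector-like container of uint8_t (enforced by the static_assert) and still emits the value little-endian, one byte at a time; the fourth push_back falls outside the hunk context but is part of the same function. The standalone check below mirrors that template and verifies the byte order.

#include <cassert>
#include <cstdint>
#include <type_traits>
#include <vector>

template <typename Vector>
void Push32(Vector* buf, int32_t data) {
  static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type");
  buf->push_back(data & 0xff);
  buf->push_back((data >> 8) & 0xff);
  buf->push_back((data >> 16) & 0xff);
  buf->push_back((data >> 24) & 0xff);
}

int main() {
  std::vector<uint8_t> buf;
  Push32(&buf, 0x12345678);
  // Little-endian: least significant byte first.
  assert(buf[0] == 0x78 && buf[1] == 0x56 && buf[2] == 0x34 && buf[3] == 0x12);
  return 0;
}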
diff --git a/runtime/utils/dex_cache_arrays_layout-inl.h b/runtime/utils/dex_cache_arrays_layout-inl.h
index 4f662d5a8f..90e24b9632 100644
--- a/runtime/utils/dex_cache_arrays_layout-inl.h
+++ b/runtime/utils/dex_cache_arrays_layout-inl.h
@@ -27,20 +27,25 @@
namespace art {
-inline DexCacheArraysLayout::DexCacheArraysLayout(size_t pointer_size, const DexFile* dex_file)
+inline DexCacheArraysLayout::DexCacheArraysLayout(size_t pointer_size,
+ const DexFile::Header& header)
: pointer_size_(pointer_size),
/* types_offset_ is always 0u, so it's constexpr */
methods_offset_(types_offset_ +
- RoundUp(TypesSize(dex_file->NumTypeIds()), MethodsAlignment())),
+ RoundUp(TypesSize(header.type_ids_size_), MethodsAlignment())),
strings_offset_(methods_offset_ +
- RoundUp(MethodsSize(dex_file->NumMethodIds()), StringsAlignment())),
+ RoundUp(MethodsSize(header.method_ids_size_), StringsAlignment())),
fields_offset_(strings_offset_ +
- RoundUp(StringsSize(dex_file->NumStringIds()), FieldsAlignment())),
+ RoundUp(StringsSize(header.string_ids_size_), FieldsAlignment())),
size_(fields_offset_ +
- RoundUp(FieldsSize(dex_file->NumFieldIds()), Alignment())) {
+ RoundUp(FieldsSize(header.field_ids_size_), Alignment())) {
DCHECK(ValidPointerSize(pointer_size)) << pointer_size;
}
+inline DexCacheArraysLayout::DexCacheArraysLayout(size_t pointer_size, const DexFile* dex_file)
+ : DexCacheArraysLayout(pointer_size, dex_file->GetHeader()) {
+}
+
inline size_t DexCacheArraysLayout::Alignment() const {
// GcRoot<> alignment is 4, i.e. lower than or equal to the pointer alignment.
static_assert(alignof(GcRoot<mirror::Class>) == 4, "Expecting alignof(GcRoot<>) == 4");
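Editor's note: the constructor above derives each section offset by rounding the previous section's size up to the next section's alignment, which is why the new header-only overload needs nothing but the four *_ids_size_ counts. The standalone sketch below walks that arithmetic with made-up counts and a uniform entry size/alignment; the real sizes and alignments come from TypesSize(), MethodsAlignment(), and friends.

#include <cstddef>
#include <iostream>

constexpr size_t RoundUp(size_t x, size_t n) { return (x + n - 1) / n * n; }

int main() {
  // Illustrative only: pretend every entry is pointer-sized and alignment is 8.
  const size_t kEntry = 8, kAlign = 8;
  const size_t num_types = 10, num_methods = 50, num_strings = 100, num_fields = 25;

  size_t types_offset = 0u;
  size_t methods_offset = types_offset + RoundUp(num_types * kEntry, kAlign);
  size_t strings_offset = methods_offset + RoundUp(num_methods * kEntry, kAlign);
  size_t fields_offset = strings_offset + RoundUp(num_strings * kEntry, kAlign);
  size_t size = fields_offset + RoundUp(num_fields * kEntry, kAlign);

  std::cout << "methods at " << methods_offset << ", strings at " << strings_offset
            << ", fields at " << fields_offset << ", total " << size << " bytes\n";
  return 0;
}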
diff --git a/runtime/utils/dex_cache_arrays_layout.h b/runtime/utils/dex_cache_arrays_layout.h
index d50be5ac03..cd84460c3b 100644
--- a/runtime/utils/dex_cache_arrays_layout.h
+++ b/runtime/utils/dex_cache_arrays_layout.h
@@ -17,6 +17,8 @@
#ifndef ART_RUNTIME_UTILS_DEX_CACHE_ARRAYS_LAYOUT_H_
#define ART_RUNTIME_UTILS_DEX_CACHE_ARRAYS_LAYOUT_H_
+#include "dex_file.h"
+
namespace art {
/**
@@ -36,6 +38,9 @@ class DexCacheArraysLayout {
size_(0u) {
}
+ // Construct a layout for a particular dex file header.
+ DexCacheArraysLayout(size_t pointer_size, const DexFile::Header& header);
+
// Construct a layout for a particular dex file.
DexCacheArraysLayout(size_t pointer_size, const DexFile* dex_file);
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index eed3e22a72..4051a1cbe6 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -30,6 +30,7 @@
#include "dex_instruction-inl.h"
#include "dex_instruction_utils.h"
#include "dex_instruction_visitor.h"
+#include "experimental_flags.h"
#include "gc/accounting/card_table-inl.h"
#include "indenter.h"
#include "intern_table.h"
@@ -560,6 +561,7 @@ SafeMap<uint32_t, std::set<uint32_t>>& MethodVerifier::FindStringInitMap() {
bool MethodVerifier::Verify() {
// Some older code doesn't correctly mark constructors as such. Test for this case by looking at
// the name.
+ Runtime* runtime = Runtime::Current();
const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
const char* method_name = dex_file_->StringDataByIdx(method_id.name_idx_);
bool instance_constructor_by_name = strcmp("<init>", method_name) == 0;
@@ -628,9 +630,13 @@ bool MethodVerifier::Verify() {
}
}
if ((class_def_->GetJavaAccessFlags() & kAccInterface) != 0) {
- // Interface methods must be public and abstract.
- if ((method_access_flags_ & (kAccPublic | kAccAbstract)) != (kAccPublic | kAccAbstract)) {
- Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "interface methods must be public and abstract";
+ // Interface methods must be public and abstract (if default methods are disabled).
+ bool default_methods_supported =
+ runtime->AreExperimentalFlagsEnabled(ExperimentalFlags::kDefaultMethods);
+ uint32_t kRequired = kAccPublic | (default_methods_supported ? 0 : kAccAbstract);
+ if ((method_access_flags_ & kRequired) != kRequired) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "interface methods must be public"
+ << (default_methods_supported ? "" : " and abstract");
return false;
}
// In addition to the above, interface methods must not be protected.
@@ -657,10 +663,22 @@ bool MethodVerifier::Verify() {
return false;
}
- // Only the static initializer may have code in an interface.
if ((class_def_->GetJavaAccessFlags() & kAccInterface) != 0) {
- // Interfaces may have static initializers for their fields.
- if (!IsConstructor() || !IsStatic()) {
+ // Interfaces may always have static initializers for their fields. If we are running with
+ // default methods enabled we also allow other public, non-final methods to have code.
+ // Otherwise that is the only type of method allowed.
+ if (runtime->AreExperimentalFlagsEnabled(ExperimentalFlags::kDefaultMethods)) {
+ if (IsInstanceConstructor()) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "interfaces may not have non-static constructor";
+ return false;
+ } else if (method_access_flags_ & kAccFinal) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "interfaces may not have final methods";
+ return false;
+ } else if (!(method_access_flags_ & kAccPublic)) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "interfaces may not have non-public members";
+ return false;
+ }
+ } else if (!IsConstructor() || !IsStatic()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "interface methods must be abstract";
return false;
}
@@ -682,6 +700,7 @@ bool MethodVerifier::Verify() {
<< " regs=" << code_item_->registers_size_;
return false;
}
+
// Allocate and initialize an array to hold instruction data.
insn_flags_.reset(new InstructionFlags[code_item_->insns_size_in_code_units_]());
// Run through the instructions and see if the width checks out.
@@ -693,8 +712,8 @@ bool MethodVerifier::Verify() {
// Perform code-flow analysis and return.
result = result && VerifyCodeFlow();
// Compute information for compiler.
- if (result && Runtime::Current()->IsCompiler()) {
- result = Runtime::Current()->GetCompilerCallbacks()->MethodVerified(this);
+ if (result && runtime->IsCompiler()) {
+ result = runtime->GetCompilerCallbacks()->MethodVerified(this);
}
return result;
}
diff --git a/runtime/verifier/register_line.cc b/runtime/verifier/register_line.cc
index 33c90e3000..f48b1e1212 100644
--- a/runtime/verifier/register_line.cc
+++ b/runtime/verifier/register_line.cc
@@ -338,6 +338,8 @@ void RegisterLine::CheckLiteralOp(MethodVerifier* verifier, const Instruction* i
}
}
+static constexpr uint32_t kVirtualNullRegister = std::numeric_limits<uint32_t>::max();
+
void RegisterLine::PushMonitor(MethodVerifier* verifier, uint32_t reg_idx, int32_t insn_idx) {
const RegType& reg_type = GetRegisterType(verifier, reg_idx);
if (!reg_type.IsReferenceTypes()) {
@@ -352,6 +354,12 @@ void RegisterLine::PushMonitor(MethodVerifier* verifier, uint32_t reg_idx, int32
}
} else {
if (SetRegToLockDepth(reg_idx, monitors_.size())) {
+ // Null literals can establish aliases that we can't easily track. As such, handle the zero
+ // case as the 2^32-1 register (which isn't available in dex bytecode).
+ if (reg_type.IsZero()) {
+ SetRegToLockDepth(kVirtualNullRegister, monitors_.size());
+ }
+
monitors_.push_back(insn_idx);
} else {
verifier->Fail(VERIFY_ERROR_LOCKING);
@@ -377,7 +385,19 @@ void RegisterLine::PopMonitor(MethodVerifier* verifier, uint32_t reg_idx) {
}
} else {
monitors_.pop_back();
- if (!IsSetLockDepth(reg_idx, monitors_.size())) {
+
+ bool success = IsSetLockDepth(reg_idx, monitors_.size());
+
+ if (!success && reg_type.IsZero()) {
+ // Null literals can establish aliases that we can't easily track. As such, handle the zero
+ // case as the 2^32-1 register (which isn't available in dex bytecode).
+ success = IsSetLockDepth(kVirtualNullRegister, monitors_.size());
+ if (success) {
+ reg_idx = kVirtualNullRegister;
+ }
+ }
+
+ if (!success) {
verifier->Fail(VERIFY_ERROR_LOCKING);
if (kDumpLockFailures) {
LOG(WARNING) << "monitor-exit not unlocking the top of the monitor stack while verifying "
@@ -385,12 +405,41 @@ void RegisterLine::PopMonitor(MethodVerifier* verifier, uint32_t reg_idx) {
*verifier->GetMethodReference().dex_file);
}
} else {
- // Record the register was unlocked
+ // Record the register was unlocked. This clears all aliases, thus it will also clear the
+ // null lock, if necessary.
ClearRegToLockDepth(reg_idx, monitors_.size());
}
}
}
+// Check whether there is another register in the search map that is locked the same way as the
+// register in the src map. This establishes an alias.
+static bool FindLockAliasedRegister(
+ uint32_t src,
+ const AllocationTrackingSafeMap<uint32_t, uint32_t, kAllocatorTagVerifier>& src_map,
+ const AllocationTrackingSafeMap<uint32_t, uint32_t, kAllocatorTagVerifier>& search_map) {
+ auto it = src_map.find(src);
+ if (it == src_map.end()) {
+ // "Not locked" is trivially aliased.
+ return true;
+ }
+ uint32_t src_lock_levels = it->second;
+ if (src_lock_levels == 0) {
+ // "Not locked" is trivially aliased.
+ return true;
+ }
+
+ // Scan the map for the same value.
+ for (const std::pair<uint32_t, uint32_t>& pair : search_map) {
+ if (pair.first != src && pair.second == src_lock_levels) {
+ return true;
+ }
+ }
+
+ // Nothing found, no alias.
+ return false;
+}
+
bool RegisterLine::MergeRegisters(MethodVerifier* verifier, const RegisterLine* incoming_line) {
bool changed = false;
DCHECK(incoming_line != nullptr);
@@ -417,9 +466,29 @@ bool RegisterLine::MergeRegisters(MethodVerifier* verifier, const RegisterLine*
size_t depths = reg_to_lock_depths_.count(idx);
size_t incoming_depths = incoming_line->reg_to_lock_depths_.count(idx);
if (depths != incoming_depths) {
- if (depths == 0 || incoming_depths == 0) {
- reg_to_lock_depths_.erase(idx);
- } else {
+ // Stack levels aren't matching. This is potentially bad, as we don't do a
+ // flow-sensitive analysis.
+ // However, this could be an alias of something locked in one path, and the alias was
+ // destroyed in another path. It is fine to drop this as long as there's another alias
+ // for the lock around. The last vanishing alias will then report that things would be
+ // left unlocked. We need to check for aliases for both lock levels.
+ //
+ // Example (lock status in curly braces as pairs of register and lock levels):
+ //
+ // lock v1 {v1=1}
+ // | |
+ // v0 = v1 {v0=1, v1=1} v0 = v2 {v1=1}
+ // | |
+ // {v1=1}
+ // // Dropping v0, as the status can't be merged
+ // // but the lock info ("locked at depth 1" and
+ // // "not locked at all") is available.
+ if (!FindLockAliasedRegister(idx,
+ reg_to_lock_depths_,
+ reg_to_lock_depths_) ||
+ !FindLockAliasedRegister(idx,
+ incoming_line->reg_to_lock_depths_,
+ reg_to_lock_depths_)) {
verifier->Fail(VERIFY_ERROR_LOCKING);
if (kDumpLockFailures) {
LOG(WARNING) << "mismatched stack depths for register v" << idx
@@ -429,20 +498,51 @@ bool RegisterLine::MergeRegisters(MethodVerifier* verifier, const RegisterLine*
}
break;
}
+ // We found aliases, set this to zero.
+ reg_to_lock_depths_.erase(idx);
} else if (depths > 0) {
// Check whether they're actually the same levels.
uint32_t locked_levels = reg_to_lock_depths_.find(idx)->second;
uint32_t incoming_locked_levels = incoming_line->reg_to_lock_depths_.find(idx)->second;
if (locked_levels != incoming_locked_levels) {
- verifier->Fail(VERIFY_ERROR_LOCKING);
- if (kDumpLockFailures) {
- LOG(WARNING) << "mismatched lock levels for register v" << idx << ": "
- << std::hex << locked_levels << std::dec << " != "
- << std::hex << incoming_locked_levels << std::dec << " in "
- << PrettyMethod(verifier->GetMethodReference().dex_method_index,
- *verifier->GetMethodReference().dex_file);
+ // Lock levels aren't matching. This is potentially bad, as we don't do a
+ // flow-sensitive analysis.
+ // However, this could be an alias of something locked in one path, and the alias was
+ // destroyed in another path. It is fine to drop this as long as there's another alias
+ // for the lock around. The last vanishing alias will then report that things would be
+ // left unlocked. We need to check for aliases for both lock levels.
+ //
+ // Example (lock status in curly braces as pairs of register and lock levels):
+ //
+ // lock v1 {v1=1}
+ // lock v2 {v1=1, v2=2}
+ // | |
+ // v0 = v1 {v0=1, v1=1, v2=2} v0 = v2 {v0=2, v1=1, v2=2}
+ // | |
+ // {v1=1, v2=2}
+ // // Dropping v0, as the status can't be
+ // // merged but the lock info ("locked at
+ // // depth 1" and "locked at depth 2") is
+ // // available.
+ if (!FindLockAliasedRegister(idx,
+ reg_to_lock_depths_,
+ reg_to_lock_depths_) ||
+ !FindLockAliasedRegister(idx,
+ incoming_line->reg_to_lock_depths_,
+ reg_to_lock_depths_)) {
+ // No alias on the current or the incoming side; we would lose lock information.
+ verifier->Fail(VERIFY_ERROR_LOCKING);
+ if (kDumpLockFailures) {
+ LOG(WARNING) << "mismatched lock levels for register v" << idx << ": "
+ << std::hex << locked_levels << std::dec << " != "
+ << std::hex << incoming_locked_levels << std::dec << " in "
+ << PrettyMethod(verifier->GetMethodReference().dex_method_index,
+ *verifier->GetMethodReference().dex_file);
+ }
+ break;
}
- break;
+ // We found aliases, set this to zero.
+ reg_to_lock_depths_.erase(idx);
}
}
}
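
As a rough Java rendering of the alias search introduced above (plain java.util.Map instead of ART's SafeMap, so this is a sketch of the logic rather than the runtime API):

    import java.util.Map;

    class LockAliasSketch {
        // A register is covered if it is not locked at all, or if some other register in the
        // search map is locked with exactly the same depth bitmask (i.e. an alias exists).
        static boolean findLockAliasedRegister(int src,
                                               Map<Integer, Integer> srcMap,
                                               Map<Integer, Integer> searchMap) {
            Integer srcLockLevels = srcMap.get(src);
            if (srcLockLevels == null || srcLockLevels == 0) {
                return true;  // "Not locked" is trivially aliased.
            }
            for (Map.Entry<Integer, Integer> entry : searchMap.entrySet()) {
                if (entry.getKey() != src && entry.getValue().intValue() == srcLockLevels) {
                    return true;  // Another register holds the same lock levels.
                }
            }
            return false;  // No alias; dropping this register would lose lock information.
        }
    }

The NullLocks smali test and the TwoPath test added later in this change exercise the two situations this logic covers: null-constant aliases and registers merged from differently locked paths.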
diff --git a/test/004-JniTest/src/Main.java b/test/004-JniTest/src/Main.java
index dd88db0b7c..ee3a3b9830 100644
--- a/test/004-JniTest/src/Main.java
+++ b/test/004-JniTest/src/Main.java
@@ -120,7 +120,7 @@ public class Main {
private static void testRemoveLocalObject() {
removeLocalObject(new Object());
}
-
+
private static native short shortMethod(short s1, short s2, short s3, short s4, short s5, short s6, short s7,
short s8, short s9, short s10);
diff --git a/test/004-ReferenceMap/stack_walk_refmap_jni.cc b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
index 285df18c72..f8d321cbec 100644
--- a/test/004-ReferenceMap/stack_walk_refmap_jni.cc
+++ b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
@@ -22,11 +22,11 @@ namespace art {
#define CHECK_REGS_CONTAIN_REFS(dex_pc, abort_if_not_found, ...) do { \
int t[] = {__VA_ARGS__}; \
int t_size = sizeof(t) / sizeof(*t); \
- uintptr_t native_quick_pc = m->ToNativeQuickPc(dex_pc, \
+ uintptr_t native_quick_pc = GetCurrentCode().ToNativeQuickPc(dex_pc, \
/* is_catch_handler */ false, \
abort_if_not_found); \
if (native_quick_pc != UINTPTR_MAX) { \
- CheckReferences(t, t_size, m->NativeQuickPcOffset(native_quick_pc)); \
+ CheckReferences(t, t_size, GetCurrentCode().NativeQuickPcOffset(native_quick_pc)); \
} \
} while (false);
@@ -49,7 +49,7 @@ struct ReferenceMap2Visitor : public CheckReferenceMapVisitor {
CHECK_REGS_CONTAIN_REFS(0x06U, true, 8, 1); // v8: this, v1: x
CHECK_REGS_CONTAIN_REFS(0x08U, true, 8, 3, 1); // v8: this, v3: y, v1: x
CHECK_REGS_CONTAIN_REFS(0x0cU, true, 8, 3, 1); // v8: this, v3: y, v1: x
- if (!m->IsOptimized(sizeof(void*))) {
+ if (!GetCurrentCode().IsOptimized(sizeof(void*))) {
CHECK_REGS_CONTAIN_REFS(0x0eU, true, 8, 3, 1); // v8: this, v3: y, v1: x
}
CHECK_REGS_CONTAIN_REFS(0x10U, true, 8, 3, 1); // v8: this, v3: y, v1: x
@@ -65,7 +65,7 @@ struct ReferenceMap2Visitor : public CheckReferenceMapVisitor {
// Note that v0: ex can be eliminated because it's a dead merge of two different exceptions.
CHECK_REGS_CONTAIN_REFS(0x18U, true, 8, 2, 1); // v8: this, v2: y, v1: x (dead v0: ex)
CHECK_REGS_CONTAIN_REFS(0x1aU, true, 8, 5, 2, 1); // v8: this, v5: x[1], v2: y, v1: x (dead v0: ex)
- if (!m->IsOptimized(sizeof(void*))) {
+ if (!GetCurrentCode().IsOptimized(sizeof(void*))) {
// v8: this, v5: x[1], v2: y, v1: x (dead v0: ex)
CHECK_REGS_CONTAIN_REFS(0x1dU, true, 8, 5, 2, 1);
// v5 is removed from the root set because there is a "merge" operation.
@@ -74,7 +74,7 @@ struct ReferenceMap2Visitor : public CheckReferenceMapVisitor {
}
CHECK_REGS_CONTAIN_REFS(0x21U, true, 8, 2, 1); // v8: this, v2: y, v1: x (dead v0: ex)
- if (!m->IsOptimized(sizeof(void*))) {
+ if (!GetCurrentCode().IsOptimized(sizeof(void*))) {
CHECK_REGS_CONTAIN_REFS(0x27U, true, 8, 4, 2, 1); // v8: this, v4: ex, v2: y, v1: x
}
CHECK_REGS_CONTAIN_REFS(0x29U, true, 8, 4, 2, 1); // v8: this, v4: ex, v2: y, v1: x
diff --git a/test/079-phantom/src/Bitmap.java b/test/079-phantom/src/Bitmap.java
index 85eb3ccb97..ff43749e76 100644
--- a/test/079-phantom/src/Bitmap.java
+++ b/test/079-phantom/src/Bitmap.java
@@ -125,7 +125,6 @@ class PhantomWrapper extends PhantomReference {
*/
class BitmapWatcher extends Thread {
ReferenceQueue<PhantomWrapper> mQueue;
- volatile boolean mQuit = false;
BitmapWatcher(ReferenceQueue<PhantomWrapper> queue) {
mQueue = queue;
@@ -133,7 +132,7 @@ class BitmapWatcher extends Thread {
}
public void run() {
- while (!mQuit) {
+ while (true) {
try {
PhantomWrapper ref = (PhantomWrapper) mQueue.remove();
//System.out.println("dequeued ref " + ref.mNativeData +
@@ -142,12 +141,12 @@ class BitmapWatcher extends Thread {
//ref.clear();
} catch (InterruptedException ie) {
System.out.println("intr");
+ break;
}
}
}
public void shutDown() {
- mQuit = true;
interrupt();
}
}
diff --git a/test/088-monitor-verification/smali/NullLocks.smali b/test/088-monitor-verification/smali/NullLocks.smali
new file mode 100644
index 0000000000..8262f19e22
--- /dev/null
+++ b/test/088-monitor-verification/smali/NullLocks.smali
@@ -0,0 +1,28 @@
+.class public LNullLocks;
+
+.super Ljava/lang/Object;
+
+.method public static run(Z)V
+ .registers 3
+
+ invoke-static {}, LMain;->assertIsManaged()V
+
+ if-eqz v2, :Lfalse
+
+ const v0, 0 # Null.
+ monitor-enter v0
+ const v1, 0 # Another null. This should be detected as an alias, such that the exit
+ # will not fail verification.
+ monitor-exit v1
+
+ monitor-enter v0
+ monitor-exit v1
+
+ monitor-enter v1
+ monitor-exit v0
+
+:Lfalse
+
+ return-void
+
+.end method
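
At the Java source level, the runtime behaviour the new smali test depends on is simply that entering a null monitor throws; a minimal sketch (class name is illustrative):

    public class NullMonitorSketch {
        public static void main(String[] args) {
            Object lock = null;
            synchronized (lock) {  // monitor-enter on null throws NullPointerException
                // Never reached.
            }
        }
    }

This matches the expectations registered below: run(false) skips the locking entirely, while run(true) must throw NullPointerException. The aliased-register pattern itself (monitor-enter v0, monitor-exit v1) is easiest to express in hand-written smali, which is why the test is a .smali file.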
diff --git a/test/088-monitor-verification/src/Main.java b/test/088-monitor-verification/src/Main.java
index 218805543e..212c894bd5 100644
--- a/test/088-monitor-verification/src/Main.java
+++ b/test/088-monitor-verification/src/Main.java
@@ -27,6 +27,13 @@ public class Main {
*/
public static void main(String[] args) {
System.loadLibrary(args[0]);
+ if (!hasOatFile() || runtimeIsSoftFail() || isInterpreted()) {
+ // Some tests ensure that the verifier was able to guarantee balanced locking by
+ // asserting that the test function is running as compiled code. But skip this now,
+ // as this seems to be a non-compiled code test configuration.
+ disableStackFrameAsserts();
+ }
+
Main m = new Main();
m.recursiveSync(0);
@@ -49,7 +56,7 @@ public class Main {
Object obj1 = new Object();
Object obj2 = new Object();
- m.twoPath(obj1, obj2, 0);
+ TwoPath.twoPath(obj1, obj2, 0);
System.out.println("twoPath ok");
m.triplet(obj1, obj2, 0);
@@ -62,6 +69,7 @@ public class Main {
* Recursive synchronized method.
*/
synchronized void recursiveSync(int iter) {
+ assertIsManaged();
if (iter < 40) {
recursiveSync(iter+1);
} else {
@@ -73,6 +81,7 @@ public class Main {
* Tests simple nesting, with and without a throw.
*/
void nestedMayThrow(boolean doThrow) {
+ assertIsManaged();
synchronized (this) {
synchronized (Main.class) {
synchronized (new Object()) {
@@ -90,6 +99,7 @@ public class Main {
* Exercises bug 3215458.
*/
void constantLock() {
+ assertIsManaged();
Class thing = Thread.class;
synchronized (Thread.class) {}
}
@@ -98,6 +108,7 @@ public class Main {
* Confirms that we can have 32 nested monitors on one method.
*/
void notExcessiveNesting() {
+ assertIsManaged();
synchronized (this) { // 1
synchronized (this) { // 2
synchronized (this) { // 3
@@ -138,6 +149,7 @@ public class Main {
* method.
*/
void notNested() {
+ assertIsManaged();
synchronized (this) {} // 1
synchronized (this) {} // 2
synchronized (this) {} // 3
@@ -178,25 +190,6 @@ public class Main {
private void doNothing(Object obj) {}
/**
- * Conditionally uses one of the synchronized objects.
- */
- public void twoPath(Object obj1, Object obj2, int x) {
- Object localObj;
-
- synchronized (obj1) {
- synchronized(obj2) {
- if (x == 0) {
- localObj = obj2;
- } else {
- localObj = obj1;
- }
- }
- }
-
- doNothing(localObj);
- }
-
- /**
* Lock the monitor two or three times, and make use of the locked or
* unlocked object.
*/
@@ -220,19 +213,16 @@ public class Main {
// Smali testing code.
private static void runSmaliTests() {
- if (!hasOatFile() || runtimeIsSoftFail() || isInterpreted()) {
- // Skip test, this seems to be a non-compiled code test configuration.
- return;
- }
-
runTest("OK", new Object[] { new Object(), new Object() }, null);
runTest("TooDeep", new Object[] { new Object() }, null);
runTest("NotStructuredOverUnlock", new Object[] { new Object() },
IllegalMonitorStateException.class);
- runTest("NotStructuredUnderUnlock", new Object[] { new Object() }, null);
- // TODO: new IllegalMonitorStateException());
+ runTest("NotStructuredUnderUnlock", new Object[] { new Object() },
+ IllegalMonitorStateException.class);
runTest("UnbalancedJoin", new Object[] { new Object(), new Object() }, null);
runTest("UnbalancedStraight", new Object[] { new Object(), new Object() }, null);
+ runTest("NullLocks", new Object[] { false }, null);
+ runTest("NullLocks", new Object[] { true }, NullPointerException.class);
}
private static void runTest(String className, Object[] parameters, Class<?> excType) {
@@ -282,4 +272,5 @@ public class Main {
public static native boolean hasOatFile();
public static native boolean runtimeIsSoftFail();
public static native boolean isInterpreted();
+ public static native void disableStackFrameAsserts();
}
diff --git a/test/088-monitor-verification/src/TwoPath.java b/test/088-monitor-verification/src/TwoPath.java
new file mode 100644
index 0000000000..bdc15ad82e
--- /dev/null
+++ b/test/088-monitor-verification/src/TwoPath.java
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.lang.reflect.Modifier;
+
+/*
+ * Test case for conditionally using one of two synchronized objects.
+ *
+ * This code cannot be verified at the moment, as the join point merges a register that holds
+ * two differently locked objects. Keep it out of Main so that the whole class is not forced
+ * to run in the interpreter.
+ */
+public class TwoPath {
+
+ /**
+ * Conditionally uses one of the synchronized objects.
+ */
+ public static void twoPath(Object obj1, Object obj2, int x) {
+ Main.assertIsManaged();
+
+ Object localObj;
+
+ synchronized (obj1) {
+ synchronized(obj2) {
+ if (x == 0) {
+ localObj = obj2;
+ } else {
+ localObj = obj1;
+ }
+ }
+ }
+
+ doNothing(localObj);
+ }
+
+ private static void doNothing(Object o) {
+ }
+}
diff --git a/test/131-structural-change/expected.txt b/test/131-structural-change/expected.txt
index cc7713d252..1d19278f1e 100644
--- a/test/131-structural-change/expected.txt
+++ b/test/131-structural-change/expected.txt
@@ -1,2 +1,3 @@
+JNI_OnLoad called
Should really reach here.
Done.
diff --git a/test/131-structural-change/src/Main.java b/test/131-structural-change/src/Main.java
index 6cbbd12387..c7488992df 100644
--- a/test/131-structural-change/src/Main.java
+++ b/test/131-structural-change/src/Main.java
@@ -35,7 +35,7 @@ public class Main {
e.printStackTrace(System.out);
}
- boolean haveOatFile = hasOat();
+ boolean haveOatFile = hasOatFile();
boolean gotError = false;
try {
Class<?> bClass = getClass().getClassLoader().loadClass("B");
@@ -45,10 +45,10 @@ public class Main {
e.printStackTrace(System.out);
}
if (haveOatFile ^ gotError) {
- System.out.println("Did not get expected error.");
+ System.out.println("Did not get expected error. " + haveOatFile + " " + gotError);
}
System.out.println("Done.");
}
- private native static boolean hasOat();
+ private native static boolean hasOatFile();
}
diff --git a/test/141-class-unload/expected.txt b/test/141-class-unload/expected.txt
index 53d7abecaf..11de660c43 100644
--- a/test/141-class-unload/expected.txt
+++ b/test/141-class-unload/expected.txt
@@ -21,3 +21,4 @@ null
JNI_OnLoad called
class null false test
JNI_OnUnload called
+Number of loaded unload-ex maps 0
diff --git a/test/141-class-unload/src/Main.java b/test/141-class-unload/src/Main.java
index 3cc43accbe..0640b364c9 100644
--- a/test/141-class-unload/src/Main.java
+++ b/test/141-class-unload/src/Main.java
@@ -14,6 +14,9 @@
* limitations under the License.
*/
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileReader;
import java.lang.ref.WeakReference;
import java.lang.reflect.Constructor;
import java.lang.reflect.Method;
@@ -43,11 +46,28 @@ public class Main {
testStackTrace(constructor);
// Stress test to make sure we don't leak memory.
stressTest(constructor);
+ // Test that the oat files are unloaded.
+ testOatFilesUnloaded(getPid());
} catch (Exception e) {
System.out.println(e);
}
}
+ private static void testOatFilesUnloaded(int pid) throws Exception {
+ BufferedReader reader = new BufferedReader(new FileReader ("/proc/" + pid + "/maps"));
+ String line;
+ int count = 0;
+ Runtime.getRuntime().gc();
+ System.runFinalization();
+ while ((line = reader.readLine()) != null) {
+ if (line.contains("@141-class-unload-ex.jar")) {
+ System.out.println(line);
+ ++count;
+ }
+ }
+ System.out.println("Number of loaded unload-ex maps " + count);
+ }
+
private static void stressTest(Constructor constructor) throws Exception {
for (int i = 0; i <= 100; ++i) {
setUpUnloadLoader(constructor, false);
@@ -163,4 +183,8 @@ public class Main {
loadLibrary.invoke(intHolder, nativeLibraryName);
return new WeakReference(loader);
}
+
+ private static int getPid() throws Exception {
+ return Integer.parseInt(new File("/proc/self").getCanonicalFile().getName());
+ }
}
diff --git a/test/454-get-vreg/get_vreg_jni.cc b/test/454-get-vreg/get_vreg_jni.cc
index 9facfdb076..0ee2ff9fda 100644
--- a/test/454-get-vreg/get_vreg_jni.cc
+++ b/test/454-get-vreg/get_vreg_jni.cc
@@ -15,6 +15,7 @@
*/
#include "arch/context.h"
+#include "art_code.h"
#include "art_method-inl.h"
#include "jni.h"
#include "scoped_thread_state_change.h"
@@ -45,10 +46,14 @@ class TestVisitor : public StackVisitor {
CHECK_EQ(value, 42u);
bool success = GetVReg(m, 1, kIntVReg, &value);
- if (m->IsOptimized(sizeof(void*))) CHECK(!success);
+ if (!IsCurrentFrameInInterpreter() && GetCurrentCode().IsOptimized(sizeof(void*))) {
+ CHECK(!success);
+ }
success = GetVReg(m, 2, kIntVReg, &value);
- if (m->IsOptimized(sizeof(void*))) CHECK(!success);
+ if (!IsCurrentFrameInInterpreter() && GetCurrentCode().IsOptimized(sizeof(void*))) {
+ CHECK(!success);
+ }
CHECK(GetVReg(m, 3, kReferenceVReg, &value));
CHECK_EQ(reinterpret_cast<mirror::Object*>(value), this_value_);
@@ -78,10 +83,14 @@ class TestVisitor : public StackVisitor {
CHECK_EQ(value, 42u);
bool success = GetVRegPair(m, 2, kLongLoVReg, kLongHiVReg, &value);
- if (m->IsOptimized(sizeof(void*))) CHECK(!success);
+ if (!IsCurrentFrameInInterpreter() && GetCurrentCode().IsOptimized(sizeof(void*))) {
+ CHECK(!success);
+ }
success = GetVRegPair(m, 4, kLongLoVReg, kLongHiVReg, &value);
- if (m->IsOptimized(sizeof(void*))) CHECK(!success);
+ if (!IsCurrentFrameInInterpreter() && GetCurrentCode().IsOptimized(sizeof(void*))) {
+ CHECK(!success);
+ }
uint32_t value32 = 0;
CHECK(GetVReg(m, 6, kReferenceVReg, &value32));
diff --git a/test/457-regs/regs_jni.cc b/test/457-regs/regs_jni.cc
index c21168b81e..6fcebdb8b5 100644
--- a/test/457-regs/regs_jni.cc
+++ b/test/457-regs/regs_jni.cc
@@ -15,6 +15,7 @@
*/
#include "arch/context.h"
+#include "art_code.h"
#include "art_method-inl.h"
#include "jni.h"
#include "scoped_thread_state_change.h"
@@ -63,7 +64,9 @@ class TestVisitor : public StackVisitor {
CHECK_EQ(value, 1u);
bool success = GetVReg(m, 2, kIntVReg, &value);
- if (m->IsOptimized(sizeof(void*))) CHECK(!success);
+ if (!IsCurrentFrameInInterpreter() && GetCurrentCode().IsOptimized(sizeof(void*))) {
+ CHECK(!success);
+ }
CHECK(GetVReg(m, 3, kReferenceVReg, &value));
CHECK_EQ(value, 1u);
diff --git a/test/466-get-live-vreg/get_live_vreg_jni.cc b/test/466-get-live-vreg/get_live_vreg_jni.cc
index 7e9a583faf..2a56a7fce7 100644
--- a/test/466-get-live-vreg/get_live_vreg_jni.cc
+++ b/test/466-get-live-vreg/get_live_vreg_jni.cc
@@ -15,6 +15,7 @@
*/
#include "arch/context.h"
+#include "art_code.h"
#include "art_method-inl.h"
#include "jni.h"
#include "scoped_thread_state_change.h"
@@ -43,7 +44,7 @@ class TestVisitor : public StackVisitor {
found_method_ = true;
uint32_t value = 0;
if (GetCurrentQuickFrame() != nullptr &&
- m->IsOptimized(sizeof(void*)) &&
+ GetCurrentCode().IsOptimized(sizeof(void*)) &&
!Runtime::Current()->IsDebuggable()) {
CHECK_EQ(GetVReg(m, 0, kIntVReg, &value), false);
} else {
diff --git a/test/529-checker-unresolved/expected.txt b/test/529-checker-unresolved/expected.txt
index 358048c75b..1e7dbfed2e 100644
--- a/test/529-checker-unresolved/expected.txt
+++ b/test/529-checker-unresolved/expected.txt
@@ -3,3 +3,5 @@ UnresolvedClass.staticMethod()
UnresolvedClass.virtualMethod()
UnresolvedClass.interfaceMethod()
UnresolvedClass.superMethod()
+instanceof ok
+checkcast ok
diff --git a/test/529-checker-unresolved/src/Main.java b/test/529-checker-unresolved/src/Main.java
index adb5adae82..5219c04c37 100644
--- a/test/529-checker-unresolved/src/Main.java
+++ b/test/529-checker-unresolved/src/Main.java
@@ -114,16 +114,30 @@ public class Main extends UnresolvedSuperClass {
expectEquals(o, c.instanceObject);
}
+ static public void testInstanceOf(Object o) {
+ if (o instanceof UnresolvedSuperClass) {
+ System.out.println("instanceof ok");
+ }
+ }
+
+ static public UnresolvedSuperClass testCheckCast(Object o) {
+ UnresolvedSuperClass c = (UnresolvedSuperClass) o;
+ System.out.println("checkcast ok");
+ return c;
+ }
/// CHECK-START: void Main.main(java.lang.String[]) register (before)
/// CHECK: InvokeUnresolved invoke_type:direct
static public void main(String[] args) {
UnresolvedClass c = new UnresolvedClass();
+ Main m = new Main();
callInvokeUnresolvedStatic();
callInvokeUnresolvedVirtual(c);
callInvokeUnresolvedInterface(c);
- callInvokeUnresolvedSuper(new Main());
+ callInvokeUnresolvedSuper(m);
callUnresolvedStaticFieldAccess();
callUnresolvedInstanceFieldAccess(c);
+ testInstanceOf(m);
+ testCheckCast(m);
}
public static void expectEquals(byte expected, byte result) {
diff --git a/test/529-checker-unresolved/src/Unresolved.java b/test/529-checker-unresolved/src/Unresolved.java
index 03ceb6857b..20ac6e0b89 100644
--- a/test/529-checker-unresolved/src/Unresolved.java
+++ b/test/529-checker-unresolved/src/Unresolved.java
@@ -58,13 +58,3 @@ class UnresolvedClass extends UnresolvedSuperClass implements UnresolvedInterfac
public Object instanceObject;
}
-final class UnresolvedFinalClass {
- public void directMethod() {
- System.out.println("UnresolvedFinalClass.directMethod()");
- }
-}
-
-class UnresolvedAtRuntime {
- public void unresolvedAtRuntime() { }
-}
-
diff --git a/test/536-checker-intrinsic-optimization/expected.txt b/test/536-checker-intrinsic-optimization/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/536-checker-intrinsic-optimization/expected.txt
diff --git a/test/536-checker-intrinsic-optimization/info.txt b/test/536-checker-intrinsic-optimization/info.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/536-checker-intrinsic-optimization/info.txt
diff --git a/test/536-checker-intrinsic-optimization/src/Main.java b/test/536-checker-intrinsic-optimization/src/Main.java
new file mode 100644
index 0000000000..1b784ae367
--- /dev/null
+++ b/test/536-checker-intrinsic-optimization/src/Main.java
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+public class Main {
+ public static void main(String[] args) {
+ stringEqualsSame();
+ stringArgumentNotNull("Foo");
+ }
+
+ /// CHECK-START: boolean Main.stringEqualsSame() instruction_simplifier (before)
+ /// CHECK: InvokeStaticOrDirect
+
+ /// CHECK-START: boolean Main.stringEqualsSame() register (before)
+ /// CHECK: <<Const1:i\d+>> IntConstant 1
+ /// CHECK: Return [<<Const1>>]
+
+ /// CHECK-START: boolean Main.stringEqualsSame() register (before)
+ /// CHECK-NOT: InvokeStaticOrDirect
+ public static boolean stringEqualsSame() {
+ return $inline$callStringEquals("obj", "obj");
+ }
+
+ /// CHECK-START: boolean Main.stringEqualsNull() register (after)
+ /// CHECK: <<Invoke:z\d+>> InvokeStaticOrDirect
+ /// CHECK: Return [<<Invoke>>]
+ public static boolean stringEqualsNull() {
+ String o = (String)myObject;
+ return $inline$callStringEquals(o, o);
+ }
+
+ public static boolean $inline$callStringEquals(String a, String b) {
+ return a.equals(b);
+ }
+
+ /// CHECK-START-X86: boolean Main.stringArgumentNotNull(java.lang.Object) disassembly (after)
+ /// CHECK: InvokeStaticOrDirect
+ /// CHECK-NOT: test
+ public static boolean stringArgumentNotNull(Object obj) {
+ obj.getClass();
+ return "foo".equals(obj);
+ }
+
+ // This test is very brittle, as it depends on the order in which we emit instructions.
+ /// CHECK-START-X86: boolean Main.stringArgumentIsString() disassembly (after)
+ /// CHECK: InvokeStaticOrDirect
+ /// CHECK: test
+ /// CHECK: jz/eq
+ // Check that we don't try to compare the classes.
+ /// CHECK-NOT: mov
+ /// CHECK: cmp
+ public static boolean stringArgumentIsString() {
+ return "foo".equals(myString);
+ }
+
+ static String myString;
+ static Object myObject;
+}
diff --git a/test/536-checker-needs-access-check/expected.txt b/test/536-checker-needs-access-check/expected.txt
new file mode 100644
index 0000000000..4acae95b70
--- /dev/null
+++ b/test/536-checker-needs-access-check/expected.txt
@@ -0,0 +1,4 @@
+Got expected error instanceof
+Got expected error instanceof null
+Got expected error checkcast null
+Got expected error instanceof (keep LoadClass with access check)
diff --git a/test/536-checker-needs-access-check/info.txt b/test/536-checker-needs-access-check/info.txt
new file mode 100644
index 0000000000..3413cf3625
--- /dev/null
+++ b/test/536-checker-needs-access-check/info.txt
@@ -0,0 +1 @@
+Verifies that we don't remove type checks when we need to check for access rights.
diff --git a/test/536-checker-needs-access-check/src/Main.java b/test/536-checker-needs-access-check/src/Main.java
new file mode 100644
index 0000000000..7bd49c1c8c
--- /dev/null
+++ b/test/536-checker-needs-access-check/src/Main.java
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import other.InaccessibleClass;
+import other.InaccessibleClassProxy;
+
+public class Main {
+ public static void main(String[] args) {
+ try {
+ testInstanceOf();
+ } catch (IllegalAccessError e) {
+ System.out.println("Got expected error instanceof");
+ }
+
+ try {
+ testInstanceOfNull();
+ } catch (IllegalAccessError e) {
+ System.out.println("Got expected error instanceof null");
+ }
+
+ try {
+ testCheckCastNull();
+ } catch (IllegalAccessError e) {
+ System.out.println("Got expected error checkcast null");
+ }
+
+ try {
+ testDontGvnLoadClassWithAccessChecks(new Object());
+ } catch (IllegalAccessError e) {
+ System.out.println("Got expected error instanceof (keep LoadClass with access check)");
+ }
+ }
+
+ /// CHECK-START: boolean Main.testInstanceOf() register (after)
+ /// CHECK: InstanceOf
+ public static boolean testInstanceOf() {
+ return ic instanceof InaccessibleClass;
+ }
+
+ /// CHECK-START: boolean Main.testInstanceOfNull() register (after)
+ /// CHECK: InstanceOf
+ public static boolean testInstanceOfNull() {
+ return null instanceof InaccessibleClass;
+ }
+
+ // TODO: write a test for CheckCast with a non-null constant (after RTP can parse arguments).
+
+ /// CHECK-START: other.InaccessibleClass Main.testCheckCastNull() register (after)
+ /// CHECK: CheckCast
+ public static InaccessibleClass testCheckCastNull() {
+ return (InaccessibleClass) null;
+ }
+
+ /// CHECK-START: boolean Main.testDontGvnLoadClassWithAccessChecks(java.lang.Object) inliner (before)
+ /// CHECK: InvokeStaticOrDirect
+
+ /// CHECK-START: boolean Main.testDontGvnLoadClassWithAccessChecks(java.lang.Object) inliner (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+
+ /// CHECK-START: boolean Main.testDontGvnLoadClassWithAccessChecks(java.lang.Object) GVN (after)
+ /// CHECK: LoadClass needs_access_check:false
+ /// CHECK: LoadClass needs_access_check:true
+ public static boolean testDontGvnLoadClassWithAccessChecks(Object o) {
+ InaccessibleClassProxy.test(o);
+ return ic instanceof InaccessibleClass;
+ }
+
+ public static InaccessibleClass ic;
+}
diff --git a/test/536-checker-needs-access-check/src/other/InaccessibleClass.java b/test/536-checker-needs-access-check/src/other/InaccessibleClass.java
new file mode 100644
index 0000000000..de2e1d7830
--- /dev/null
+++ b/test/536-checker-needs-access-check/src/other/InaccessibleClass.java
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package other;
+
+public class InaccessibleClass {
+}
diff --git a/test/536-checker-needs-access-check/src/other/InaccessibleClassProxy.java b/test/536-checker-needs-access-check/src/other/InaccessibleClassProxy.java
new file mode 100644
index 0000000000..4c005e4dfe
--- /dev/null
+++ b/test/536-checker-needs-access-check/src/other/InaccessibleClassProxy.java
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package other;
+
+public class InaccessibleClassProxy {
+ public static boolean test(Object o) {
+ return o instanceof InaccessibleClass;
+ }
+}
diff --git a/test/536-checker-needs-access-check/src2/other/InaccessibleClass.java b/test/536-checker-needs-access-check/src2/other/InaccessibleClass.java
new file mode 100644
index 0000000000..273226375e
--- /dev/null
+++ b/test/536-checker-needs-access-check/src2/other/InaccessibleClass.java
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package other;
+
+/*package*/ class InaccessibleClass {
+}
diff --git a/test/536-checker-needs-access-check/src2/other/InaccessibleClassProxy.java b/test/536-checker-needs-access-check/src2/other/InaccessibleClassProxy.java
new file mode 100644
index 0000000000..4c005e4dfe
--- /dev/null
+++ b/test/536-checker-needs-access-check/src2/other/InaccessibleClassProxy.java
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package other;
+
+public class InaccessibleClassProxy {
+ public static boolean test(Object o) {
+ return o instanceof InaccessibleClass;
+ }
+}
diff --git a/test/537-checker-arraycopy/expected.txt b/test/537-checker-arraycopy/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/537-checker-arraycopy/expected.txt
diff --git a/test/537-checker-arraycopy/info.txt b/test/537-checker-arraycopy/info.txt
new file mode 100644
index 0000000000..ea88f89306
--- /dev/null
+++ b/test/537-checker-arraycopy/info.txt
@@ -0,0 +1 @@
+Test for edge cases of System.arraycopy.
diff --git a/test/537-checker-arraycopy/src/Main.java b/test/537-checker-arraycopy/src/Main.java
new file mode 100644
index 0000000000..30ccc56b80
--- /dev/null
+++ b/test/537-checker-arraycopy/src/Main.java
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+public class Main {
+ public static void main(String[] args) {
+ arraycopy();
+ try {
+ arraycopy(new Object());
+ throw new Error("Should not be here");
+ } catch (ArrayStoreException ase) {
+ // Ignore.
+ }
+ try {
+ arraycopy(null);
+ throw new Error("Should not be here");
+ } catch (NullPointerException npe) {
+ // Ignore.
+ }
+
+ try {
+ arraycopy(new Object[1]);
+ throw new Error("Should not be here");
+ } catch (ArrayIndexOutOfBoundsException aiooe) {
+ // Ignore.
+ }
+
+ arraycopy(new Object[2]);
+ arraycopy(new Object[2], 0);
+
+ try {
+ arraycopy(new Object[1], 1);
+ throw new Error("Should not be here");
+ } catch (ArrayIndexOutOfBoundsException aiooe) {
+ // Ignore.
+ }
+ }
+
+ /// CHECK-START-X86_64: void Main.arraycopy() disassembly (after)
+ /// CHECK: InvokeStaticOrDirect
+ /// CHECK-NOT: test
+ /// CHECK-NOT: call
+ /// CHECK: ReturnVoid
+ // Checks that the call is intrinsified and that there is no test instruction
+ // when we know the source and destination are not null.
+ public static void arraycopy() {
+ Object[] obj = new Object[4];
+ System.arraycopy(obj, 1, obj, 0, 1);
+ }
+
+ public static void arraycopy(Object obj) {
+ System.arraycopy(obj, 1, obj, 0, 1);
+ }
+
+ public static void arraycopy(Object[] obj, int pos) {
+ System.arraycopy(obj, pos, obj, 0, obj.length);
+ }
+}
diff --git a/test/537-checker-debuggable/expected.txt b/test/537-checker-debuggable/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/537-checker-debuggable/expected.txt
diff --git a/test/537-checker-debuggable/info.txt b/test/537-checker-debuggable/info.txt
new file mode 100644
index 0000000000..25597d3f13
--- /dev/null
+++ b/test/537-checker-debuggable/info.txt
@@ -0,0 +1 @@
+Test that CHECK-START-DEBUGGABLE runs only on --debuggable code.
\ No newline at end of file
diff --git a/test/537-checker-debuggable/smali/TestCase.smali b/test/537-checker-debuggable/smali/TestCase.smali
new file mode 100644
index 0000000000..8e6c7ef727
--- /dev/null
+++ b/test/537-checker-debuggable/smali/TestCase.smali
@@ -0,0 +1,42 @@
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LTestCase;
+
+.super Ljava/lang/Object;
+
+# The phi in this method has no actual uses but one environment use. It will
+# be eliminated in normal mode but kept live in debuggable mode. Test that
+# Checker runs the correct test for each compilation mode.
+
+## CHECK-START: int TestCase.deadPhi(int, int, int) ssa_builder (after)
+## CHECK-NOT: Phi
+
+## CHECK-START-DEBUGGABLE: int TestCase.deadPhi(int, int, int) ssa_builder (after)
+## CHECK: Phi
+
+.method public static deadPhi(III)I
+ .registers 8
+
+ move v0, p1
+ if-eqz p0, :after
+ move v0, p2
+ :after
+ # v0 = Phi [p1, p2] with no uses
+
+ invoke-static {}, Ljava/lang/System;->nanoTime()J # create an env use
+
+ :return
+ return p2
+.end method
diff --git a/test/537-checker-debuggable/src/Main.java b/test/537-checker-debuggable/src/Main.java
new file mode 100644
index 0000000000..a572648109
--- /dev/null
+++ b/test/537-checker-debuggable/src/Main.java
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+ // Workaround for b/18051191.
+ class InnerClass {}
+
+ public static void main(String[] args) { }
+}
diff --git a/test/537-checker-inline-and-unverified/expected.txt b/test/537-checker-inline-and-unverified/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/537-checker-inline-and-unverified/expected.txt
diff --git a/test/537-checker-inline-and-unverified/info.txt b/test/537-checker-inline-and-unverified/info.txt
new file mode 100644
index 0000000000..ec12327408
--- /dev/null
+++ b/test/537-checker-inline-and-unverified/info.txt
@@ -0,0 +1 @@
+Checks that unverified methods are not inlined.
diff --git a/test/537-checker-inline-and-unverified/src/Main.java b/test/537-checker-inline-and-unverified/src/Main.java
new file mode 100644
index 0000000000..bdc14b027c
--- /dev/null
+++ b/test/537-checker-inline-and-unverified/src/Main.java
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import other.InaccessibleClass;
+
+public class Main {
+ public static void main(String[] args) {
+ try {
+ testNoInline();
+ } catch (IllegalAccessError e) {
+ // expected
+ }
+ testInline();
+ }
+
+ /// CHECK-START: void Main.testNoInline() inliner (before)
+ /// CHECK: InvokeStaticOrDirect method_name:Main.$opt$noinline$testNoInline
+
+ /// CHECK-START: void Main.testNoInline() inliner (after)
+ /// CHECK: InvokeStaticOrDirect method_name:Main.$opt$noinline$testNoInline
+ public static void testNoInline() {
+ $opt$noinline$testNoInline();
+ }
+
+ /// CHECK-START: void Main.testInline() inliner (before)
+ /// CHECK: InvokeStaticOrDirect method_name:Main.$opt$inline$testInline
+
+ /// CHECK-START: void Main.testInline() inliner (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+ public static void testInline() {
+ $opt$inline$testInline();
+ }
+
+ public static boolean $opt$noinline$testNoInline() {
+ try {
+ return null instanceof InaccessibleClass;
+ } catch (IllegalAccessError e) {
+ // expected
+ }
+ return false;
+ }
+
+ public static boolean $opt$inline$testInline() {
+ return null instanceof Main;
+ }
+}
diff --git a/test/537-checker-inline-and-unverified/src/other/InaccessibleClass.java b/test/537-checker-inline-and-unverified/src/other/InaccessibleClass.java
new file mode 100644
index 0000000000..de2e1d7830
--- /dev/null
+++ b/test/537-checker-inline-and-unverified/src/other/InaccessibleClass.java
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package other;
+
+public class InaccessibleClass {
+}
diff --git a/test/537-checker-inline-and-unverified/src2/other/InaccessibleClass.java b/test/537-checker-inline-and-unverified/src2/other/InaccessibleClass.java
new file mode 100644
index 0000000000..ff11d7adc9
--- /dev/null
+++ b/test/537-checker-inline-and-unverified/src2/other/InaccessibleClass.java
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package other;
+
+/* package */ class InaccessibleClass {
+}
diff --git a/test/538-checker-embed-constants/expected.txt b/test/538-checker-embed-constants/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/538-checker-embed-constants/expected.txt
diff --git a/test/538-checker-embed-constants/info.txt b/test/538-checker-embed-constants/info.txt
new file mode 100644
index 0000000000..5a722ecf12
--- /dev/null
+++ b/test/538-checker-embed-constants/info.txt
@@ -0,0 +1 @@
+Test embedding of constants in assembler instructions.
diff --git a/test/538-checker-embed-constants/src/Main.java b/test/538-checker-embed-constants/src/Main.java
new file mode 100644
index 0000000000..d8618e30fb
--- /dev/null
+++ b/test/538-checker-embed-constants/src/Main.java
@@ -0,0 +1,290 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+ public static void assertIntEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ public static void assertLongEquals(long expected, long result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ /// CHECK-START-ARM: int Main.and255(int) disassembly (after)
+ /// CHECK-NOT: movs {{r\d+}}, #255
+ /// CHECK: and {{r\d+}}, {{r\d+}}, #255
+
+ public static int and255(int arg) {
+ return arg & 255;
+ }
+
+ /// CHECK-START-ARM: int Main.and511(int) disassembly (after)
+ /// CHECK: movw {{r\d+}}, #511
+ /// CHECK: and{{(\.w)?}} {{r\d+}}, {{r\d+}}, {{r\d+}}
+
+ public static int and511(int arg) {
+ return arg & 511;
+ }
+
+ /// CHECK-START-ARM: int Main.andNot15(int) disassembly (after)
+ /// CHECK-NOT: mvn {{r\d+}}, #15
+ /// CHECK: bic {{r\d+}}, {{r\d+}}, #15
+
+ public static int andNot15(int arg) {
+ return arg & ~15;
+ }
+
+ /// CHECK-START-ARM: int Main.or255(int) disassembly (after)
+ /// CHECK-NOT: movs {{r\d+}}, #255
+ /// CHECK: orr {{r\d+}}, {{r\d+}}, #255
+
+ public static int or255(int arg) {
+ return arg | 255;
+ }
+
+ /// CHECK-START-ARM: int Main.or511(int) disassembly (after)
+ /// CHECK: movw {{r\d+}}, #511
+ /// CHECK: orr{{(\.w)?}} {{r\d+}}, {{r\d+}}, {{r\d+}}
+
+ public static int or511(int arg) {
+ return arg | 511;
+ }
+
+ /// CHECK-START-ARM: int Main.orNot15(int) disassembly (after)
+ /// CHECK-NOT: mvn {{r\d+}}, #15
+ /// CHECK: orn {{r\d+}}, {{r\d+}}, #15
+
+ public static int orNot15(int arg) {
+ return arg | ~15;
+ }
+
+ /// CHECK-START-ARM: int Main.xor255(int) disassembly (after)
+ /// CHECK-NOT: movs {{r\d+}}, #255
+ /// CHECK: eor {{r\d+}}, {{r\d+}}, #255
+
+ public static int xor255(int arg) {
+ return arg ^ 255;
+ }
+
+ /// CHECK-START-ARM: int Main.xor511(int) disassembly (after)
+ /// CHECK: movw {{r\d+}}, #511
+ /// CHECK: eor{{(\.w)?}} {{r\d+}}, {{r\d+}}, {{r\d+}}
+
+ public static int xor511(int arg) {
+ return arg ^ 511;
+ }
+
+ /// CHECK-START-ARM: int Main.xorNot15(int) disassembly (after)
+ /// CHECK: mvn {{r\d+}}, #15
+ /// CHECK: eor{{(\.w)?}} {{r\d+}}, {{r\d+}}, {{r\d+}}
+
+ public static int xorNot15(int arg) {
+ return arg ^ ~15;
+ }
+
+ /// CHECK-START-ARM: long Main.and255(long) disassembly (after)
+ /// CHECK-NOT: movs {{r\d+}}, #255
+ /// CHECK-NOT: and
+ /// CHECK-NOT: bic
+ /// CHECK-DAG: and {{r\d+}}, {{r\d+}}, #255
+ /// CHECK-DAG: movs {{r\d+}}, #0
+ /// CHECK-NOT: and
+ /// CHECK-NOT: bic
+
+ public static long and255(long arg) {
+ return arg & 255L;
+ }
+
+ /// CHECK-START-ARM: long Main.and511(long) disassembly (after)
+ /// CHECK: movw {{r\d+}}, #511
+ /// CHECK-NOT: and
+ /// CHECK-NOT: bic
+ /// CHECK-DAG: and{{(\.w)?}} {{r\d+}}, {{r\d+}}, {{r\d+}}
+ /// CHECK-DAG: movs {{r\d+}}, #0
+ /// CHECK-NOT: and
+ /// CHECK-NOT: bic
+
+ public static long and511(long arg) {
+ return arg & 511L;
+ }
+
+ /// CHECK-START-ARM: long Main.andNot15(long) disassembly (after)
+ /// CHECK-NOT: mvn {{r\d+}}, #15
+ /// CHECK-NOT: and
+ /// CHECK-NOT: bic
+ /// CHECK: bic {{r\d+}}, {{r\d+}}, #15
+ /// CHECK-NOT: and
+ /// CHECK-NOT: bic
+
+ public static long andNot15(long arg) {
+ return arg & ~15L;
+ }
+
+ /// CHECK-START-ARM: long Main.and0xfffffff00000000f(long) disassembly (after)
+ /// CHECK-NOT: movs {{r\d+}}, #15
+ /// CHECK-NOT: mvn {{r\d+}}, #15
+ /// CHECK-NOT: and
+ /// CHECK-NOT: bic
+ /// CHECK-DAG: and {{r\d+}}, {{r\d+}}, #15
+ /// CHECK-DAG: bic {{r\d+}}, {{r\d+}}, #15
+ /// CHECK-NOT: and
+ /// CHECK-NOT: bic
+
+ public static long and0xfffffff00000000f(long arg) {
+ return arg & 0xfffffff00000000fL;
+ }
+
+ /// CHECK-START-ARM: long Main.or255(long) disassembly (after)
+ /// CHECK-NOT: movs {{r\d+}}, #255
+ /// CHECK-NOT: orr
+ /// CHECK-NOT: orn
+ /// CHECK: orr {{r\d+}}, {{r\d+}}, #255
+ /// CHECK-NOT: orr
+ /// CHECK-NOT: orn
+
+ public static long or255(long arg) {
+ return arg | 255L;
+ }
+
+ /// CHECK-START-ARM: long Main.or511(long) disassembly (after)
+ /// CHECK: movw {{r\d+}}, #511
+ /// CHECK-NOT: orr
+ /// CHECK-NOT: orn
+ /// CHECK: orr{{(\.w)?}} {{r\d+}}, {{r\d+}}, {{r\d+}}
+ /// CHECK-NOT: orr
+ /// CHECK-NOT: orn
+
+ public static long or511(long arg) {
+ return arg | 511L;
+ }
+
+ /// CHECK-START-ARM: long Main.orNot15(long) disassembly (after)
+ /// CHECK-NOT: mvn {{r\d+}}, #15
+ /// CHECK-NOT: orr
+ /// CHECK-NOT: orn
+ /// CHECK-DAG: orn {{r\d+}}, {{r\d+}}, #15
+ /// CHECK-DAG: mvn {{r\d+}}, #0
+ /// CHECK-NOT: orr
+ /// CHECK-NOT: orn
+
+ public static long orNot15(long arg) {
+ return arg | ~15L;
+ }
+
+ /// CHECK-START-ARM: long Main.or0xfffffff00000000f(long) disassembly (after)
+ /// CHECK-NOT: movs {{r\d+}}, #15
+ /// CHECK-NOT: mvn {{r\d+}}, #15
+ /// CHECK-NOT: orr
+ /// CHECK-NOT: orn
+ /// CHECK-DAG: orr {{r\d+}}, {{r\d+}}, #15
+ /// CHECK-DAG: orn {{r\d+}}, {{r\d+}}, #15
+ /// CHECK-NOT: orr
+ /// CHECK-NOT: orn
+
+ public static long or0xfffffff00000000f(long arg) {
+ return arg | 0xfffffff00000000fL;
+ }
+
+ /// CHECK-START-ARM: long Main.xor255(long) disassembly (after)
+ /// CHECK-NOT: movs {{r\d+}}, #255
+ /// CHECK-NOT: eor
+ /// CHECK: eor {{r\d+}}, {{r\d+}}, #255
+ /// CHECK-NOT: eor
+
+ public static long xor255(long arg) {
+ return arg ^ 255L;
+ }
+
+ /// CHECK-START-ARM: long Main.xor511(long) disassembly (after)
+ /// CHECK: movw {{r\d+}}, #511
+ /// CHECK-NOT: eor
+ /// CHECK-DAG: eor{{(\.w)?}} {{r\d+}}, {{r\d+}}, {{r\d+}}
+ /// CHECK-NOT: eor
+
+ public static long xor511(long arg) {
+ return arg ^ 511L;
+ }
+
+ /// CHECK-START-ARM: long Main.xorNot15(long) disassembly (after)
+ /// CHECK-DAG: mvn {{r\d+}}, #15
+ /// CHECK-DAG: mov.w {{r\d+}}, #-1
+ /// CHECK-NOT: eor
+ /// CHECK-DAG: eor{{(\.w)?}} {{r\d+}}, {{r\d+}}, {{r\d+}}
+ /// CHECK-DAG: eor{{(\.w)?}} {{r\d+}}, {{r\d+}}, {{r\d+}}
+ /// CHECK-NOT: eor
+
+ public static long xorNot15(long arg) {
+ return arg ^ ~15L;
+ }
+
+ // Note: No support for partial long constant embedding.
+ /// CHECK-START-ARM: long Main.xor0xfffffff00000000f(long) disassembly (after)
+ /// CHECK-DAG: movs {{r\d+}}, #15
+ /// CHECK-DAG: mvn {{r\d+}}, #15
+ /// CHECK-NOT: eor
+ /// CHECK-DAG: eor{{(\.w)?}} {{r\d+}}, {{r\d+}}, {{r\d+}}
+ /// CHECK-DAG: eor{{(\.w)?}} {{r\d+}}, {{r\d+}}, {{r\d+}}
+ /// CHECK-NOT: eor
+
+ public static long xor0xfffffff00000000f(long arg) {
+ return arg ^ 0xfffffff00000000fL;
+ }
+
+ /// CHECK-START-ARM: long Main.xor0xf00000000000000f(long) disassembly (after)
+ /// CHECK-NOT: movs {{r\d+}}, #15
+ /// CHECK-NOT: mov.w {{r\d+}}, #-268435456
+ /// CHECK-NOT: eor
+ /// CHECK-DAG: eor {{r\d+}}, {{r\d+}}, #15
+ /// CHECK-DAG: eor {{r\d+}}, {{r\d+}}, #-268435456
+ /// CHECK-NOT: eor
+
+ public static long xor0xf00000000000000f(long arg) {
+ return arg ^ 0xf00000000000000fL;
+ }
+
+ public static void main(String[] args) {
+ int arg = 0x87654321;
+ assertIntEquals(and255(arg), 0x21);
+ assertIntEquals(and511(arg), 0x121);
+ assertIntEquals(andNot15(arg), 0x87654320);
+ assertIntEquals(or255(arg), 0x876543ff);
+ assertIntEquals(or511(arg), 0x876543ff);
+ assertIntEquals(orNot15(arg), 0xfffffff1);
+ assertIntEquals(xor255(arg), 0x876543de);
+ assertIntEquals(xor511(arg), 0x876542de);
+ assertIntEquals(xorNot15(arg), 0x789abcd1);
+
+ long longArg = 0x1234567887654321L;
+ assertLongEquals(and255(longArg), 0x21L);
+ assertLongEquals(and511(longArg), 0x121L);
+ assertLongEquals(andNot15(longArg), 0x1234567887654320L);
+ assertLongEquals(and0xfffffff00000000f(longArg), 0x1234567000000001L);
+ assertLongEquals(or255(longArg), 0x12345678876543ffL);
+ assertLongEquals(or511(longArg), 0x12345678876543ffL);
+ assertLongEquals(orNot15(longArg), 0xfffffffffffffff1L);
+ assertLongEquals(or0xfffffff00000000f(longArg), 0xfffffff88765432fL);
+ assertLongEquals(xor255(longArg), 0x12345678876543deL);
+ assertLongEquals(xor511(longArg), 0x12345678876542deL);
+ assertLongEquals(xorNot15(longArg), 0xedcba987789abcd1L);
+ assertLongEquals(xor0xfffffff00000000f(longArg), 0xedcba9888765432eL);
+ assertLongEquals(xor0xf00000000000000f(longArg), 0xe23456788765432eL);
+ }
+}
diff --git a/test/539-checker-arm64-encodable-immediates/expected.txt b/test/539-checker-arm64-encodable-immediates/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/539-checker-arm64-encodable-immediates/expected.txt
diff --git a/test/539-checker-arm64-encodable-immediates/info.txt b/test/539-checker-arm64-encodable-immediates/info.txt
new file mode 100644
index 0000000000..efeef33231
--- /dev/null
+++ b/test/539-checker-arm64-encodable-immediates/info.txt
@@ -0,0 +1,2 @@
+Basic tests that check the compiler recognizes when constant values can be
+encoded in the immediate field of instructions.
diff --git a/test/539-checker-arm64-encodable-immediates/src/Main.java b/test/539-checker-arm64-encodable-immediates/src/Main.java
new file mode 100644
index 0000000000..7e3ff9fde8
--- /dev/null
+++ b/test/539-checker-arm64-encodable-immediates/src/Main.java
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+public class Main {
+
+ public static void assertLongEquals(long expected, long result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ /**
+ * Test that the `-1` constant is not synthesized in a register and that we
+ * instead simply switch between `add` and `sub` instructions with the
+ * constant embedded.
+ * We need two uses (or more) of the constant because the compiler always
+ * delegates the immediate value handling to VIXL when there is only one use.
+ */
+
+ /// CHECK-START-ARM64: long Main.addM1(long) register (after)
+ /// CHECK: <<Arg:j\d+>> ParameterValue
+ /// CHECK: <<ConstM1:j\d+>> LongConstant -1
+ /// CHECK-NOT: ParallelMove
+ /// CHECK: Add [<<Arg>>,<<ConstM1>>]
+ /// CHECK: Sub [<<Arg>>,<<ConstM1>>]
+
+ /// CHECK-START-ARM64: long Main.addM1(long) disassembly (after)
+ /// CHECK: sub x{{\d+}}, x{{\d+}}, #0x1
+ /// CHECK: add x{{\d+}}, x{{\d+}}, #0x1
+
+ public static long addM1(long arg) {
+ return (arg + (-1)) | (arg - (-1));
+ }
+
+ public static void main(String[] args) {
+ assertLongEquals(14, addM1(7));
+ }
+}
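For reference, the single assertion in main() follows from (7 + (-1)) | (7 - (-1)) = 6 | 8 = 14.
A minimal sketch of how a further encodable-immediate case could be written in the same style
(hypothetical method, not part of this patch; it relies on the same two-use requirement noted
in the comment above so that the constant is not folded by the single-use VIXL path):

  public static long addM2(long arg) {
    // Two uses of the constant -2 keep it out of the single-use VIXL path,
    // so the test can observe whether it is synthesized in a register or
    // encoded directly in the add/sub immediates.
    return (arg + (-2)) | (arg - (-2));
  }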
diff --git a/test/540-checker-rtp-bug/expected.txt b/test/540-checker-rtp-bug/expected.txt
new file mode 100644
index 0000000000..2cf2842aa5
--- /dev/null
+++ b/test/540-checker-rtp-bug/expected.txt
@@ -0,0 +1 @@
+instanceof failed
diff --git a/test/540-checker-rtp-bug/info.txt b/test/540-checker-rtp-bug/info.txt
new file mode 100644
index 0000000000..852cd7c1b4
--- /dev/null
+++ b/test/540-checker-rtp-bug/info.txt
@@ -0,0 +1 @@
+Test that we set the proper types for objects (b/25008765).
diff --git a/test/540-checker-rtp-bug/src/Main.java b/test/540-checker-rtp-bug/src/Main.java
new file mode 100644
index 0000000000..e9f16c04d9
--- /dev/null
+++ b/test/540-checker-rtp-bug/src/Main.java
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+final class Final {
+ public String toString() {
+ return "final";
+ }
+}
+
+public class Main {
+ /// CHECK-START: Final Main.testKeepCheckCast(java.lang.Object, boolean) reference_type_propagation (after)
+ /// CHECK: <<Phi:l\d+>> Phi klass:java.lang.Object
+ /// CHECK: <<Class:l\d+>> LoadClass
+ /// CHECK: CheckCast [<<Phi>>,<<Class>>]
+ /// CHECK: <<Ret:l\d+>> BoundType [<<Phi>>] klass:Final
+ /// CHECK: Return [<<Ret>>]
+
+ /// CHECK-START: Final Main.testKeepCheckCast(java.lang.Object, boolean) instruction_simplifier_after_types (after)
+ /// CHECK: <<Phi:l\d+>> Phi
+ /// CHECK: <<Class:l\d+>> LoadClass
+ /// CHECK: CheckCast [<<Phi>>,<<Class>>]
+ /// CHECK: <<Ret:l\d+>> BoundType [<<Phi>>]
+ /// CHECK: Return [<<Ret>>]
+ public static Final testKeepCheckCast(Object o, boolean cond) {
+ Object x = new Final();
+ while (cond) {
+ x = o;
+ cond = false;
+ }
+ return (Final) x;
+ }
+
+ /// CHECK-START: void Main.testKeepInstanceOf(java.lang.Object, boolean) reference_type_propagation (after)
+ /// CHECK: <<Phi:l\d+>> Phi klass:java.lang.Object
+ /// CHECK: <<Class:l\d+>> LoadClass
+ /// CHECK: InstanceOf [<<Phi>>,<<Class>>]
+
+ /// CHECK-START: void Main.testKeepInstanceOf(java.lang.Object, boolean) dead_code_elimination (after)
+ /// CHECK: <<Phi:l\d+>> Phi
+ /// CHECK: <<Class:l\d+>> LoadClass
+ /// CHECK: InstanceOf [<<Phi>>,<<Class>>]
+ public static void testKeepInstanceOf(Object o, boolean cond) {
+ Object x = new Final();
+ while (cond) {
+ x = o;
+ cond = false;
+ }
+ if (x instanceof Final) {
+ System.out.println("instanceof succeed");
+ } else {
+ System.out.println("instanceof failed");
+ }
+ }
+
+ /// CHECK-START: java.lang.String Main.testNoInline(java.lang.Object, boolean) reference_type_propagation (after)
+ /// CHECK: <<Phi:l\d+>> Phi klass:java.lang.Object
+ /// CHECK: <<NC:l\d+>> NullCheck [<<Phi>>]
+ /// CHECK: <<Ret:l\d+>> InvokeVirtual [<<NC>>] method_name:java.lang.Object.toString
+ /// CHECK: Return [<<Ret>>]
+
+ /// CHECK-START: java.lang.String Main.testNoInline(java.lang.Object, boolean) inliner (after)
+ /// CHECK: <<Phi:l\d+>> Phi
+ /// CHECK: <<NC:l\d+>> NullCheck [<<Phi>>]
+ /// CHECK: <<Ret:l\d+>> InvokeVirtual [<<NC>>] method_name:java.lang.Object.toString
+ /// CHECK: Return [<<Ret>>]
+ public static String testNoInline(Object o, boolean cond) {
+ Object x = new Final();
+ while (cond) {
+ x = o;
+ cond = false;
+ }
+ return x.toString();
+ }
+
+ public static void main(String[] args) {
+ try {
+ testKeepCheckCast(new Object(), true);
+ throw new Error("Expected check cast exception");
+ } catch (ClassCastException e) {
+ // expected
+ }
+
+ testKeepInstanceOf(new Object(), true);
+
+ if ("final".equals(testNoInline(new Object(), true))) {
+ throw new Error("Bad inlining");
+ }
+ }
+}
diff --git a/test/955-lambda-smali/run b/test/955-lambda-smali/run
index 2aeca8c8fc..b7546801b9 100755
--- a/test/955-lambda-smali/run
+++ b/test/955-lambda-smali/run
@@ -15,4 +15,4 @@
# limitations under the License.
# Ensure that the lambda experimental opcodes are turned on for dalvikvm and dex2oat
-${RUN} "$@" --runtime-option -Xexperimental-lambdas -Xcompiler-option --runtime-arg -Xcompiler-option -Xexperimental-lambdas
+${RUN} "$@" --runtime-option -Xexperimental:lambdas -Xcompiler-option --runtime-arg -Xcompiler-option -Xexperimental:lambdas
diff --git a/test/960-default-smali/build b/test/960-default-smali/build
new file mode 100755
index 0000000000..c7866878e9
--- /dev/null
+++ b/test/960-default-smali/build
@@ -0,0 +1,33 @@
+#!/bin/bash
+#
+# Copyright 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# make us exit on a failure
+set -e
+
+# Generate the smali Main.smali file or fail
+./util-src/generate_smali.py ./smali
+
+if [[ $@ == *"--jvm"* ]]; then
+ # Build the Java files if we are running a --jvm test
+ mkdir -p src
+ mkdir -p classes
+ ${ANDROID_BUILD_TOP}/art/tools/extract-embedded-java ./smali ./src
+ ${JAVAC} -implicit:none -d classes $(find src -name '*.java')
+fi
+
+# Build the smali files and make a dex
+${SMALI} -JXmx256m --experimental --api-level 23 --output classes.dex $(find smali -name '*.smali')
+zip "$TEST_NAME.jar" classes.dex
diff --git a/test/960-default-smali/expected.txt b/test/960-default-smali/expected.txt
new file mode 100644
index 0000000000..7671eed5de
--- /dev/null
+++ b/test/960-default-smali/expected.txt
@@ -0,0 +1,84 @@
+Testing for type A
+A-virtual A.SayHi()='Hi '
+A-interface Greeter.SayHi()='Hi '
+A-virtual A.SayHiTwice()='Hi Hi '
+A-interface Greeter.SayHiTwice()='Hi Hi '
+End testing for type A
+Testing for type B
+B-virtual B.SayHi()='Hello '
+B-interface Greeter.SayHi()='Hello '
+B-interface Greeter2.SayHi()='Hello '
+B-virtual B.SayHiTwice()='I say Hello Hello '
+B-interface Greeter.SayHiTwice()='I say Hello Hello '
+B-interface Greeter2.SayHiTwice()='I say Hello Hello '
+End testing for type B
+Testing for type C
+C-virtual A.SayHi()='Hi '
+C-virtual C.SayHi()='Hi '
+C-interface Greeter.SayHi()='Hi '
+C-virtual A.SayHiTwice()='You don't control me'
+C-virtual C.SayHiTwice()='You don't control me'
+C-interface Greeter.SayHiTwice()='You don't control me'
+End testing for type C
+Testing for type D
+D-virtual D.GetName()='Alex '
+D-interface Greeter3.GetName()='Alex '
+D-virtual D.SayHi()='Hello Alex '
+D-interface Greeter.SayHi()='Hello Alex '
+D-interface Greeter3.SayHi()='Hello Alex '
+D-virtual D.SayHiTwice()='Hello Alex Hello Alex '
+D-interface Greeter.SayHiTwice()='Hello Alex Hello Alex '
+D-interface Greeter3.SayHiTwice()='Hello Alex Hello Alex '
+End testing for type D
+Testing for type E
+E-virtual A.SayHi()='Hi2 '
+E-virtual E.SayHi()='Hi2 '
+E-interface Greeter.SayHi()='Hi2 '
+E-interface Greeter2.SayHi()='Hi2 '
+E-virtual A.SayHiTwice()='I say Hi2 Hi2 '
+E-virtual E.SayHiTwice()='I say Hi2 Hi2 '
+E-interface Greeter.SayHiTwice()='I say Hi2 Hi2 '
+E-interface Greeter2.SayHiTwice()='I say Hi2 Hi2 '
+End testing for type E
+Testing for type F
+F-interface Attendant.GetPlace()='android'
+F-virtual F.GetPlace()='android'
+F-virtual A.SayHi()='Hi '
+F-interface Attendant.SayHi()='Hi '
+F-virtual F.SayHi()='Hi '
+F-interface Greeter.SayHi()='Hi '
+F-virtual A.SayHiTwice()='We can override both interfaces'
+F-interface Attendant.SayHiTwice()='We can override both interfaces'
+F-virtual F.SayHiTwice()='We can override both interfaces'
+F-interface Greeter.SayHiTwice()='We can override both interfaces'
+End testing for type F
+Testing for type G
+G-interface Attendant.GetPlace()='android'
+G-virtual G.GetPlace()='android'
+G-interface Attendant.SayHi()='welcome to android'
+G-virtual G.SayHi()='welcome to android'
+G-interface Attendant.SayHiTwice()='welcome to androidwelcome to android'
+G-virtual G.SayHiTwice()='welcome to androidwelcome to android'
+End testing for type G
+Testing for type H
+H-interface Extension.SayHi()='welcome '
+H-virtual H.SayHi()='welcome '
+End testing for type H
+Testing for type I
+I-virtual A.SayHi()='Hi '
+I-interface Greeter.SayHi()='Hi '
+I-interface Greeter2.SayHi()='Hi '
+I-virtual I.SayHi()='Hi '
+I-virtual A.SayHiTwice()='I say Hi Hi '
+I-interface Greeter.SayHiTwice()='I say Hi Hi '
+I-interface Greeter2.SayHiTwice()='I say Hi Hi '
+I-virtual I.SayHiTwice()='I say Hi Hi '
+End testing for type I
+Testing for type J
+J-virtual A.SayHi()='Hi '
+J-interface Greeter.SayHi()='Hi '
+J-virtual J.SayHi()='Hi '
+J-virtual A.SayHiTwice()='Hi Hi '
+J-interface Greeter.SayHiTwice()='Hi Hi '
+J-virtual J.SayHiTwice()='Hi Hi '
+End testing for type J
diff --git a/test/960-default-smali/info.txt b/test/960-default-smali/info.txt
new file mode 100644
index 0000000000..eb596e2c9f
--- /dev/null
+++ b/test/960-default-smali/info.txt
@@ -0,0 +1,19 @@
+Smali-based tests for experimental interface default methods.
+
+Obviously needs to run under ART or a Java 8 Language runtime and compiler.
+
+When run, a Main.smali file will be generated by the util-src/generate_smali.py
+script. If we run with --jvm we will use the tools/extract-embedded-java script
+to turn the smali into equivalent Java using the embedded Java code.
+
+When updating, be sure to write the equivalent Java code in the comments of the
+smali files.
+
+Care should be taken when updating the generate_smali.py script. It must always
+return equivalent output when run multiple times.
+
+To update the test files do the following steps:
+ <Add new classes/interfaces>
+ <Add these classes/interfaces to ./smali/classes.xml>
+  JAVA_HOME="/path/to/java-8-jdk" ../run-test --use-java-home --update --jvm --host 960-default-smali
+ git add ./smali/classes.xml ./expected.txt
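As an illustration of the embedded-Java convention described above, the comment blocks in
smali/Greeter.smali and smali/A.smali correspond to Java along the following lines, which is
roughly what tools/extract-embedded-java recovers into src/ for a --jvm run (the exact output
layout is an assumption):

  interface Greeter {
    String SayHi();
    default String SayHiTwice() {
      return SayHi() + SayHi();
    }
  }

  class A implements Greeter {
    public String SayHi() {
      return "Hi ";
    }
  }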
diff --git a/test/960-default-smali/run b/test/960-default-smali/run
new file mode 100755
index 0000000000..e378b061d9
--- /dev/null
+++ b/test/960-default-smali/run
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Copyright 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if echo $@ | grep -q -- "--jvm"; then
+ ${RUN} "$@"
+else
+ ${RUN} "$@" --runtime-option -Xexperimental:default-methods -Xcompiler-option --runtime-arg -Xcompiler-option -Xexperimental:default-methods
+fi
diff --git a/test/960-default-smali/smali/A.smali b/test/960-default-smali/smali/A.smali
new file mode 100644
index 0000000000..e755612fbe
--- /dev/null
+++ b/test/960-default-smali/smali/A.smali
@@ -0,0 +1,38 @@
+# /*
+# * Copyright 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+.class public LA;
+.super Ljava/lang/Object;
+.implements LGreeter;
+
+# class A implements Greeter {
+# public String SayHi() {
+# return "Hi ";
+# }
+# }
+
+.method public constructor <init>()V
+ .registers 1
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
+
+.method public SayHi()Ljava/lang/String;
+ .registers 1
+
+ const-string v0, "Hi "
+ return-object v0
+.end method
diff --git a/test/960-default-smali/smali/Attendant.smali b/test/960-default-smali/smali/Attendant.smali
new file mode 100644
index 0000000000..ab63aeefcb
--- /dev/null
+++ b/test/960-default-smali/smali/Attendant.smali
@@ -0,0 +1,53 @@
+# /*
+# * Copyright 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+.class public abstract interface LAttendant;
+.super Ljava/lang/Object;
+
+# public interface Attendant {
+# public default String SayHi() {
+# return "welcome to " + GetPlace();
+# }
+# public default String SayHiTwice() {
+# return SayHi() + SayHi();
+# }
+#
+# public String GetPlace();
+# }
+
+.method public SayHi()Ljava/lang/String;
+ .locals 2
+ const-string v0, "welcome to "
+ invoke-interface {p0}, LAttendant;->GetPlace()Ljava/lang/String;
+ move-result-object v1
+ invoke-virtual {v0, v1}, Ljava/lang/String;->concat(Ljava/lang/String;)Ljava/lang/String;
+ move-result-object v0
+ return-object v0
+.end method
+
+.method public SayHiTwice()Ljava/lang/String;
+ .locals 2
+ invoke-interface {p0}, LAttendant;->SayHi()Ljava/lang/String;
+ move-result-object v0
+ invoke-interface {p0}, LAttendant;->SayHi()Ljava/lang/String;
+ move-result-object v1
+ invoke-virtual {v0, v1}, Ljava/lang/String;->concat(Ljava/lang/String;)Ljava/lang/String;
+ move-result-object v0
+ return-object v0
+.end method
+
+.method public abstract GetPlace()Ljava/lang/String;
+.end method
diff --git a/test/960-default-smali/smali/B.smali b/test/960-default-smali/smali/B.smali
new file mode 100644
index 0000000000..d847dd12ff
--- /dev/null
+++ b/test/960-default-smali/smali/B.smali
@@ -0,0 +1,38 @@
+# /*
+# * Copyright 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+.class public LB;
+.super Ljava/lang/Object;
+.implements LGreeter2;
+
+# class B implements Greeter2 {
+# public String SayHi() {
+# return "Hello ";
+# }
+# }
+
+.method public constructor <init>()V
+ .registers 1
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
+
+.method public SayHi()Ljava/lang/String;
+ .registers 1
+
+ const-string v0, "Hello "
+ return-object v0
+.end method
diff --git a/test/960-default-smali/smali/C.smali b/test/960-default-smali/smali/C.smali
new file mode 100644
index 0000000000..08a8508be1
--- /dev/null
+++ b/test/960-default-smali/smali/C.smali
@@ -0,0 +1,37 @@
+# /*
+# * Copyright 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+.class public LC;
+.super LA;
+
+# class C extends A {
+# public String SayHiTwice() {
+# return "You don't control me";
+# }
+# }
+
+.method public constructor <init>()V
+ .registers 1
+ invoke-direct {p0}, LA;-><init>()V
+ return-void
+.end method
+
+.method public SayHiTwice()Ljava/lang/String;
+ .registers 1
+
+ const-string v0, "You don't control me"
+ return-object v0
+.end method
diff --git a/test/960-default-smali/smali/D.smali b/test/960-default-smali/smali/D.smali
new file mode 100644
index 0000000000..32f3b7ec8b
--- /dev/null
+++ b/test/960-default-smali/smali/D.smali
@@ -0,0 +1,38 @@
+# /*
+# * Copyright 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+.class public LD;
+.super Ljava/lang/Object;
+.implements LGreeter3;
+
+# class D implements Greeter3 {
+# public String GetName() {
+# return "Alex ";
+# }
+# }
+
+.method public constructor <init>()V
+ .registers 1
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
+
+.method public GetName()Ljava/lang/String;
+ .registers 1
+
+ const-string v0, "Alex "
+ return-object v0
+.end method
diff --git a/test/960-default-smali/smali/E.smali b/test/960-default-smali/smali/E.smali
new file mode 100644
index 0000000000..bae6250414
--- /dev/null
+++ b/test/960-default-smali/smali/E.smali
@@ -0,0 +1,38 @@
+# /*
+# * Copyright 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+.class public LE;
+.super LA;
+.implements LGreeter2;
+
+# class E extends A implements Greeter2 {
+# public String SayHi() {
+# return "Hi2 ";
+# }
+# }
+
+.method public constructor <init>()V
+ .registers 1
+ invoke-direct {p0}, LA;-><init>()V
+ return-void
+.end method
+
+.method public SayHi()Ljava/lang/String;
+ .registers 1
+
+ const-string v0, "Hi2 "
+ return-object v0
+.end method
diff --git a/test/960-default-smali/smali/Extension.smali b/test/960-default-smali/smali/Extension.smali
new file mode 100644
index 0000000000..60ffa26ec6
--- /dev/null
+++ b/test/960-default-smali/smali/Extension.smali
@@ -0,0 +1,30 @@
+# /*
+# * Copyright 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+.class public abstract interface LExtension;
+.super Ljava/lang/Object;
+
+# public interface Extension {
+# public default String SayHi() {
+# return "welcome ";
+# }
+# }
+
+.method public SayHi()Ljava/lang/String;
+ .locals 1
+ const-string v0, "welcome "
+ return-object v0
+.end method
diff --git a/test/960-default-smali/smali/F.smali b/test/960-default-smali/smali/F.smali
new file mode 100644
index 0000000000..3eaa089e1f
--- /dev/null
+++ b/test/960-default-smali/smali/F.smali
@@ -0,0 +1,47 @@
+# /*
+# * Copyright 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+.class public LF;
+.super LA;
+.implements LAttendant;
+
+# class F extends A implements Attendant {
+# public String GetPlace() {
+# return "android";
+# }
+# public String SayHiTwice() {
+# return "We can override both interfaces";
+# }
+# }
+
+.method public constructor <init>()V
+ .registers 1
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
+
+.method public SayHiTwice()Ljava/lang/String;
+ .registers 1
+
+ const-string v0, "We can override both interfaces"
+ return-object v0
+.end method
+
+.method public GetPlace()Ljava/lang/String;
+ .registers 1
+ const-string v0, "android"
+ return-object v0
+.end method
diff --git a/test/960-default-smali/smali/G.smali b/test/960-default-smali/smali/G.smali
new file mode 100644
index 0000000000..446f2a4c64
--- /dev/null
+++ b/test/960-default-smali/smali/G.smali
@@ -0,0 +1,37 @@
+# /*
+# * Copyright 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+.class public LG;
+.super Ljava/lang/Object;
+.implements LAttendant;
+
+# class G implements Attendant {
+# public String GetPlace() {
+# return "android";
+# }
+# }
+
+.method public constructor <init>()V
+ .registers 1
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
+
+.method public GetPlace()Ljava/lang/String;
+ .registers 1
+ const-string v0, "android"
+ return-object v0
+.end method
diff --git a/test/960-default-smali/smali/Greeter.smali b/test/960-default-smali/smali/Greeter.smali
new file mode 100644
index 0000000000..28530ffc6f
--- /dev/null
+++ b/test/960-default-smali/smali/Greeter.smali
@@ -0,0 +1,40 @@
+# /*
+# * Copyright 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+.class public abstract interface LGreeter;
+.super Ljava/lang/Object;
+
+# public interface Greeter {
+# public String SayHi();
+#
+# public default String SayHiTwice() {
+# return SayHi() + SayHi();
+# }
+# }
+
+.method public abstract SayHi()Ljava/lang/String;
+.end method
+
+.method public SayHiTwice()Ljava/lang/String;
+ .locals 2
+ invoke-interface {p0}, LGreeter;->SayHi()Ljava/lang/String;
+ move-result-object v0
+ invoke-interface {p0}, LGreeter;->SayHi()Ljava/lang/String;
+ move-result-object v1
+ invoke-virtual {v0, v1}, Ljava/lang/String;->concat(Ljava/lang/String;)Ljava/lang/String;
+ move-result-object v0
+ return-object v0
+.end method
diff --git a/test/960-default-smali/smali/Greeter2.smali b/test/960-default-smali/smali/Greeter2.smali
new file mode 100644
index 0000000000..ace1798bab
--- /dev/null
+++ b/test/960-default-smali/smali/Greeter2.smali
@@ -0,0 +1,39 @@
+# /*
+# * Copyright 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+.class public abstract interface LGreeter2;
+.super Ljava/lang/Object;
+.implements LGreeter;
+
+# public interface Greeter2 extends Greeter {
+# public default String SayHiTwice() {
+# return "I say " + SayHi() + SayHi();
+# }
+# }
+
+.method public SayHiTwice()Ljava/lang/String;
+ .locals 3
+ const-string v0, "I say "
+ invoke-interface {p0}, LGreeter;->SayHi()Ljava/lang/String;
+ move-result-object v1
+ invoke-virtual {v0, v1}, Ljava/lang/String;->concat(Ljava/lang/String;)Ljava/lang/String;
+ move-result-object v0
+ invoke-interface {p0}, LGreeter;->SayHi()Ljava/lang/String;
+ move-result-object v1
+ invoke-virtual {v0, v1}, Ljava/lang/String;->concat(Ljava/lang/String;)Ljava/lang/String;
+ move-result-object v0
+ return-object v0
+.end method
diff --git a/test/960-default-smali/smali/Greeter3.smali b/test/960-default-smali/smali/Greeter3.smali
new file mode 100644
index 0000000000..31fc2e79ff
--- /dev/null
+++ b/test/960-default-smali/smali/Greeter3.smali
@@ -0,0 +1,40 @@
+# /*
+# * Copyright 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+.class public abstract interface LGreeter3;
+.super Ljava/lang/Object;
+.implements LGreeter;
+
+# public interface Greeter3 extends Greeter {
+# public String GetName();
+#
+# public default String SayHi() {
+# return "Hello " + GetName();
+# }
+# }
+
+.method public abstract GetName()Ljava/lang/String;
+.end method
+
+.method public SayHi()Ljava/lang/String;
+ .locals 2
+ const-string v0, "Hello "
+ invoke-interface {p0}, LGreeter3;->GetName()Ljava/lang/String;
+ move-result-object v1
+ invoke-virtual {v0, v1}, Ljava/lang/String;->concat(Ljava/lang/String;)Ljava/lang/String;
+ move-result-object v0
+ return-object v0
+.end method
diff --git a/test/960-default-smali/smali/H.smali b/test/960-default-smali/smali/H.smali
new file mode 100644
index 0000000000..82065ea49d
--- /dev/null
+++ b/test/960-default-smali/smali/H.smali
@@ -0,0 +1,28 @@
+# /*
+# * Copyright 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+.class public LH;
+.super Ljava/lang/Object;
+.implements LExtension;
+
+# class H implements Extension {
+# }
+
+.method public constructor <init>()V
+ .registers 1
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
diff --git a/test/960-default-smali/smali/I.smali b/test/960-default-smali/smali/I.smali
new file mode 100644
index 0000000000..72fb58afe4
--- /dev/null
+++ b/test/960-default-smali/smali/I.smali
@@ -0,0 +1,28 @@
+# /*
+# * Copyright 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+.class public LI;
+.super LA;
+.implements LGreeter2;
+
+# class I extends A implements Greeter2 {
+# }
+
+.method public constructor <init>()V
+ .registers 1
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
diff --git a/test/960-default-smali/smali/J.smali b/test/960-default-smali/smali/J.smali
new file mode 100644
index 0000000000..93f3d6231c
--- /dev/null
+++ b/test/960-default-smali/smali/J.smali
@@ -0,0 +1,29 @@
+# /*
+# * Copyright 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+.class public LJ;
+.super LA;
+
+# class J extends A {
+# }
+
+
+.method public constructor <init>()V
+ .registers 1
+ invoke-direct {p0}, LA;-><init>()V
+ return-void
+.end method
+
diff --git a/test/960-default-smali/smali/classes.xml b/test/960-default-smali/smali/classes.xml
new file mode 100644
index 0000000000..0aa41f7fb6
--- /dev/null
+++ b/test/960-default-smali/smali/classes.xml
@@ -0,0 +1,127 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2015 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<data>
+ <classes>
+ <class name="A" super="java/lang/Object">
+ <implements>
+ <item>Greeter</item>
+ </implements>
+ <methods> </methods>
+ </class>
+
+ <class name="B" super="java/lang/Object">
+ <implements>
+ <item>Greeter2</item>
+ </implements>
+ <methods> </methods>
+ </class>
+
+ <class name="C" super="A">
+ <implements> </implements>
+ <methods> </methods>
+ </class>
+
+ <class name="D" super="java/lang/Object">
+ <implements>
+ <item>Greeter3</item>
+ </implements>
+ <methods> </methods>
+ </class>
+
+ <class name="E" super="A">
+ <implements>
+ <item>Greeter2</item>
+ </implements>
+ <methods> </methods>
+ </class>
+
+ <class name="F" super="A">
+ <implements>
+ <item>Attendant</item>
+ </implements>
+ <methods> </methods>
+ </class>
+
+ <class name="G" super="java/lang/Object">
+ <implements>
+ <item>Attendant</item>
+ </implements>
+ <methods> </methods>
+ </class>
+
+ <class name="H" super="java/lang/Object">
+ <implements>
+ <item>Extension</item>
+ </implements>
+ <methods> </methods>
+ </class>
+
+ <class name="I" super="A">
+ <implements>
+ <item>Greeter2</item>
+ </implements>
+ <methods> </methods>
+ </class>
+
+ <class name="J" super="A">
+ <implements> </implements>
+ <methods> </methods>
+ </class>
+ </classes>
+
+ <interfaces>
+ <interface name="Extension" super="java/lang/Object">
+ <implements> </implements>
+ <methods>
+ <method type="default">SayHi</method>
+ </methods>
+ </interface>
+
+ <interface name="Greeter" super="java/lang/Object">
+ <implements> </implements>
+ <methods>
+ <method type="abstract">SayHi</method>
+ <method type="default">SayHiTwice</method>
+ </methods>
+ </interface>
+
+ <interface name="Greeter2" super="java/lang/Object">
+ <implements>
+ <item>Greeter</item>
+ </implements>
+ <methods> </methods>
+ </interface>
+
+ <interface name="Greeter3" super="java/lang/Object">
+ <implements>
+ <item>Greeter</item>
+ </implements>
+ <methods>
+ <method type="abstract">GetName</method>
+ </methods>
+ </interface>
+
+ <interface name="Attendant" super="java/lang/Object">
+ <implements> </implements>
+ <methods>
+ <method type="default">SayHi</method>
+ <method type="default">SayHiTwice</method>
+ <method type="abstract">GetPlace</method>
+ </methods>
+ </interface>
+ </interfaces>
+</data>
diff --git a/test/960-default-smali/util-src/generate_smali.py b/test/960-default-smali/util-src/generate_smali.py
new file mode 100755
index 0000000000..b2bf1f0761
--- /dev/null
+++ b/test/960-default-smali/util-src/generate_smali.py
@@ -0,0 +1,376 @@
+#!/usr/bin/python3
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Generate Smali Main file for test 960
+"""
+
+import os
+import sys
+from pathlib import Path
+
+BUILD_TOP = os.getenv("ANDROID_BUILD_TOP")
+if BUILD_TOP is None:
+ print("ANDROID_BUILD_TOP not set. Please run build/envsetup.sh", file=sys.stderr)
+ sys.exit(1)
+
+# Allow us to import utils and mixins.
+sys.path.append(str(Path(BUILD_TOP)/"art"/"test"/"utils"/"python"))
+
+from testgen.utils import get_copyright
+import testgen.mixins as mixins
+
+from collections import namedtuple
+import itertools
+import functools
+import xml.etree.ElementTree as ET
+
+class MainClass(mixins.DumpMixin, mixins.Named, mixins.SmaliFileMixin):
+ """
+  A main class and main method for this test.
+ """
+
+ MAIN_CLASS_TEMPLATE = """{copyright}
+.class public LMain;
+.super Ljava/lang/Object;
+
+# class Main {{
+
+.method public constructor <init>()V
+ .registers 1
+ invoke-direct {{p0}}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
+
+{test_groups}
+
+{test_funcs}
+
+{main_func}
+
+# }}
+"""
+
+ MAIN_FUNCTION_TEMPLATE = """
+# public static void main(String[] args) {{
+.method public static main([Ljava/lang/String;)V
+ .locals 2
+ sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream;
+
+ {test_group_invoke}
+
+ return-void
+.end method
+# }}
+"""
+
+ TEST_GROUP_INVOKE_TEMPLATE = """
+# {test_name}();
+ invoke-static {{}}, {test_name}()V
+"""
+
+ def __init__(self):
+ """
+ Initialize this MainClass
+ """
+ self.tests = set()
+ self.global_funcs = set()
+
+ def add_instance(self, it):
+ """
+ Add an instance test for the given class
+ """
+ self.tests.add(it)
+
+ def add_func(self, f):
+ """
+ Add a function to the class
+ """
+ self.global_funcs.add(f)
+
+ def get_name(self):
+ """
+ Get the name of this class
+ """
+ return "Main"
+
+ def __str__(self):
+ """
+ Print this class
+ """
+ all_tests = sorted(self.tests)
+ test_invoke = ""
+ test_groups = ""
+ for t in all_tests:
+ test_groups += str(t)
+ for t in sorted(all_tests):
+ test_invoke += self.TEST_GROUP_INVOKE_TEMPLATE.format(test_name=t.get_name())
+ main_func = self.MAIN_FUNCTION_TEMPLATE.format(test_group_invoke=test_invoke)
+
+ funcs = ""
+ for f in self.global_funcs:
+ funcs += str(f)
+ return self.MAIN_CLASS_TEMPLATE.format(copyright = get_copyright('smali'),
+ test_groups=test_groups,
+ main_func=main_func, test_funcs=funcs)
+
+
+class InstanceTest(mixins.Named, mixins.NameComparableMixin):
+ """
+  A method that runs tests for a particular concrete type. It calls the test
+  cases for running it in all possible ways.
+ """
+
+ INSTANCE_TEST_TEMPLATE = """
+# public static void {test_name}() {{
+# System.out.println("Testing for type {ty}");
+# String s = "{ty}";
+# {ty} v = new {ty}();
+.method public static {test_name}()V
+ .locals 3
+ sget-object v2, Ljava/lang/System;->out:Ljava/io/PrintStream;
+ const-string v0, "Testing for type {ty}"
+ invoke-virtual {{v2,v0}}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+
+ const-string v0, "{ty}"
+ new-instance v1, L{ty};
+ invoke-direct {{v1}}, L{ty};-><init>()V
+
+ {invokes}
+
+ const-string v0, "End testing for type {ty}"
+ invoke-virtual {{v2,v0}}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+ return-void
+.end method
+# System.out.println("End testing for type {ty}");
+# }}
+"""
+
+ TEST_INVOKE_TEMPLATE = """
+# {fname}(s, v);
+ invoke-static {{v0, v1}}, {fname}(Ljava/lang/String;L{farg};)V
+"""
+
+ def __init__(self, main, ty):
+ """
+ Initialize this test group for the given type
+ """
+ self.ty = ty
+ self.main = main
+ self.funcs = set()
+ self.main.add_instance(self)
+
+ def get_name(self):
+ """
+ Get the name of this test group
+ """
+ return "TEST_NAME_"+self.ty
+
+ def add_func(self, f):
+ """
+ Add a test function to this test group
+ """
+ self.main.add_func(f)
+ self.funcs.add(f)
+
+ def __str__(self):
+ """
+ Returns the smali code for this function
+ """
+ func_invokes = ""
+ for f in sorted(self.funcs, key=lambda a: (a.func, a.farg)):
+ func_invokes += self.TEST_INVOKE_TEMPLATE.format(fname=f.get_name(),
+ farg=f.farg)
+
+ return self.INSTANCE_TEST_TEMPLATE.format(test_name=self.get_name(), ty=self.ty,
+ invokes=func_invokes)
+
+class Func(mixins.Named, mixins.NameComparableMixin):
+ """
+  A single test case that attempts to invoke a function on a receiver of a given type.
+ """
+
+ TEST_FUNCTION_TEMPLATE = """
+# public static void {fname}(String s, {farg} v) {{
+# try {{
+# System.out.printf("%s-{invoke_type:<9} {farg:>9}.{callfunc}()='%s'\\n", s, v.{callfunc}());
+# return;
+# }} catch (Error e) {{
+# System.out.printf("%s-{invoke_type} on {farg}: {callfunc}() threw exception!\\n", s);
+# e.printStackTrace(System.out);
+# }}
+# }}
+.method public static {fname}(Ljava/lang/String;L{farg};)V
+ .locals 7
+ :call_{fname}_try_start
+ const/4 v0, 2
+ new-array v1,v0, [Ljava/lang/Object;
+ const/4 v0, 0
+ aput-object p0,v1,v0
+
+ sget-object v2, Ljava/lang/System;->out:Ljava/io/PrintStream;
+ const-string v3, "%s-{invoke_type:<9} {farg:>9}.{callfunc}()='%s'\\n"
+
+ invoke-{invoke_type} {{p1}}, L{farg};->{callfunc}()Ljava/lang/String;
+ move-result-object v4
+ const/4 v0, 1
+ aput-object v4, v1, v0
+
+ invoke-virtual {{v2,v3,v1}}, Ljava/io/PrintStream;->printf(Ljava/lang/String;[Ljava/lang/Object;)Ljava/io/PrintStream;
+ return-void
+ :call_{fname}_try_end
+ .catch Ljava/lang/Error; {{:call_{fname}_try_start .. :call_{fname}_try_end}} :error_{fname}_start
+ :error_{fname}_start
+ move-exception v3
+ const/4 v0, 1
+ new-array v1,v0, [Ljava/lang/Object;
+ const/4 v0, 0
+ aput-object p0, v1, v0
+ sget-object v2, Ljava/lang/System;->out:Ljava/io/PrintStream;
+ const-string v4, "%s-{invoke_type} on {farg}: {callfunc}() threw exception!\\n"
+ invoke-virtual {{v2,v4,v1}}, Ljava/io/PrintStream;->printf(Ljava/lang/String;[Ljava/lang/Object;)Ljava/io/PrintStream;
+ invoke-virtual {{v3,v2}}, Ljava/lang/Error;->printStackTrace(Ljava/io/PrintStream;)V
+ return-void
+.end method
+"""
+
+ def __init__(self, func, farg, invoke):
+ """
+ Initialize this test function for the given invoke type and argument
+ """
+ self.func = func
+ self.farg = farg
+ self.invoke = invoke
+
+ def get_name(self):
+ """
+ Get the name of this test
+ """
+ return "Test_Func_{}_{}_{}".format(self.func, self.farg, self.invoke)
+
+ def __str__(self):
+ """
+ Get the smali code for this test function
+ """
+ return self.TEST_FUNCTION_TEMPLATE.format(fname=self.get_name(),
+ farg=self.farg,
+ invoke_type=self.invoke,
+ callfunc=self.func)
+
+def flatten_classes(classes, c):
+ """
+ Iterate over all the classes 'c' can be used as
+ """
+ while c:
+ yield c
+ c = classes.get(c.super_class)
+
+def flatten_class_methods(classes, c):
+ """
+ Iterate over all the methods 'c' can call
+ """
+ for c1 in flatten_classes(classes, c):
+ yield from c1.methods
+
+def flatten_interfaces(dat, c):
+ """
+ Iterate over all the interfaces 'c' transitively implements
+ """
+ def get_ifaces(cl):
+ for i2 in cl.implements:
+ yield dat.interfaces[i2]
+ yield from get_ifaces(dat.interfaces[i2])
+
+ for cl in flatten_classes(dat.classes, c):
+ yield from get_ifaces(cl)
+
+def flatten_interface_methods(dat, i):
+ """
+  Iterate over all the interface methods 'i' can call
+ """
+ yield from i.methods
+ for i2 in flatten_interfaces(dat, i):
+ yield from i2.methods
+
+def make_main_class(dat):
+ """
+ Creates a Main.smali file that runs all the tests
+ """
+ m = MainClass()
+ for c in dat.classes.values():
+ i = InstanceTest(m, c.name)
+ for clazz in flatten_classes(dat.classes, c):
+ for meth in flatten_class_methods(dat.classes, clazz):
+ i.add_func(Func(meth, clazz.name, 'virtual'))
+ for iface in flatten_interfaces(dat, clazz):
+ for meth in flatten_interface_methods(dat, iface):
+ i.add_func(Func(meth, clazz.name, 'virtual'))
+ i.add_func(Func(meth, iface.name, 'interface'))
+ return m
+
+class TestData(namedtuple("TestData", ['classes', 'interfaces'])):
+ """
+ A class representing the classes.xml document.
+ """
+ pass
+
+class Clazz(namedtuple("Clazz", ["name", "methods", "super_class", "implements"])):
+ """
+ A class representing a class element in the classes.xml document.
+ """
+ pass
+
+class IFace(namedtuple("IFace", ["name", "methods", "super_class", "implements"])):
+ """
+ A class representing an interface element in the classes.xml document.
+ """
+ pass
+
+def parse_xml(xml):
+ """
+ Parse the xml description of this test.
+ """
+ classes = dict()
+ ifaces = dict()
+ root = ET.fromstring(xml)
+ for iface in root.find("interfaces"):
+ name = iface.attrib['name']
+ implements = [a.text for a in iface.find("implements")]
+ methods = [a.text for a in iface.find("methods")]
+ ifaces[name] = IFace(name = name,
+ super_class = iface.attrib['super'],
+ methods = methods,
+ implements = implements)
+ for clazz in root.find('classes'):
+ name = clazz.attrib['name']
+ implements = [a.text for a in clazz.find("implements")]
+ methods = [a.text for a in clazz.find("methods")]
+ classes[name] = Clazz(name = name,
+ super_class = clazz.attrib['super'],
+ methods = methods,
+ implements = implements)
+ return TestData(classes, ifaces)
+
+def main(argv):
+ smali_dir = Path(argv[1])
+ if not smali_dir.exists() or not smali_dir.is_dir():
+ print("{} is not a valid smali dir".format(smali_dir), file=sys.stderr)
+ sys.exit(1)
+ class_data = parse_xml((smali_dir / "classes.xml").open().read())
+ make_main_class(class_data).dump(smali_dir)
+
+if __name__ == '__main__':
+ main(sys.argv)
diff --git a/test/961-default-iface-resolution-generated/build b/test/961-default-iface-resolution-generated/build
new file mode 100755
index 0000000000..707c17e1cf
--- /dev/null
+++ b/test/961-default-iface-resolution-generated/build
@@ -0,0 +1,47 @@
+#!/bin/bash
+#
+# Copyright 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# make us exit on a failure
+set -e
+
+mkdir -p ./smali
+
+# We will be making more files than the ulimit is set to allow. Remove it temporarily.
+OLD_ULIMIT=`ulimit -S`
+ulimit -S unlimited
+
+restore_ulimit() {
+ ulimit -S "$OLD_ULIMIT"
+}
+trap 'restore_ulimit' ERR
+
+# Generate the smali files and expected.txt or fail
+./util-src/generate_smali.py ./smali ./expected.txt
+
+if [[ $@ == *"--jvm"* ]]; then
+ # Build the Java files if we are running a --jvm test
+ mkdir -p src
+ mkdir -p classes
+ ${ANDROID_BUILD_TOP}/art/tools/extract-embedded-java ./smali ./src
+ ${JAVAC} -implicit:none -d classes $(find src -name '*.java')
+fi
+
+# Build the smali files and make a dex
+${SMALI} -JXmx512m --experimental --api-level 23 --output classes.dex $(find smali -name '*.smali')
+zip $TEST_NAME.jar classes.dex
+
+# Reset the ulimit back to its initial value
+restore_ulimit
diff --git a/test/961-default-iface-resolution-generated/expected.txt b/test/961-default-iface-resolution-generated/expected.txt
new file mode 100644
index 0000000000..1ddd65d177
--- /dev/null
+++ b/test/961-default-iface-resolution-generated/expected.txt
@@ -0,0 +1 @@
+This file is generated by util-src/generate_smali.py do not directly modify!
diff --git a/test/961-default-iface-resolution-generated/info.txt b/test/961-default-iface-resolution-generated/info.txt
new file mode 100644
index 0000000000..2cd2cc75b7
--- /dev/null
+++ b/test/961-default-iface-resolution-generated/info.txt
@@ -0,0 +1,17 @@
+Smali-based tests for experimental interface default methods.
+
+This tests that interface method resolution order is correct.
+
+Obviously needs to run under ART or a Java 8 Language runtime and compiler.
+
+When run, smali test files are generated by the util-src/generate_smali.py
+script. If we run with --jvm we will use the
+$(ANDROID_BUILD_TOP)/art/tools/extract-embedded-java script to turn the smali
+into equivalent Java using the embedded Java code.
+
+Care should be taken when updating the generate_smali.py script. It should always
+return equivalent output when run multiple times and the expected output should
+be valid.
+
+Do not modify the expected.txt file. It is generated on each run by
+util-src/generate_smali.py.
diff --git a/test/961-default-iface-resolution-generated/run b/test/961-default-iface-resolution-generated/run
new file mode 100755
index 0000000000..e378b061d9
--- /dev/null
+++ b/test/961-default-iface-resolution-generated/run
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Copyright 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if echo $@ | grep -q -- "--jvm"; then
+ ${RUN} "$@"
+else
+ ${RUN} "$@" --runtime-option -Xexperimental:default-methods -Xcompiler-option --runtime-arg -Xcompiler-option -Xexperimental:default-methods
+fi
diff --git a/test/961-default-iface-resolution-generated/util-src/generate_smali.py b/test/961-default-iface-resolution-generated/util-src/generate_smali.py
new file mode 100755
index 0000000000..921a096dd3
--- /dev/null
+++ b/test/961-default-iface-resolution-generated/util-src/generate_smali.py
@@ -0,0 +1,466 @@
+#!/usr/bin/python3
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Generate Smali test files for test 961.
+"""
+
+import os
+import sys
+from pathlib import Path
+
+BUILD_TOP = os.getenv("ANDROID_BUILD_TOP")
+if BUILD_TOP is None:
+ print("ANDROID_BUILD_TOP not set. Please run build/envsetup.sh", file=sys.stderr)
+ sys.exit(1)
+
+# Allow us to import utils and mixins.
+sys.path.append(str(Path(BUILD_TOP)/"art"/"test"/"utils"/"python"))
+
+from testgen.utils import get_copyright, subtree_sizes, gensym, filter_blanks
+import testgen.mixins as mixins
+
+from functools import total_ordering
+import itertools
+import string
+
+# The max depth the type tree can have. Includes the class object in the tree.
+# Increasing this increases the number of generated files significantly. This
+# value was chosen as it is fairly quick to run and very comprehensive, checking
+# every possible interface tree up to 5 layers deep.
+MAX_IFACE_DEPTH = 5
+
+class MainClass(mixins.DumpMixin, mixins.Named, mixins.SmaliFileMixin):
+ """
+ A Main.smali file containing the Main class and the main function. It will run
+ all the test functions we have.
+ """
+
+ MAIN_CLASS_TEMPLATE = """{copyright}
+
+.class public LMain;
+.super Ljava/lang/Object;
+
+# class Main {{
+
+.method public constructor <init>()V
+ .registers 1
+ invoke-direct {{p0}}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
+
+{test_groups}
+
+{main_func}
+
+# }}
+"""
+
+ MAIN_FUNCTION_TEMPLATE = """
+# public static void main(String[] args) {{
+.method public static main([Ljava/lang/String;)V
+ .locals 2
+ sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream;
+
+ {test_group_invoke}
+
+ return-void
+.end method
+# }}
+"""
+
+ TEST_GROUP_INVOKE_TEMPLATE = """
+# {test_name}();
+ invoke-static {{}}, {test_name}()V
+"""
+
+ def __init__(self):
+ """
+ Initialize this MainClass. We start out with no tests.
+ """
+ self.tests = set()
+
+ def get_expected(self):
+ """
+ Get the expected output of this test.
+ """
+ all_tests = sorted(self.tests)
+ return filter_blanks("\n".join(a.get_expected() for a in all_tests))
+
+ def add_test(self, ty):
+ """
+ Add a test for the concrete type 'ty'
+ """
+ self.tests.add(Func(ty))
+
+ def get_name(self):
+ """
+ Get the name of this class
+ """
+ return "Main"
+
+ def __str__(self):
+ """
+ Print the MainClass smali code.
+ """
+ all_tests = sorted(self.tests)
+ test_invoke = ""
+ test_groups = ""
+ for t in all_tests:
+ test_groups += str(t)
+ for t in all_tests:
+ test_invoke += self.TEST_GROUP_INVOKE_TEMPLATE.format(test_name=t.get_name())
+ main_func = self.MAIN_FUNCTION_TEMPLATE.format(test_group_invoke=test_invoke)
+
+ return self.MAIN_CLASS_TEMPLATE.format(copyright = get_copyright("smali"),
+ test_groups = test_groups,
+ main_func = main_func)
+
+class Func(mixins.Named, mixins.NameComparableMixin):
+ """
+ A function that tests the functionality of a concrete type. Should only be
+ constructed by MainClass.add_test.
+ """
+
+ TEST_FUNCTION_TEMPLATE = """
+# public static void {fname}() {{
+# try {{
+# {farg} v = new {farg}();
+# System.out.printf("%s calls default method on %s\\n",
+# v.CalledClassName(),
+# v.CalledInterfaceName());
+# return;
+# }} catch (Error e) {{
+# e.printStackTrace(System.out);
+# return;
+# }}
+# }}
+.method public static {fname}()V
+ .locals 7
+ :call_{fname}_try_start
+ new-instance v6, L{farg};
+ invoke-direct {{v6}}, L{farg};-><init>()V
+
+ const/4 v0, 2
+ new-array v1,v0, [Ljava/lang/Object;
+ const/4 v0, 0
+ invoke-virtual {{v6}}, L{farg};->CalledClassName()Ljava/lang/String;
+ move-result-object v4
+ aput-object v4,v1,v0
+
+ sget-object v2, Ljava/lang/System;->out:Ljava/io/PrintStream;
+ const-string v3, "%s calls default method on %s\\n"
+
+ invoke-virtual {{v6}}, L{farg};->CalledInterfaceName()Ljava/lang/String;
+ move-result-object v4
+ const/4 v0, 1
+ aput-object v4, v1, v0
+
+ invoke-virtual {{v2,v3,v1}}, Ljava/io/PrintStream;->printf(Ljava/lang/String;[Ljava/lang/Object;)Ljava/io/PrintStream;
+ return-void
+ :call_{fname}_try_end
+ .catch Ljava/lang/Error; {{:call_{fname}_try_start .. :call_{fname}_try_end}} :error_{fname}_start
+ :error_{fname}_start
+ move-exception v3
+ sget-object v2, Ljava/lang/System;->out:Ljava/io/PrintStream;
+ invoke-virtual {{v3,v2}}, Ljava/lang/Error;->printStackTrace(Ljava/io/PrintStream;)V
+ return-void
+.end method
+"""
+
+ def __init__(self, farg):
+ """
+ Initialize a test function for the given argument
+ """
+ self.farg = farg
+
+ def get_expected(self):
+ """
+ Get the expected output calling this function.
+ """
+ return "{tree} calls default method on {iface_tree}".format(
+ tree = self.farg.get_tree(), iface_tree = self.farg.get_called().get_tree())
+
+ def get_name(self):
+ """
+ Get the name of this function
+ """
+ return "TEST_FUNC_{}".format(self.farg.get_name())
+
+ def __str__(self):
+ """
+ Print the smali code of this function.
+ """
+ return self.TEST_FUNCTION_TEMPLATE.format(fname=self.get_name(), farg=self.farg.get_name())
+
+class TestClass(mixins.DumpMixin, mixins.Named, mixins.NameComparableMixin, mixins.SmaliFileMixin):
+ """
+ A class that will be instantiated to test default method resolution order.
+ """
+
+ TEST_CLASS_TEMPLATE = """{copyright}
+
+.class public L{class_name};
+.super Ljava/lang/Object;
+.implements L{iface_name};
+
+# public class {class_name} implements {iface_name} {{
+# public String CalledClassName() {{
+# return "{tree}";
+# }}
+# }}
+
+.method public constructor <init>()V
+ .registers 1
+ invoke-direct {{p0}}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
+
+.method public CalledClassName()Ljava/lang/String;
+ .locals 1
+ const-string v0, "{tree}"
+ return-object v0
+.end method
+"""
+
+ def __init__(self, iface):
+ """
+ Initialize this test class which implements the given interface
+ """
+ self.iface = iface
+ self.class_name = "CLASS_"+gensym()
+
+ def get_name(self):
+ """
+ Get the name of this class
+ """
+ return self.class_name
+
+ def get_tree(self):
+ """
+ Print out a representation of the type tree of this class
+ """
+ return "[{class_name} {iface_tree}]".format(class_name = self.class_name,
+ iface_tree = self.iface.get_tree())
+
+ def __iter__(self):
+ """
+ Step through all interfaces implemented transitively by this class
+ """
+ yield self.iface
+ yield from self.iface
+
+ def get_called(self):
+ """
+ Get the interface whose default method would be called when calling the
+ CalledInterfaceName function.
+ """
+ all_ifaces = set(iface for iface in self if iface.default)
+ for i in all_ifaces:
+ if all(map(lambda j: i not in j.get_super_types(), all_ifaces)):
+ return i
+ raise Exception("UNREACHABLE! Unable to find default method!")
+
+ def __str__(self):
+ """
+ Print the smali code of this class.
+ """
+ return self.TEST_CLASS_TEMPLATE.format(copyright = get_copyright('smali'),
+ iface_name = self.iface.get_name(),
+ tree = self.get_tree(),
+ class_name = self.class_name)
+
+class TestInterface(mixins.DumpMixin, mixins.Named, mixins.NameComparableMixin, mixins.SmaliFileMixin):
+ """
+ An interface that will be used to test default method resolution order.
+ """
+
+ TEST_INTERFACE_TEMPLATE = """{copyright}
+.class public abstract interface L{class_name};
+.super Ljava/lang/Object;
+{implements_spec}
+
+# public interface {class_name} {extends} {ifaces} {{
+# public String CalledClassName();
+.method public abstract CalledClassName()Ljava/lang/String;
+.end method
+
+{funcs}
+
+# }}
+"""
+
+ DEFAULT_FUNC_TEMPLATE = """
+# public default String CalledInterfaceName() {{
+# return "{tree}";
+# }}
+.method public CalledInterfaceName()Ljava/lang/String;
+ .locals 1
+ const-string v0, "{tree}"
+ return-object v0
+.end method
+"""
+
+ IMPLEMENTS_TEMPLATE = """
+.implements L{iface_name};
+"""
+
+ def __init__(self, ifaces, default):
+ """
+ Initialize interface with the given super-interfaces
+ """
+ self.ifaces = sorted(ifaces)
+ self.default = default
+ end = "_DEFAULT" if default else ""
+ self.class_name = "INTERFACE_"+gensym()+end
+
+ def get_super_types(self):
+ """
+ Returns a set of all the supertypes of this interface
+ """
+ return set(i2 for i2 in self)
+
+ def get_name(self):
+ """
+ Get the name of this class
+ """
+ return self.class_name
+
+ def get_tree(self):
+ """
+ Print out a representation of the type tree of this class
+ """
+ return "[{class_name} {iftree}]".format(class_name = self.get_name(),
+ iftree = print_tree(self.ifaces))
+
+ def __iter__(self):
+ """
+ Performs depth-first traversal of the interface tree this interface is the
+ root of. Does not filter out repeats.
+ """
+ for i in self.ifaces:
+ yield i
+ yield from i
+
+ def __str__(self):
+ """
+ Print the smali code of this interface.
+ """
+ s_ifaces = " "
+ j_ifaces = " "
+ for i in self.ifaces:
+ s_ifaces += self.IMPLEMENTS_TEMPLATE.format(iface_name = i.get_name())
+ j_ifaces += " {},".format(i.get_name())
+ j_ifaces = j_ifaces[0:-1]
+ if self.default:
+ funcs = self.DEFAULT_FUNC_TEMPLATE.format(ifaces = j_ifaces,
+ tree = self.get_tree(),
+ class_name = self.class_name)
+ else:
+ funcs = ""
+ return self.TEST_INTERFACE_TEMPLATE.format(copyright = get_copyright('smali'),
+ implements_spec = s_ifaces,
+ extends = "extends" if len(self.ifaces) else "",
+ ifaces = j_ifaces,
+ funcs = funcs,
+ tree = self.get_tree(),
+ class_name = self.class_name)
+
+def print_tree(ifaces):
+ """
+ Prints a list of iface trees
+ """
+ return " ".join(i.get_tree() for i in ifaces)
+
+# The deduplicated output of subtree_sizes for each size up to
+# MAX_IFACE_DEPTH.
+SUBTREES = [set(tuple(sorted(l)) for l in subtree_sizes(i))
+ for i in range(MAX_IFACE_DEPTH + 1)]
+
+def create_interface_trees():
+ """
+ Return all legal interface trees
+ """
+ def dump_supers(s):
+ """
+ Does depth first traversal of all the interfaces in the list.
+ """
+ for i in s:
+ yield i
+ yield from i
+
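+ # create_interface_trees_inner works in two passes: first it builds trees in
+ # which any leaf may carry the default method, then (when defaults are allowed)
+ # it builds variants in which only one subtree position may carry defaults, so
+ # the default method can originate from any point in the hierarchy.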
+ def create_interface_trees_inner(num, allow_default):
+ for split in SUBTREES[num]:
+ ifaces = []
+ for sub in split:
+ if sub == 1:
+ ifaces.append([TestInterface([], allow_default)])
+ if allow_default:
+ ifaces[-1].append(TestInterface([], False))
+ else:
+ ifaces.append(list(create_interface_trees_inner(sub, allow_default)))
+ for supers in itertools.product(*ifaces):
+ all_supers = sorted(set(dump_supers(supers)) - set(supers))
+ for i in range(len(all_supers) + 1):
+ for combo in itertools.combinations(all_supers, i):
+ yield TestInterface(list(combo) + list(supers), allow_default)
+ if allow_default:
+ for i in range(len(split)):
+ ifaces = []
+ for sub, cs in zip(split, itertools.count()):
+ if sub == 1:
+ ifaces.append([TestInterface([], i == cs)])
+ else:
+ ifaces.append(list(create_interface_trees_inner(sub, i == cs)))
+ for supers in itertools.product(*ifaces):
+ all_supers = sorted(set(dump_supers(supers)) - set(supers))
+ for i in range(len(all_supers) + 1):
+ for combo in itertools.combinations(all_supers, i):
+ yield TestInterface(list(combo) + list(supers), False)
+
+ for num in range(1, MAX_IFACE_DEPTH):
+ yield from create_interface_trees_inner(num, True)
+
+def create_all_test_files():
+ """
+ Creates all the objects representing the files in this test. They just need to
+ be dumped.
+ """
+ mc = MainClass()
+ classes = {mc}
+ for tree in create_interface_trees():
+ classes.add(tree)
+ for i in tree:
+ classes.add(i)
+ test_class = TestClass(tree)
+ mc.add_test(test_class)
+ classes.add(test_class)
+ return mc, classes
+
+def main(argv):
+ smali_dir = Path(argv[1])
+ if not smali_dir.exists() or not smali_dir.is_dir():
+ print("{} is not a valid smali dir".format(smali_dir), file=sys.stderr)
+ sys.exit(1)
+ expected_txt = Path(argv[2])
+ mainclass, all_files = create_all_test_files()
+ with expected_txt.open('w') as out:
+ print(mainclass.get_expected(), file=out)
+ for f in all_files:
+ f.dump(smali_dir)
+
+if __name__ == '__main__':
+ main(sys.argv)
diff --git a/test/962-iface-static/build b/test/962-iface-static/build
new file mode 100755
index 0000000000..5ad82f70d1
--- /dev/null
+++ b/test/962-iface-static/build
@@ -0,0 +1,30 @@
+#!/bin/bash
+#
+# Copyright 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# make us exit on a failure
+set -e
+
+if [[ $@ == *"--jvm"* ]]; then
+ # Build the Java files if we are running a --jvm test
+ mkdir -p src
+ mkdir -p classes
+ ${ANDROID_BUILD_TOP}/art/tools/extract-embedded-java ./smali ./src
+ ${JAVAC} -implicit:none -d classes $(find src -name '*.java')
+fi
+
+# Build the smali files and make a dex
+${SMALI} -JXmx512m --experimental --api-level 23 --output classes.dex $(find smali -name '*.smali')
+zip $TEST_NAME.jar classes.dex
diff --git a/test/962-iface-static/expected.txt b/test/962-iface-static/expected.txt
new file mode 100644
index 0000000000..6d98ea1571
--- /dev/null
+++ b/test/962-iface-static/expected.txt
@@ -0,0 +1,3 @@
+init
+constructor
+Hello
diff --git a/test/962-iface-static/info.txt b/test/962-iface-static/info.txt
new file mode 100644
index 0000000000..d4732e533d
--- /dev/null
+++ b/test/962-iface-static/info.txt
@@ -0,0 +1,4 @@
+Smali-based tests for experimental interface static methods.
+
+To run with --jvm you must export JAVA_HOME to point to a Java 8 installation
+and pass the --use-java-home option to run-test.
diff --git a/test/962-iface-static/run b/test/962-iface-static/run
new file mode 100755
index 0000000000..e713708c18
--- /dev/null
+++ b/test/962-iface-static/run
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if echo $@ | grep -q -- "--jvm"; then
+ ${RUN} "$@"
+else
+ ${RUN} "$@" --runtime-option -Xexperimental:default-methods -Xcompiler-option --runtime-arg -Xcompiler-option -Xexperimental:default-methods
+fi
diff --git a/test/962-iface-static/smali/Displayer.smali b/test/962-iface-static/smali/Displayer.smali
new file mode 100644
index 0000000000..06bec16432
--- /dev/null
+++ b/test/962-iface-static/smali/Displayer.smali
@@ -0,0 +1,45 @@
+# /*
+# * Copyright (C) 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+# public class Displayer {
+# static {
+# System.out.println("init");
+# }
+#
+# public Displayer() {
+# System.out.println("constructor");
+# }
+# }
+
+.class public LDisplayer;
+.super Ljava/lang/Object;
+
+.method public static <clinit>()V
+ .locals 3
+ sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream;
+ const-string v0, "init"
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+ return-void
+.end method
+
+.method public constructor <init>()V
+ .locals 2
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream;
+ const-string v0, "constructor"
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+ return-void
+.end method
diff --git a/test/962-iface-static/smali/Main.smali b/test/962-iface-static/smali/Main.smali
new file mode 100644
index 0000000000..72fa5e0e6e
--- /dev/null
+++ b/test/962-iface-static/smali/Main.smali
@@ -0,0 +1,40 @@
+# /*
+# * Copyright (C) 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+# class Main {
+# public static void main(String[] args) {
+# System.out.println(iface.SayHi());
+# }
+# }
+.class public LMain;
+.super Ljava/lang/Object;
+
+.method public constructor <init>()V
+ .registers 1
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
+
+.method public static main([Ljava/lang/String;)V
+ .locals 2
+ sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream;
+
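+# // Calling the static interface method below triggers initialization of iface,
+# // which constructs its Displayer field (printing "init" and then "constructor")
+# // before "Hello" is printed.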
+ invoke-static {}, Liface;->SayHi()Ljava/lang/String;
+ move-result-object v0
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+
+ return-void
+.end method
diff --git a/test/962-iface-static/smali/iface.smali b/test/962-iface-static/smali/iface.smali
new file mode 100644
index 0000000000..441aae669e
--- /dev/null
+++ b/test/962-iface-static/smali/iface.smali
@@ -0,0 +1,43 @@
+# /*
+# * Copyright (C) 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+# public interface iface {
+# public static final Displayer f = new Displayer();
+#
+# public static String SayHi() {
+# return "Hello";
+# }
+# }
+
+.class public abstract interface Liface;
+.super Ljava/lang/Object;
+
+.field public final static f:LDisplayer;
+
+.method public static <clinit>()V
+ .locals 3
+ new-instance v1, LDisplayer;
+ invoke-direct {v1}, LDisplayer;-><init>()V
+ sput-object v1, Liface;->f:LDisplayer;
+ return-void
+.end method
+
+.method public static SayHi()Ljava/lang/String;
+ .locals 1
+ const-string v0, "Hello"
+ return-object v0
+.end method
+
diff --git a/test/963-default-range-smali/build b/test/963-default-range-smali/build
new file mode 100755
index 0000000000..5ad82f70d1
--- /dev/null
+++ b/test/963-default-range-smali/build
@@ -0,0 +1,30 @@
+#!/bin/bash
+#
+# Copyright 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# make us exit on a failure
+set -e
+
+if [[ $@ == *"--jvm"* ]]; then
+ # Build the Java files if we are running a --jvm test
+ mkdir -p src
+ mkdir -p classes
+ ${ANDROID_BUILD_TOP}/art/tools/extract-embedded-java ./smali ./src
+ ${JAVAC} -implicit:none -d classes $(find src -name '*.java')
+fi
+
+# Build the smali files and make a dex
+${SMALI} -JXmx512m --experimental --api-level 23 --output classes.dex $(find smali -name '*.smali')
+zip $TEST_NAME.jar classes.dex
diff --git a/test/963-default-range-smali/expected.txt b/test/963-default-range-smali/expected.txt
new file mode 100644
index 0000000000..af17d2f873
--- /dev/null
+++ b/test/963-default-range-smali/expected.txt
@@ -0,0 +1,2 @@
+Hello
+Hello
diff --git a/test/963-default-range-smali/info.txt b/test/963-default-range-smali/info.txt
new file mode 100644
index 0000000000..d4732e533d
--- /dev/null
+++ b/test/963-default-range-smali/info.txt
@@ -0,0 +1,4 @@
+Smali-based tests for experimental interface default methods.
+
+To run with --jvm you must export JAVA_HOME to point to a Java 8 installation
+and pass the --use-java-home option to run-test.
diff --git a/test/963-default-range-smali/run b/test/963-default-range-smali/run
new file mode 100755
index 0000000000..e713708c18
--- /dev/null
+++ b/test/963-default-range-smali/run
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if echo $@ | grep -q -- "--jvm"; then
+ ${RUN} "$@"
+else
+ ${RUN} "$@" --runtime-option -Xexperimental:default-methods -Xcompiler-option --runtime-arg -Xcompiler-option -Xexperimental:default-methods
+fi
diff --git a/test/963-default-range-smali/smali/A.smali b/test/963-default-range-smali/smali/A.smali
new file mode 100644
index 0000000000..b3d91dd76b
--- /dev/null
+++ b/test/963-default-range-smali/smali/A.smali
@@ -0,0 +1,29 @@
+# /*
+# * Copyright 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+.class public LA;
+.super Ljava/lang/Object;
+.implements Liface;
+
+# class A implements iface {
+# }
+
+.method public constructor <init>()V
+ .registers 1
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
+
diff --git a/test/963-default-range-smali/smali/Main.smali b/test/963-default-range-smali/smali/Main.smali
new file mode 100644
index 0000000000..400fba72d9
--- /dev/null
+++ b/test/963-default-range-smali/smali/Main.smali
@@ -0,0 +1,77 @@
+# /*
+# * Copyright (C) 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+# class Main {
+# public static void main(String[] args) {
+# A a = new A();
+# System.out.println(a.SayHi("a string 0",
+# "a string 1",
+# "a string 2",
+# "a string 3",
+# "a string 4",
+# "a string 5",
+# "a string 6",
+# "a string 7",
+# "a string 8",
+# "a string 9"));
+# iface b = (iface)a;
+# System.out.println(b.SayHi("a string 0",
+# "a string 1",
+# "a string 2",
+# "a string 3",
+# "a string 4",
+# "a string 5",
+# "a string 6",
+# "a string 7",
+# "a string 8",
+# "a string 9"));
+# }
+# }
+.class public LMain;
+.super Ljava/lang/Object;
+
+.method public constructor <init>()V
+ .registers 1
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
+
+.method public static main([Ljava/lang/String;)V
+ .locals 15
+ sget-object v12, Ljava/lang/System;->out:Ljava/io/PrintStream;
+
+ new-instance v1, LA;
+ invoke-direct {v1}, LA;-><init>()V
+ const-string v2, "a string 0"
+ const-string v3, "a string 1"
+ const-string v4, "a string 2"
+ const-string v5, "a string 3"
+ const-string v6, "a string 4"
+ const-string v7, "a string 5"
+ const-string v8, "a string 6"
+ const-string v9, "a string 7"
+ const-string v10, "a string 8"
+ const-string v11, "a string 9"
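+# // invoke-virtual/range and invoke-interface/range take a contiguous block of
+# // registers ({v1 .. v11}: the receiver plus the ten String arguments), which
+# // is why this test passes ten strings to the default method.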
+ invoke-virtual/range {v1 .. v11}, LA;->SayHi(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String;
+ move-result-object v0
+ invoke-virtual {v12,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+
+ invoke-interface/range {v1 .. v11}, Liface;->SayHi(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String;
+ move-result-object v0
+ invoke-virtual {v12,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+
+ return-void
+.end method
diff --git a/test/963-default-range-smali/smali/iface.smali b/test/963-default-range-smali/smali/iface.smali
new file mode 100644
index 0000000000..c2c3ce69a7
--- /dev/null
+++ b/test/963-default-range-smali/smali/iface.smali
@@ -0,0 +1,40 @@
+# /*
+# * Copyright (C) 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+# public interface iface {
+# public default String SayHi(String n1,
+# String n2,
+# String n3,
+# String n4,
+# String n5,
+# String n6,
+# String n7,
+# String n8,
+# String n9,
+# String n0) {
+# return "Hello";
+# }
+# }
+
+.class public abstract interface Liface;
+.super Ljava/lang/Object;
+
+.method public SayHi(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;)Ljava/lang/String;
+ .locals 1
+ const-string v0, "Hello"
+ return-object v0
+.end method
+
diff --git a/test/964-default-iface-init-generated/build b/test/964-default-iface-init-generated/build
new file mode 100755
index 0000000000..deef803813
--- /dev/null
+++ b/test/964-default-iface-init-generated/build
@@ -0,0 +1,45 @@
+#!/bin/bash
+#
+# Copyright 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# make us exit on a failure
+set -e
+
+# We will be making more files than the ulimit is set to allow. Raise it temporarily.
+OLD_ULIMIT=`ulimit -S`
+ulimit -S unlimited
+
+restore_ulimit() {
+ ulimit -S "$OLD_ULIMIT"
+}
+trap 'restore_ulimit' ERR
+
+# Generate the smali files and expected.txt or fail
+./util-src/generate_smali.py ./smali ./expected.txt
+
+if [[ $@ == *"--jvm"* ]]; then
+ # Build the Java files if we are running a --jvm test
+ mkdir -p src
+ mkdir -p classes
+ ${ANDROID_BUILD_TOP}/art/tools/extract-embedded-java ./smali ./src
+ ${JAVAC} -implicit:none -d classes $(find src -name '*.java')
+fi
+
+# Build the smali files and make a dex
+${SMALI} -JXmx512m --experimental --api-level 23 --output classes.dex $(find smali -name '*.smali')
+zip $TEST_NAME.jar classes.dex
+
+# Reset the ulimit back to its initial value
+restore_ulimit
diff --git a/test/964-default-iface-init-generated/expected.txt b/test/964-default-iface-init-generated/expected.txt
new file mode 100644
index 0000000000..1ddd65d177
--- /dev/null
+++ b/test/964-default-iface-init-generated/expected.txt
@@ -0,0 +1 @@
+This file is generated by util-src/generate_smali.py do not directly modify!
diff --git a/test/964-default-iface-init-generated/info.txt b/test/964-default-iface-init-generated/info.txt
new file mode 100644
index 0000000000..5805a86854
--- /dev/null
+++ b/test/964-default-iface-init-generated/info.txt
@@ -0,0 +1,17 @@
+Smali-based tests for interface initialization.
+
+This tests that interface initialization order is correct.
+
+This test needs to run under ART or a Java 8 language runtime and compiler.
+
+When run, the smali test files are generated by the util-src/generate_smali.py
+script. If we run with --jvm we will use the
+$(ANDROID_BUILD_TOP)/art/tools/extract-embedded-java script to turn the smali
+into equivalent Java using the embedded Java code.
+
+Care should be taken when updating the generate_smali.py script. It should
+always produce the same output when run multiple times, and the expected
+output it writes should remain valid.
+
+Do not modify the expected.txt file. It is generated on each run by
+util-src/generate_smali.py.
diff --git a/test/964-default-iface-init-generated/run b/test/964-default-iface-init-generated/run
new file mode 100755
index 0000000000..e378b061d9
--- /dev/null
+++ b/test/964-default-iface-init-generated/run
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Copyright 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if echo $@ | grep -q -- "--jvm"; then
+ ${RUN} "$@"
+else
+ ${RUN} "$@" --runtime-option -Xexperimental:default-methods -Xcompiler-option --runtime-arg -Xcompiler-option -Xexperimental:default-methods
+fi
diff --git a/test/964-default-iface-init-generated/smali/Displayer.smali b/test/964-default-iface-init-generated/smali/Displayer.smali
new file mode 100644
index 0000000000..91280a8a42
--- /dev/null
+++ b/test/964-default-iface-init-generated/smali/Displayer.smali
@@ -0,0 +1,45 @@
+# /*
+# * Copyright (C) 2015 The Android Open Source Project
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+#
+# // This class exists because Java does not allow static {} blocks in interfaces.
+# public class Displayer {
+# public Displayer(String type) {
+# System.out.println("initialization of " + type);
+# }
+# public void touch() {
+# return;
+# }
+# }
+
+.class public LDisplayer;
+.super Ljava/lang/Object;
+
+.method public constructor <init>(Ljava/lang/String;)V
+ .locals 2
+ invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+ const-string v0, "initialization of "
+ invoke-virtual {v0, p1}, Ljava/lang/String;->concat(Ljava/lang/String;)Ljava/lang/String;
+ move-result-object v0
+ sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream;
+ invoke-virtual {v1,v0}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+ return-void
+.end method
+
+.method public touch()V
+ .locals 0
+ return-void
+.end method
+
diff --git a/test/964-default-iface-init-generated/util-src/generate_smali.py b/test/964-default-iface-init-generated/util-src/generate_smali.py
new file mode 100755
index 0000000000..be2d3ba563
--- /dev/null
+++ b/test/964-default-iface-init-generated/util-src/generate_smali.py
@@ -0,0 +1,531 @@
+#!/usr/bin/python3
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Generate Smali test files for test 964.
+"""
+
+import os
+import sys
+from pathlib import Path
+
+BUILD_TOP = os.getenv("ANDROID_BUILD_TOP")
+if BUILD_TOP is None:
+ print("ANDROID_BUILD_TOP not set. Please run build/envsetup.sh", file=sys.stderr)
+ sys.exit(1)
+
+# Allow us to import utils and mixins.
+sys.path.append(str(Path(BUILD_TOP)/"art"/"test"/"utils"/"python"))
+
+from testgen.utils import get_copyright, subtree_sizes, gensym, filter_blanks
+import testgen.mixins as mixins
+
+from functools import total_ordering
+import itertools
+import string
+
+# The max depth the tree can have.
+MAX_IFACE_DEPTH = 3
+
+class MainClass(mixins.DumpMixin, mixins.Named, mixins.SmaliFileMixin):
+ """
+ A Main.smali file containing the Main class and the main function. It will run
+ all the test functions we have.
+ """
+
+ MAIN_CLASS_TEMPLATE = """{copyright}
+
+.class public LMain;
+.super Ljava/lang/Object;
+
+# class Main {{
+
+.method public constructor <init>()V
+ .registers 1
+ invoke-direct {{p0}}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
+
+{test_groups}
+
+{main_func}
+
+# }}
+"""
+
+ MAIN_FUNCTION_TEMPLATE = """
+# public static void main(String[] args) {{
+.method public static main([Ljava/lang/String;)V
+ .locals 2
+
+ {test_group_invoke}
+
+ return-void
+.end method
+# }}
+"""
+
+ TEST_GROUP_INVOKE_TEMPLATE = """
+# {test_name}();
+ invoke-static {{}}, {test_name}()V
+"""
+
+ def __init__(self):
+ """
+ Initialize this MainClass. We start out with no tests.
+ """
+ self.tests = set()
+
+ def add_test(self, ty):
+ """
+ Add a test for the concrete type 'ty'
+ """
+ self.tests.add(Func(ty))
+
+ def get_expected(self):
+ """
+ Get the expected output of this test.
+ """
+ all_tests = sorted(self.tests)
+ return filter_blanks("\n".join(a.get_expected() for a in all_tests))
+
+ def get_name(self):
+ """
+ Gets the name of this class
+ """
+ return "Main"
+
+ def __str__(self):
+ """
+ Print the smali code for this test.
+ """
+ all_tests = sorted(self.tests)
+ test_invoke = ""
+ test_groups = ""
+ for t in all_tests:
+ test_groups += str(t)
+ for t in all_tests:
+ test_invoke += self.TEST_GROUP_INVOKE_TEMPLATE.format(test_name=t.get_name())
+ main_func = self.MAIN_FUNCTION_TEMPLATE.format(test_group_invoke=test_invoke)
+
+ return self.MAIN_CLASS_TEMPLATE.format(copyright = get_copyright('smali'),
+ test_groups = test_groups,
+ main_func = main_func)
+
+class Func(mixins.Named, mixins.NameComparableMixin):
+ """
+ A function that tests the functionality of a concrete type. Should only be
+ constructed by MainClass.add_test.
+ """
+
+ TEST_FUNCTION_TEMPLATE = """
+# public static void {fname}() {{
+# try {{
+# System.out.println("About to initialize {tree}");
+# {farg} v = new {farg}();
+# System.out.println("Initialized {tree}");
+# v.touchAll();
+# System.out.println("All of {tree} hierarchy initialized");
+# return;
+# }} catch (Error e) {{
+# e.printStackTrace(System.out);
+# return;
+# }}
+# }}
+.method public static {fname}()V
+ .locals 7
+ :call_{fname}_try_start
+ sget-object v2, Ljava/lang/System;->out:Ljava/io/PrintStream;
+ const-string v3, "About to initialize {tree}"
+ invoke-virtual {{v2, v3}}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+
+ new-instance v6, L{farg};
+ invoke-direct {{v6}}, L{farg};-><init>()V
+
+ const-string v3, "Initialized {tree}"
+ invoke-virtual {{v2, v3}}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+
+ invoke-virtual {{v6}}, L{farg};->touchAll()V
+
+ const-string v3, "All of {tree} hierarchy initialized"
+ invoke-virtual {{v2, v3}}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+
+ return-void
+ :call_{fname}_try_end
+ .catch Ljava/lang/Error; {{:call_{fname}_try_start .. :call_{fname}_try_end}} :error_{fname}_start
+ :error_{fname}_start
+ move-exception v3
+ sget-object v2, Ljava/lang/System;->out:Ljava/io/PrintStream;
+ invoke-virtual {{v3,v2}}, Ljava/lang/Error;->printStackTrace(Ljava/io/PrintStream;)V
+ return-void
+.end method
+"""
+
+ OUTPUT_FORMAT = """
+About to initialize {tree}
+{initialize_output}
+Initialized {tree}
+{touch_output}
+All of {tree} hierarchy initialized
+""".strip()
+
+ def __init__(self, farg):
+ """
+ Initialize a test function for the given argument
+ """
+ self.farg = farg
+
+ def __str__(self):
+ """
+ Print the smali code for this test function.
+ """
+ return self.TEST_FUNCTION_TEMPLATE.format(fname=self.get_name(),
+ farg=self.farg.get_name(),
+ tree = self.farg.get_tree())
+
+ def get_name(self):
+ """
+ Gets the name of this test function
+ """
+ return "TEST_FUNC_{}".format(self.farg.get_name())
+
+ def get_expected(self):
+ """
+ Get the expected output of this function.
+ """
+ return self.OUTPUT_FORMAT.format(
+ tree = self.farg.get_tree(),
+ initialize_output = self.farg.get_initialize_output().strip(),
+ touch_output = self.farg.get_touch_output().strip())
+
+class TestClass(mixins.DumpMixin, mixins.Named, mixins.NameComparableMixin, mixins.SmaliFileMixin):
+ """
+ A class that will be instantiated to test interface initialization order.
+ """
+
+ TEST_CLASS_TEMPLATE = """{copyright}
+
+.class public L{class_name};
+.super Ljava/lang/Object;
+{implements_spec}
+
+# public class {class_name} implements {ifaces} {{
+#
+# public {class_name}() {{
+# }}
+.method public constructor <init>()V
+ .locals 2
+ invoke-direct {{p0}}, Ljava/lang/Object;-><init>()V
+ return-void
+.end method
+
+# public void marker() {{
+# return;
+# }}
+.method public marker()V
+ .locals 0
+ return-void
+.end method
+
+# public void touchAll() {{
+.method public touchAll()V
+ .locals 2
+ sget-object v0, Ljava/lang/System;->out:Ljava/io/PrintStream;
+ {touch_calls}
+ return-void
+.end method
+# }}
+# }}
+"""
+
+ IMPLEMENTS_TEMPLATE = """
+.implements L{iface_name};
+"""
+
+ TOUCH_CALL_TEMPLATE = """
+# System.out.println("{class_name} touching {iface_name}");
+# {iface_name}.field.touch();
+ const-string v1, "{class_name} touching {iface_name}"
+ invoke-virtual {{v0, v1}}, Ljava/io/PrintStream;->println(Ljava/lang/Object;)V
+ sget-object v1, L{iface_name};->field:LDisplayer;
+ invoke-virtual {{v1}}, LDisplayer;->touch()V
+"""
+
+ TOUCH_OUTPUT_TEMPLATE = """
+{class_name} touching {iface_name}
+{touch_output}
+""".strip()
+
+ def __init__(self, ifaces):
+ """
+ Initialize this test class which implements the given interfaces
+ """
+ self.ifaces = ifaces
+ self.class_name = "CLASS_"+gensym()
+
+ def get_name(self):
+ """
+ Gets the name of this class
+ """
+ return self.class_name
+
+ def get_tree(self):
+ """
+ Print out a representation of the type tree of this class
+ """
+ return "[{fname} {iftree}]".format(fname = self.get_name(), iftree = print_tree(self.ifaces))
+
+ def get_initialize_output(self):
+ return "\n".join(map(lambda i: i.get_initialize_output().strip(), dump_tree(self.ifaces)))
+
+ def get_touch_output(self):
+ return "\n".join(map(lambda a: self.TOUCH_OUTPUT_TEMPLATE.format(
+ class_name = self.class_name,
+ iface_name = a.get_name(),
+ touch_output = a.get_touch_output()).strip(),
+ self.get_all_interfaces()))
+
+ def get_all_interfaces(self):
+ """
+ Returns a set of all interfaces this class transitively implements
+ """
+ return sorted(set(dump_tree(self.ifaces)))
+
+ def __str__(self):
+ """
+ Print the smali code for this class.
+ """
+ s_ifaces = '\n'.join(map(lambda a: self.IMPLEMENTS_TEMPLATE.format(iface_name = a.get_name()),
+ self.ifaces))
+ j_ifaces = ', '.join(map(lambda a: a.get_name(), self.ifaces))
+ touches = '\n'.join(map(lambda a: self.TOUCH_CALL_TEMPLATE.format(class_name = self.class_name,
+ iface_name = a.get_name()),
+ self.get_all_interfaces()))
+ return self.TEST_CLASS_TEMPLATE.format(copyright = get_copyright('smali'),
+ implements_spec = s_ifaces,
+ ifaces = j_ifaces,
+ class_name = self.class_name,
+ touch_calls = touches)
+
+class TestInterface(mixins.DumpMixin, mixins.Named, mixins.NameComparableMixin, mixins.SmaliFileMixin):
+ """
+ An interface that will be used to test default method resolution order.
+ """
+
+ TEST_INTERFACE_TEMPLATE = """{copyright}
+.class public abstract interface L{class_name};
+.super Ljava/lang/Object;
+{implements_spec}
+
+# public interface {class_name} {extends} {ifaces} {{
+# public static final Displayer field = new Displayer("{tree}");
+.field public final static field:LDisplayer;
+
+.method public static constructor <clinit>()V
+ .locals 3
+ const-string v2, "{tree}"
+ new-instance v1, LDisplayer;
+ invoke-direct {{v1, v2}}, LDisplayer;-><init>(Ljava/lang/String;)V
+ sput-object v1, L{class_name};->field:LDisplayer;
+ return-void
+.end method
+
+# public void marker();
+.method public abstract marker()V
+.end method
+
+{funcs}
+
+# }}
+"""
+
+ DEFAULT_FUNC_TEMPLATE = """
+# public default void {class_name}_DEFAULT_FUNC() {{
+# return;
+# }}
+.method public {class_name}_DEFAULT_FUNC()V
+ .locals 0
+ return-void
+.end method
+"""
+ IMPLEMENTS_TEMPLATE = """
+.implements L{iface_name};
+"""
+
+ OUTPUT_TEMPLATE = "initialization of {tree}"
+
+ def __init__(self, ifaces, default):
+ """
+ Initialize interface with the given super-interfaces
+ """
+ self.ifaces = ifaces
+ self.default = default
+ end = "_DEFAULT" if default else ""
+ self.class_name = "INTERFACE_"+gensym()+end
+ self.cloned = False
+ self.initialized = False
+
+ def clone(self):
+ """
+ Clones this interface, returning a new one with the same structure but
+ different name.
+ """
+ return TestInterface(tuple(map(lambda a: a.clone(), self.ifaces)), self.default)
+
+ def get_name(self):
+ """
+ Gets the name of this interface
+ """
+ return self.class_name
+
+ def __iter__(self):
+ """
+ Performs depth-first traversal of the interface tree this interface is the
+ root of. Does not filter out repeats.
+ """
+ for i in self.ifaces:
+ yield i
+ yield from i
+
+ def get_tree(self):
+ """
+ Print out a representation of the type tree of this class
+ """
+ return "[{class_name} {iftree}]".format(class_name = self.get_name(),
+ iftree = print_tree(self.ifaces))
+
+ def get_initialize_output(self):
+ """
+ Returns the expected output when a class that implements this interface is initialized.
+ """
+ if self.default and not self.initialized:
+ self.initialized = True
+ return self.OUTPUT_TEMPLATE.format(tree = self.get_tree())
+ else:
+ return ""
+
+ def get_touch_output(self):
+ """
+ Returns the expected output upon this interface being touched.
+ """
+ if not self.default and not self.initialized:
+ self.initialized = True
+ return self.OUTPUT_TEMPLATE.format(tree = self.get_tree())
+ else:
+ return ""
+
+ def __str__(self):
+ """
+ Print the smali code for this interface.
+ """
+ s_ifaces = '\n'.join(map(lambda a: self.IMPLEMENTS_TEMPLATE.format(iface_name = a.get_name()),
+ self.ifaces))
+ j_ifaces = ', '.join(map(lambda a: a.get_name(), self.ifaces))
+ if self.default:
+ funcs = self.DEFAULT_FUNC_TEMPLATE.format(class_name = self.class_name)
+ else:
+ funcs = ""
+ return self.TEST_INTERFACE_TEMPLATE.format(copyright = get_copyright('smali'),
+ implements_spec = s_ifaces,
+ extends = "extends" if len(self.ifaces) else "",
+ ifaces = j_ifaces,
+ funcs = funcs,
+ tree = self.get_tree(),
+ class_name = self.class_name)
+
+def dump_tree(ifaces):
+ """
+ Yields every interface transitively implemented by the given set, in
+ post-order: an interface's super-interfaces are yielded before the interface itself.
+ """
+ for i in ifaces:
+ yield from dump_tree(i.ifaces)
+ yield i
+
+def print_tree(ifaces):
+ """
+ Prints the tree for the given ifaces.
+ """
+ return " ".join(i.get_tree() for i in ifaces)
+
+def clone_all(l):
+ return tuple(a.clone() for a in l)
+
+# Cached output of subtree_sizes for speed of access.
+SUBTREES = [set(tuple(l) for l in subtree_sizes(i))
+ for i in range(MAX_IFACE_DEPTH + 1)]
+
+def create_test_classes():
+ """
+ Yield all the test classes with the different interface trees
+ """
+ for num in range(1, MAX_IFACE_DEPTH + 1):
+ for split in SUBTREES[num]:
+ ifaces = []
+ for sub in split:
+ ifaces.append(list(create_interface_trees(sub)))
+ for supers in itertools.product(*ifaces):
+ yield TestClass(clone_all(supers))
+ for i in range(len(set(dump_tree(supers)) - set(supers))):
+ ns = clone_all(supers)
+ selected = sorted(set(dump_tree(ns)) - set(ns))[i]
+ yield TestClass(tuple([selected] + list(ns)))
+
+def create_interface_trees(num):
+ """
+ Yield all the interface trees up to 'num' depth.
+ """
+ if num == 0:
+ yield TestInterface(tuple(), False)
+ yield TestInterface(tuple(), True)
+ return
+ for split in SUBTREES[num]:
+ ifaces = []
+ for sub in split:
+ ifaces.append(list(create_interface_trees(sub)))
+ for supers in itertools.product(*ifaces):
+ yield TestInterface(clone_all(supers), False)
+ yield TestInterface(clone_all(supers), True)
+ # TODO Should add on some from higher up the tree.
+
+def create_all_test_files():
+ """
+ Creates all the objects representing the files in this test. They just need to
+ be dumped.
+ """
+ mc = MainClass()
+ classes = {mc}
+ for clazz in create_test_classes():
+ classes.add(clazz)
+ for i in dump_tree(clazz.ifaces):
+ classes.add(i)
+ mc.add_test(clazz)
+ return mc, classes
+
+def main(argv):
+ smali_dir = Path(argv[1])
+ if not smali_dir.exists() or not smali_dir.is_dir():
+ print("{} is not a valid smali dir".format(smali_dir), file=sys.stderr)
+ sys.exit(1)
+ expected_txt = Path(argv[2])
+ mainclass, all_files = create_all_test_files()
+ with expected_txt.open('w') as out:
+ print(mainclass.get_expected(), file=out)
+ for f in all_files:
+ f.dump(smali_dir)
+
+if __name__ == '__main__':
+ main(sys.argv)
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index db16b97ea6..e114a2e9f3 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -214,13 +214,29 @@ TEST_ART_TIMING_SENSITIVE_RUN_TESTS := \
055-enum-performance \
133-static-invoke-super
- # disable timing sensitive tests on "dist" builds.
+# disable timing sensitive tests on "dist" builds.
ifdef dist_goal
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
$(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
$(IMAGE_TYPES), $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(ALL_ADDRESS_SIZES))
endif
+# Tests that require python3.
+TEST_ART_PYTHON3_DEPENDENCY_RUN_TESTS := \
+ 960-default-smali \
+ 961-default-iface-resolution-generated \
+ 964-default-iface-init-generated \
+
+# Check if we have python3 to run our tests.
+ifeq ($(wildcard /usr/bin/python3),)
+ $(warning "No python3 found. Disabling tests: $(TEST_ART_PYTHON3_DEPENDENCY_RUN_TESTS)")
+
+ # Currently disable tests requiring python3 when it is not installed.
+ ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
+ $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
+ $(IMAGE_TYPES), $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(TEST_ART_PYTHON3_DEPENDENCY_RUN_TESTS), $(ALL_ADDRESS_SIZES))
+endif
+
TEST_ART_TIMING_SENSITIVE_RUN_TESTS :=
# Note 116-nodex2oat is not broken per-se it just doesn't (and isn't meant to) work with --prebuild.
@@ -313,13 +329,15 @@ ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,target,$(RUN_TYPES),$(PREBUIL
$(PICTEST_TYPES),$(DEBUGGABLE_TYPES),130-hprof,$(ALL_ADDRESS_SIZES))
# 131 is an old test. The functionality has been implemented at an earlier stage and is checked
-# in tests 138.
-ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
+# in tests 138. Blacklisted for debug builds since these builds have duplicate classes checks which
+# punt to interpreter.
+ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),debug,$(PREBUILD_TYPES), \
$(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES),$(IMAGE_TYPES), \
$(PICTEST_TYPES),$(DEBUGGABLE_TYPES),131-structural-change,$(ALL_ADDRESS_SIZES))
-# 138-duplicate-classes-check. Turned off temporarily, b/21333911.
-ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
+# 138-duplicate-classes-check. Turned on for debug builds since debug builds have duplicate classes
+# checks enabled, b/21333911.
+ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),ndebug,$(PREBUILD_TYPES), \
$(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES),$(IMAGE_TYPES), \
$(PICTEST_TYPES),$(DEBUGGABLE_TYPES),138-duplicate-classes-check,$(ALL_ADDRESS_SIZES))
@@ -469,6 +487,8 @@ TEST_ART_BROKEN_OPTIMIZING_MIPS_RUN_TESTS := \
530-checker-regression-reftype-final \
532-checker-nonnull-arrayset \
534-checker-bce-deoptimization \
+ 536-checker-intrinsic-optimization \
+ 537-checker-debuggable \
ifeq (mips,$(TARGET_ARCH))
ifneq (,$(filter optimizing,$(COMPILER_TYPES)))
@@ -519,8 +539,10 @@ TEST_ART_BROKEN_OPTIMIZING_DEBUGGABLE_RUN_TESTS :=
# Tests that should fail in the read barrier configuration.
# 137: Read barrier forces interpreter. Cannot run this with the interpreter.
+# 141: Class unloading test is flaky with CC since CC seems to occasionally keep class loaders live.
TEST_ART_BROKEN_READ_BARRIER_RUN_TESTS := \
- 137-cfi
+ 137-cfi \
+ 141-class-unload
ifeq ($(ART_USE_READ_BARRIER),true)
ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \
diff --git a/test/etc/default-build b/test/etc/default-build
index c281bca3f5..c92402b529 100755
--- a/test/etc/default-build
+++ b/test/etc/default-build
@@ -26,6 +26,8 @@ while true; do
option="$1"
DX_FLAGS="${DX_FLAGS} $option"
shift
+ elif [ "x$1" = "x--jvm" ]; then
+ shift
elif expr "x$1" : "x--" >/dev/null 2>&1; then
echo "unknown $0 option: $1" 1>&2
exit 1
diff --git a/test/run-all-tests b/test/run-all-tests
index 13490c46e4..76283b7a8d 100755
--- a/test/run-all-tests
+++ b/test/run-all-tests
@@ -41,6 +41,9 @@ while true; do
if [ "x$1" = "x--host" ]; then
run_args="${run_args} --host"
shift
+ elif [ "x$1" = "x--use-java-home" ]; then
+ run_args="${run_args} --use-java-home"
+ shift
elif [ "x$1" = "x--jvm" ]; then
run_args="${run_args} --jvm"
shift
@@ -133,7 +136,7 @@ if [ "$usage" = "yes" ]; then
echo " --debug --dev --host --interpreter --jit --jvm --no-optimize"
echo " --no-verify -O --update --valgrind --zygote --64 --relocate"
echo " --prebuild --always-clean --gcstress --gcverify --trace"
- echo " --no-patchoat --no-dex2oat"
+ echo " --no-patchoat --no-dex2oat --use-java-home"
echo " Specific Runtime Options:"
echo " --seq Run tests one-by-one, avoiding failures caused by busy CPU"
) 1>&2
diff --git a/test/run-test b/test/run-test
index a5b6e92869..1b71f33209 100755
--- a/test/run-test
+++ b/test/run-test
@@ -40,7 +40,6 @@ else
tmp_dir="${TMPDIR}/$USER/${test_dir}"
fi
checker="${progdir}/../tools/checker/checker.py"
-
export JAVA="java"
export JAVAC="javac -g"
export RUN="${progdir}/etc/run-test-jar"
@@ -155,6 +154,15 @@ while true; do
DEX_LOCATION=$tmp_dir
run_args="${run_args} --host"
shift
+ elif [ "x$1" = "x--use-java-home" ]; then
+ if [ -n "${JAVA_HOME}" ]; then
+ export JAVA="${JAVA_HOME}/bin/java"
+ export JAVAC="${JAVA_HOME}/bin/javac -g"
+ else
+ echo "Passed --use-java-home without JAVA_HOME variable set!"
+ usage="yes"
+ fi
+ shift
elif [ "x$1" = "x--jvm" ]; then
target_mode="no"
runtime="jvm"
@@ -162,6 +170,7 @@ while true; do
NEED_DEX="false"
USE_JACK="false"
run_args="${run_args} --jvm"
+ build_args="${build_args} --jvm"
shift
elif [ "x$1" = "x-O" ]; then
lib="libart.so"
@@ -560,6 +569,9 @@ if [ "$usage" = "yes" ]; then
echo " --invoke-with Pass --invoke-with option to runtime."
echo " --dalvik Use Dalvik (off by default)."
echo " --jvm Use a host-local RI virtual machine."
+ echo " --use-java-home Use the JAVA_HOME environment variable"
+ echo " to find the java compiler and runtime"
+ echo " (if applicable) to run the test with."
echo " --output-path [path] Location where to store the build" \
"files."
echo " --64 Run the test in 64-bit mode"
@@ -637,18 +649,24 @@ if [[ "$TEST_NAME" =~ ^[0-9]+-checker- ]]; then
# on a particular DEX output, keep building them with dx for now (b/19467889).
USE_JACK="false"
- if [ "$runtime" = "art" -a "$image_suffix" = "-optimizing" -a "$debuggable" = "no" ]; then
+ if [ "$runtime" = "art" -a "$image_suffix" = "-optimizing" ]; then
# In no-prebuild mode, the compiler is only invoked if both dex2oat and
# patchoat are available. Disable Checker otherwise (b/22552692).
if [ "$prebuild_mode" = "yes" ] || [ "$have_patchoat" = "yes" -a "$have_dex2oat" = "yes" ]; then
run_checker="yes"
+
if [ "$target_mode" = "no" ]; then
cfg_output_dir="$tmp_dir"
- checker_arch_option="--arch=${host_arch_name^^}"
+ checker_args="--arch=${host_arch_name^^}"
else
cfg_output_dir="$DEX_LOCATION"
- checker_arch_option="--arch=${target_arch_name^^}"
+ checker_args="--arch=${target_arch_name^^}"
+ fi
+
+ if [ "$debuggable" = "yes" ]; then
+ checker_args="$checker_args --debuggable"
fi
+
run_args="${run_args} -Xcompiler-option --dump-cfg=$cfg_output_dir/$cfg_output \
-Xcompiler-option -j1"
fi
@@ -702,7 +720,7 @@ if [ "$dev_mode" = "yes" ]; then
if [ "$target_mode" = "yes" ]; then
adb pull $cfg_output_dir/$cfg_output &> /dev/null
fi
- "$checker" $checker_arch_option "$cfg_output" "$tmp_dir" 2>&1
+ "$checker" $checker_args "$cfg_output" "$tmp_dir" 2>&1
checker_exit="$?"
if [ "$checker_exit" = "0" ]; then
good="yes"
@@ -727,7 +745,7 @@ elif [ "$update_mode" = "yes" ]; then
if [ "$target_mode" = "yes" ]; then
adb pull $cfg_output_dir/$cfg_output &> /dev/null
fi
- "$checker" -q $checker_arch_option "$cfg_output" "$tmp_dir" >> "$output" 2>&1
+ "$checker" -q $checker_args "$cfg_output" "$tmp_dir" >> "$output" 2>&1
fi
sed -e 's/[[:cntrl:]]$//g' < "$output" >"${td_expected}"
good="yes"
@@ -768,7 +786,7 @@ else
if [ "$target_mode" = "yes" ]; then
adb pull $cfg_output_dir/$cfg_output &> /dev/null
fi
- "$checker" -q $checker_arch_option "$cfg_output" "$tmp_dir" >> "$output" 2>&1
+ "$checker" -q $checker_args "$cfg_output" "$tmp_dir" >> "$output" 2>&1
checker_exit="$?"
if [ "$checker_exit" != "0" ]; then
echo "checker exit status: $checker_exit" 1>&2
diff --git a/test/utils/python/testgen/mixins.py b/test/utils/python/testgen/mixins.py
new file mode 100644
index 0000000000..085e51def2
--- /dev/null
+++ b/test/utils/python/testgen/mixins.py
@@ -0,0 +1,135 @@
+#!/usr/bin/python3
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Common mixins and abstract base classes (ABCs) useful for writing test generators in python
+"""
+
+import abc
+import collections.abc
+import functools
+
+class Named(metaclass=abc.ABCMeta):
+ """
+ An abc that defines a get_name method.
+ """
+
+ @abc.abstractmethod
+ def get_name(self):
+ """
+ Returns a unique name to use as the identity for implementing comparisons.
+ """
+ pass
+
+class FileLike(metaclass=abc.ABCMeta):
+ """
+ An abc that defines get_file_name and get_file_extension methods.
+ """
+
+ @abc.abstractmethod
+ def get_file_name(self):
+ """Returns the filename this object represents"""
+ pass
+
+ @abc.abstractmethod
+ def get_file_extension(self):
+ """Returns the file extension of the file this object represents"""
+ pass
+
+@functools.lru_cache(maxsize=None)
+def get_file_extension_mixin(ext):
+ """
+ Gets a mixin that defines get_file_name(self) in terms of get_name(self) with the
+ given file extension.
+ """
+
+ class FExt(object):
+ """
+ A mixin defining get_file_name(self) in terms of get_name(self)
+ """
+
+ def get_file_name(self):
+ return self.get_name() + ext
+
+ def get_file_extension(self):
+ return ext
+
+ # Register the ABCs
+ Named.register(FExt)
+ FileLike.register(FExt)
+
+ return FExt
+
+class SmaliFileMixin(get_file_extension_mixin(".smali")):
+ """
+ A mixin that defines that the file this class belongs to is get_name() + ".smali".
+ """
+ pass
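+# e.g. a generator class deriving from (DumpMixin, Named, SmaliFileMixin) only
+# needs get_name() and __str__() to be dumped to <name>.smali.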
+
+class NameComparableMixin(object):
+ """
+ A mixin that defines the object comparison and related functionality in terms
+ of a get_name(self) function.
+ """
+
+ def __lt__(self, other):
+ return self.get_name() < other.get_name()
+
+ def __gt__(self, other):
+ return self.get_name() > other.get_name()
+
+ def __eq__(self, other):
+ return self.get_name() == other.get_name()
+
+ def __le__(self, other):
+ return self.get_name() <= other.get_name()
+
+ def __ge__(self, other):
+ return self.get_name() >= other.get_name()
+
+ def __ne__(self, other):
+ return self.get_name() != other.get_name()
+
+ def __hash__(self):
+ return hash(self.get_name())
+
+Named.register(NameComparableMixin)
+collections.abc.Hashable.register(NameComparableMixin)
+
+class DumpMixin(metaclass=abc.ABCMeta):
+ """
+ A mixin to add support for dumping the string representation of an object to a
+ file. Requires the get_file_name(self) method be defined.
+ """
+
+ @abc.abstractmethod
+ def __str__(self):
+ """
+ Returns the data to be printed to a file by dump.
+ """
+ pass
+
+ def dump(self, directory):
+ """
+ Dump this object to a file in the given directory
+ """
+ out_file = directory / self.get_file_name()
+ if out_file.exists():
+ out_file.unlink()
+ with out_file.open('w') as out:
+ print(str(self), file=out)
+
+FileLike.register(DumpMixin)
diff --git a/test/utils/python/testgen/utils.py b/test/utils/python/testgen/utils.py
new file mode 100644
index 0000000000..769ad16ebe
--- /dev/null
+++ b/test/utils/python/testgen/utils.py
@@ -0,0 +1,80 @@
+#!/usr/bin/python3
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Common functions useful for writing test generators in python
+"""
+
+import itertools
+import os
+import string
+import sys
+from pathlib import Path
+
+BUILD_TOP = os.getenv("ANDROID_BUILD_TOP")
+if BUILD_TOP is None:
+ print("ANDROID_BUILD_TOP not set. Please run build/envsetup.sh", file=sys.stderr)
+ sys.exit(1)
+
+# An iterator that yields tuples of lowercase letters. It first yields all
+# length-1 combinations, then all length-2 combinations, and so on, in
+# alphabetical order.
+NAME_GEN = itertools.chain.from_iterable(
+ map(lambda n: itertools.product(string.ascii_lowercase, repeat=n),
+ itertools.count(1)))
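+# Successive gensym() calls therefore produce "a", "b", ..., "z", "aa", "ab", ...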
+
+def gensym():
+ """
+ Returns a new, globally unique, identifier name that is a valid Java symbol
+ on each call.
+ """
+ return ''.join(next(NAME_GEN))
+
+def filter_blanks(s):
+ """
+ Takes a string and returns the same string with empty lines removed.
+ """
+ return "\n".join(a for a in s.split("\n") if a.strip() != "")
+
+def get_copyright(filetype = "java"):
+ """
+ Returns the standard copyright header for the given filetype
+ """
+ if filetype == "smali":
+ return "\n".join(map(lambda a: "# " + a, get_copyright("java").split("\n")))
+ else:
+ fname = filetype + ".txt"
+ with (Path(BUILD_TOP)/"development"/"docs"/"copyright-templates"/fname).open() as template:
+ return "".join(template.readlines())
+
+def subtree_sizes(n):
+ """
+ A generator that yields a tuple containing a possible arrangement of subtree
+ nodes for a tree with a total of 'n' leaf nodes.
+ """
+ if n == 0:
+ return
+ elif n == 1:
+ yield (0,)
+ elif n == 2:
+ yield (1, 1)
+ else:
+ for prevt in subtree_sizes(n - 1):
+ prev = list(prevt)
+ yield tuple([1] + prev)
+ for i in range(len(prev)):
+ prev[i] += 1
+ yield tuple(prev)
+ prev[i] -= 1
+
diff --git a/tools/ahat/README.txt b/tools/ahat/README.txt
index a8e3884077..1083c2f42f 100644
--- a/tools/ahat/README.txt
+++ b/tools/ahat/README.txt
@@ -31,9 +31,6 @@ TODO:
another) How about, always sort by name?
* For long strings, limit the string length shown in the summary view to
something reasonable. Say 50 chars, then add a "..." at the end.
- * For string summaries, if the string is an offset into a bigger byte array,
- make sure to show just the part that's in the bigger byte array, not the
- entire byte array.
* For HeapTable with single heap shown, the heap name isn't centered?
* Consistently document functions.
* Should help be part of an AhatHandler, that automatically gets the menu and
@@ -72,6 +69,8 @@ Things to Test:
time.
* That we don't show the 'extra' column in the DominatedList if we are
showing all the instances.
+ * That InstanceUtils.asString properly takes into account "offset" and
+ "count" fields, if they are present.
Reported Issues:
* Request to be able to sort tables by size.
diff --git a/tools/ahat/src/InstanceUtils.java b/tools/ahat/src/InstanceUtils.java
index a6ac3b8765..eb9e363d8c 100644
--- a/tools/ahat/src/InstanceUtils.java
+++ b/tools/ahat/src/InstanceUtils.java
@@ -37,22 +37,6 @@ class InstanceUtils {
}
/**
- * Read the char[] value from an hprof Instance.
- * Returns null if the object can't be interpreted as a char[].
- */
- private static char[] asCharArray(Instance inst) {
- if (! (inst instanceof ArrayInstance)) {
- return null;
- }
-
- ArrayInstance array = (ArrayInstance) inst;
- if (array.getArrayType() != Type.CHAR) {
- return null;
- }
- return array.asCharArray(0, array.getValues().length);
- }
-
- /**
* Read the byte[] value from an hprof Instance.
* Returns null if the instance is not a byte array.
*/
@@ -82,8 +66,32 @@ class InstanceUtils {
if (!isInstanceOfClass(inst, "java.lang.String")) {
return null;
}
- char[] value = getCharArrayField(inst, "value");
- return (value == null) ? null : new String(value);
+
+ Object value = getField(inst, "value");
+ if (!(value instanceof ArrayInstance)) {
+ return null;
+ }
+
+ ArrayInstance chars = (ArrayInstance) value;
+ if (chars.getArrayType() != Type.CHAR) {
+ return null;
+ }
+
+ // TODO: When perflib provides a better way to get the length of the
+ // array, we should use that here.
+ int numChars = chars.getValues().length;
+ int count = getIntField(inst, "count", numChars);
+ int offset = getIntField(inst, "offset", 0);
+ int end = offset + count - 1;
+
+ if (count == 0) {
+ return "";
+ }
+
+ if (offset >= 0 && offset < numChars && end >= 0 && end < numChars) {
+ return new String(chars.asCharArray(offset, count));
+ }
+ return null;
}
/**
@@ -175,6 +183,15 @@ class InstanceUtils {
}
/**
+ * Read an int field of an instance, returning a default value if the field
+ * was not an int or could not be read.
+ */
+ private static int getIntField(Instance inst, String fieldName, int def) {
+ Integer value = getIntField(inst, fieldName);
+ return value == null ? def : value;
+ }
+
+ /**
* Read the given field from the given instance.
* The field is assumed to be a byte[] field.
* Returns null if the field value is null, not a byte[] or could not be read.
@@ -187,14 +204,6 @@ class InstanceUtils {
return asByteArray((Instance)value);
}
- private static char[] getCharArrayField(Instance inst, String fieldName) {
- Object value = getField(inst, fieldName);
- if (!(value instanceof Instance)) {
- return null;
- }
- return asCharArray((Instance)value);
- }
-
// Return the bitmap instance associated with this object, or null if there
// is none. This works for android.graphics.Bitmap instances and their
// underlying Byte[] instances.
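A minimal Python sketch of the bounds check asString now performs on the String's backing char[] (illustrative only; the helper name and list-based model are assumptions, not the Java code):

def as_string(chars, offset, count):
    # chars models the backing char[] of a java.lang.String instance;
    # offset and count model the optional "offset" and "count" fields.
    if count == 0:
        return ""
    end = offset + count - 1
    if 0 <= offset < len(chars) and 0 <= end < len(chars):
        return ''.join(chars[offset:offset + count])
    return None  # out-of-range values: refuse to guess, as the Java code does

assert as_string(list("xxhelloyy"), 2, 5) == "hello"
assert as_string(list("hi"), 1, 5) is None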
diff --git a/tools/art b/tools/art
index 676d6aeaf2..304a9d03f6 100644
--- a/tools/art
+++ b/tools/art
@@ -89,6 +89,7 @@ if [ z"$PERF" != z ]; then
invoke_with="perf record -o $ANDROID_DATA/perf.data -e cycles:u $invoke_with"
fi
+# We use the PIC core image to work with perf.
ANDROID_DATA=$ANDROID_DATA \
ANDROID_ROOT=$ANDROID_ROOT \
LD_LIBRARY_PATH=$LD_LIBRARY_PATH \
@@ -97,7 +98,7 @@ ANDROID_DATA=$ANDROID_DATA \
$invoke_with $ANDROID_ROOT/bin/$DALVIKVM $lib \
-XXlib:$LIBART \
-Xnorelocate \
- -Ximage:$ANDROID_ROOT/framework/core.art \
+ -Ximage:$ANDROID_ROOT/framework/core-optimizing-pic.art \
-Xcompiler-option --generate-debug-info \
"$@"
diff --git a/tools/buildbot-build.sh b/tools/buildbot-build.sh
index de9b35d3ea..631e0a0c59 100755
--- a/tools/buildbot-build.sh
+++ b/tools/buildbot-build.sh
@@ -19,7 +19,9 @@ if [ ! -d art ]; then
exit 1
fi
-common_targets="vogar vogar.jar core-tests apache-harmony-jdwp-tests-hostdex jsr166-tests"
+out_dir=${OUT_DIR-out}
+java_libraries_dir=${out_dir}/target/common/obj/JAVA_LIBRARIES
+common_targets="vogar vogar.jar ${java_libraries_dir}/core-tests_intermediates/javalib.jar apache-harmony-jdwp-tests-hostdex ${java_libraries_dir}/jsr166-tests_intermediates/javalib.jar"
mode="target"
j_arg="-j$(nproc)"
showcommands=
@@ -44,9 +46,9 @@ while true; do
done
if [[ $mode == "host" ]]; then
- make_command="make $j_arg $showcommands build-art-host-tests $common_targets out/host/linux-x86/lib/libjavacoretests.so out/host/linux-x86/lib64/libjavacoretests.so"
+ make_command="make $j_arg $showcommands build-art-host-tests $common_targets ${out_dir}/host/linux-x86/lib/libjavacoretests.so ${out_dir}/host/linux-x86/lib64/libjavacoretests.so"
elif [[ $mode == "target" ]]; then
- make_command="make $j_arg $showcommands build-art-target-tests $common_targets libjavacrypto libjavacoretests linker toybox toolbox sh out/host/linux-x86/bin/adb"
+ make_command="make $j_arg $showcommands build-art-target-tests $common_targets libjavacrypto libjavacoretests linker toybox toolbox sh ${out_dir}/host/linux-x86/bin/adb"
fi
echo "Executing $make_command"
diff --git a/tools/checker/checker.py b/tools/checker/checker.py
index bc5e17da6a..2e9faba9fb 100755
--- a/tools/checker/checker.py
+++ b/tools/checker/checker.py
@@ -36,7 +36,9 @@ def ParseArguments():
parser.add_argument("--dump-pass", dest="dump_pass", metavar="PASS",
help="print a compiler pass dump")
parser.add_argument("--arch", dest="arch", choices=archs_list,
- help="Run the tests for the specified target architecture.")
+ help="Run tests for the specified target architecture.")
+ parser.add_argument("--debuggable", action="store_true",
+ help="Run tests for debuggable code.")
parser.add_argument("-q", "--quiet", action="store_true",
help="print only errors")
return parser.parse_args()
@@ -83,13 +85,13 @@ def FindCheckerFiles(path):
Logger.fail("Source path \"" + path + "\" not found")
-def RunTests(checkPrefix, checkPath, outputFilename, targetArch):
+def RunTests(checkPrefix, checkPath, outputFilename, targetArch, debuggableMode):
c1File = ParseC1visualizerStream(os.path.basename(outputFilename), open(outputFilename, "r"))
for checkFilename in FindCheckerFiles(checkPath):
checkerFile = ParseCheckerStream(os.path.basename(checkFilename),
checkPrefix,
open(checkFilename, "r"))
- MatchFiles(checkerFile, c1File, targetArch)
+ MatchFiles(checkerFile, c1File, targetArch, debuggableMode)
if __name__ == "__main__":
@@ -103,4 +105,4 @@ if __name__ == "__main__":
elif args.dump_pass:
DumpPass(args.tested_file, args.dump_pass)
else:
- RunTests(args.check_prefix, args.source_path, args.tested_file, args.arch)
+ RunTests(args.check_prefix, args.source_path, args.tested_file, args.arch, args.debuggable)
diff --git a/tools/checker/file_format/checker/parser.py b/tools/checker/file_format/checker/parser.py
index 446302fed2..f199a50ebe 100644
--- a/tools/checker/file_format/checker/parser.py
+++ b/tools/checker/file_format/checker/parser.py
@@ -22,7 +22,7 @@ import re
def __isCheckerLine(line):
return line.startswith("///") or line.startswith("##")
-def __extractLine(prefix, line, arch = None):
+def __extractLine(prefix, line, arch = None, debuggable = False):
""" Attempts to parse a check line. The regex searches for a comment symbol
followed by the CHECK keyword, given attribute and a colon at the very
beginning of the line. Whitespaces are ignored.
@@ -30,10 +30,11 @@ def __extractLine(prefix, line, arch = None):
rIgnoreWhitespace = r"\s*"
rCommentSymbols = [r"///", r"##"]
arch_specifier = r"-%s" % arch if arch is not None else r""
+ dbg_specifier = r"-DEBUGGABLE" if debuggable else r""
regexPrefix = rIgnoreWhitespace + \
r"(" + r"|".join(rCommentSymbols) + r")" + \
rIgnoreWhitespace + \
- prefix + arch_specifier + r":"
+ prefix + arch_specifier + dbg_specifier + r":"
# The 'match' function succeeds only if the pattern is matched at the
# beginning of the line.
@@ -56,10 +57,11 @@ def __processLine(line, lineNo, prefix, fileName):
# Lines beginning with 'CHECK-START' start a new test case.
# We currently only consider the architecture suffix in "CHECK-START" lines.
- for arch in [None] + archs_list:
- startLine = __extractLine(prefix + "-START", line, arch)
- if startLine is not None:
- return None, startLine, arch
+ for debuggable in [True, False]:
+ for arch in [None] + archs_list:
+ startLine = __extractLine(prefix + "-START", line, arch, debuggable)
+ if startLine is not None:
+ return None, startLine, (arch, debuggable)
# Lines starting only with 'CHECK' are matched in order.
plainLine = __extractLine(prefix, line)
@@ -167,9 +169,11 @@ def ParseCheckerStream(fileName, prefix, stream):
fnProcessLine = lambda line, lineNo: __processLine(line, lineNo, prefix, fileName)
fnLineOutsideChunk = lambda line, lineNo: \
Logger.fail("Checker line not inside a group", fileName, lineNo)
- for caseName, caseLines, startLineNo, testArch in \
+ for caseName, caseLines, startLineNo, testData in \
SplitStream(stream, fnProcessLine, fnLineOutsideChunk):
- testCase = TestCase(checkerFile, caseName, startLineNo, testArch)
+ testArch = testData[0]
+ forDebuggable = testData[1]
+ testCase = TestCase(checkerFile, caseName, startLineNo, testArch, forDebuggable)
for caseLine in caseLines:
ParseCheckerAssertion(testCase, caseLine[0], caseLine[1], caseLine[2])
return checkerFile
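A sketch of the prefix regex this builds, showing how the optional architecture and DEBUGGABLE suffixes combine (fragments reconstructed from __extractLine above; standalone, not the checker's actual module):

import re

def start_line_regex(prefix, arch=None, debuggable=False):
    arch_specifier = "-%s" % arch if arch is not None else ""
    dbg_specifier = "-DEBUGGABLE" if debuggable else ""
    return re.compile(r"\s*(///|##)\s*" + prefix + arch_specifier + dbg_specifier + r":")

line = "/// CHECK-START-ARM-DEBUGGABLE: Group"
assert start_line_regex("CHECK-START", "ARM", True).match(line)
assert not start_line_regex("CHECK-START", "ARM", False).match(line)
assert not start_line_regex("CHECK-START").match(line)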
diff --git a/tools/checker/file_format/checker/struct.py b/tools/checker/file_format/checker/struct.py
index 7ee09cdb84..a31aa54c91 100644
--- a/tools/checker/file_format/checker/struct.py
+++ b/tools/checker/file_format/checker/struct.py
@@ -36,7 +36,7 @@ class CheckerFile(PrintableMixin):
class TestCase(PrintableMixin):
- def __init__(self, parent, name, startLineNo, testArch = None):
+ def __init__(self, parent, name, startLineNo, testArch = None, forDebuggable = False):
assert isinstance(parent, CheckerFile)
self.parent = parent
@@ -44,6 +44,7 @@ class TestCase(PrintableMixin):
self.assertions = []
self.startLineNo = startLineNo
self.testArch = testArch
+ self.forDebuggable = forDebuggable
if not self.name:
Logger.fail("Test case does not have a name", self.fileName, self.startLineNo)
diff --git a/tools/checker/file_format/checker/test.py b/tools/checker/file_format/checker/test.py
index 495dabc588..579c190d21 100644
--- a/tools/checker/file_format/checker/test.py
+++ b/tools/checker/file_format/checker/test.py
@@ -290,7 +290,7 @@ class CheckerParser_FileLayoutTest(unittest.TestCase):
/// CHECK-NEXT: bar
""")
-class CheckerParser_ArchTests(unittest.TestCase):
+class CheckerParser_SuffixTests(unittest.TestCase):
noarch_block = """
/// CHECK-START: Group
@@ -308,11 +308,12 @@ class CheckerParser_ArchTests(unittest.TestCase):
/// CHECK-DAG: yoyo
"""
+ def parse(self, checkerText):
+ return ParseCheckerStream("<test_file>", "CHECK", io.StringIO(ToUnicode(checkerText)))
+
def test_NonArchTests(self):
for arch in [None] + archs_list:
- checkerFile = ParseCheckerStream("<test-file>",
- "CHECK",
- io.StringIO(ToUnicode(self.noarch_block)))
+ checkerFile = self.parse(self.noarch_block)
self.assertEqual(len(checkerFile.testCases), 1)
self.assertEqual(len(checkerFile.testCases[0].assertions), 4)
@@ -320,9 +321,7 @@ class CheckerParser_ArchTests(unittest.TestCase):
for targetArch in archs_list:
for testArch in [a for a in archs_list if a != targetArch]:
checkerText = self.arch_block.format(test_arch = testArch)
- checkerFile = ParseCheckerStream("<test-file>",
- "CHECK",
- io.StringIO(ToUnicode(checkerText)))
+ checkerFile = self.parse(checkerText)
self.assertEqual(len(checkerFile.testCases), 1)
self.assertEqual(len(checkerFile.testCasesForArch(testArch)), 1)
self.assertEqual(len(checkerFile.testCasesForArch(targetArch)), 0)
@@ -330,13 +329,42 @@ class CheckerParser_ArchTests(unittest.TestCase):
def test_Arch(self):
for arch in archs_list:
checkerText = self.arch_block.format(test_arch = arch)
- checkerFile = ParseCheckerStream("<test-file>",
- "CHECK",
- io.StringIO(ToUnicode(checkerText)))
+ checkerFile = self.parse(checkerText)
self.assertEqual(len(checkerFile.testCases), 1)
self.assertEqual(len(checkerFile.testCasesForArch(arch)), 1)
self.assertEqual(len(checkerFile.testCases[0].assertions), 4)
+ def test_NoDebugAndArch(self):
+ testCase = self.parse("""
+ /// CHECK-START: Group
+ /// CHECK: foo
+ """).testCases[0]
+ self.assertFalse(testCase.forDebuggable)
+ self.assertEqual(testCase.testArch, None)
+
+ def test_SetDebugNoArch(self):
+ testCase = self.parse("""
+ /// CHECK-START-DEBUGGABLE: Group
+ /// CHECK: foo
+ """).testCases[0]
+ self.assertTrue(testCase.forDebuggable)
+ self.assertEqual(testCase.testArch, None)
+
+ def test_NoDebugSetArch(self):
+ testCase = self.parse("""
+ /// CHECK-START-ARM: Group
+ /// CHECK: foo
+ """).testCases[0]
+ self.assertFalse(testCase.forDebuggable)
+ self.assertEqual(testCase.testArch, "ARM")
+
+ def test_SetDebugAndArch(self):
+ testCase = self.parse("""
+ /// CHECK-START-ARM-DEBUGGABLE: Group
+ /// CHECK: foo
+ """).testCases[0]
+ self.assertTrue(testCase.forDebuggable)
+ self.assertEqual(testCase.testArch, "ARM")
class CheckerParser_EvalTests(unittest.TestCase):
def parseTestCase(self, string):
diff --git a/tools/checker/match/file.py b/tools/checker/match/file.py
index 6601a1e965..3ded07482f 100644
--- a/tools/checker/match/file.py
+++ b/tools/checker/match/file.py
@@ -159,10 +159,13 @@ def MatchTestCase(testCase, c1Pass):
matchFrom = match.scope.end + 1
variables = match.variables
-def MatchFiles(checkerFile, c1File, targetArch):
+def MatchFiles(checkerFile, c1File, targetArch, debuggableMode):
for testCase in checkerFile.testCases:
if testCase.testArch not in [None, targetArch]:
continue
+ if testCase.forDebuggable != debuggableMode:
+ continue
+
# TODO: Currently does not handle multiple occurrences of the same group
# name, e.g. when a pass is run multiple times. It will always try to
# match a check group against the first output group of the same name.
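The selection logic MatchFiles now applies can be summarised with this small sketch (attribute names are taken from TestCase above; the rest is illustrative):

from collections import namedtuple

def selected_test_cases(test_cases, target_arch, debuggable_mode):
    # A test case runs if its arch is unset or matches the target, and its
    # debuggable flag matches the mode the output was produced under.
    for tc in test_cases:
        if tc.testArch not in [None, target_arch]:
            continue
        if tc.forDebuggable != debuggable_mode:
            continue
        yield tc

TC = namedtuple("TC", "testArch forDebuggable")
cases = [TC(None, False), TC("ARM", False), TC(None, True)]
assert list(selected_test_cases(cases, "X86", False)) == [TC(None, False)]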
diff --git a/tools/checker/run_unit_tests.py b/tools/checker/run_unit_tests.py
index 2e8f2083b5..a0d274df25 100755
--- a/tools/checker/run_unit_tests.py
+++ b/tools/checker/run_unit_tests.py
@@ -19,7 +19,7 @@ from file_format.c1visualizer.test import C1visualizerParser_Test
from file_format.checker.test import CheckerParser_PrefixTest, \
CheckerParser_TestExpressionTest, \
CheckerParser_FileLayoutTest, \
- CheckerParser_ArchTests, \
+ CheckerParser_SuffixTests, \
CheckerParser_EvalTests
from match.test import MatchLines_Test, \
MatchFiles_Test
diff --git a/tools/dmtracedump/Android.mk b/tools/dmtracedump/Android.mk
new file mode 100644
index 0000000000..da0d632850
--- /dev/null
+++ b/tools/dmtracedump/Android.mk
@@ -0,0 +1,32 @@
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Java method trace dump tool
+
+LOCAL_PATH:= $(call my-dir)
+
+include $(CLEAR_VARS)
+LOCAL_CPP_EXTENSION := cc
+LOCAL_SRC_FILES := tracedump.cc
+LOCAL_CFLAGS += -O0 -g -Wall
+LOCAL_MODULE_HOST_OS := darwin linux windows
+LOCAL_MODULE := dmtracedump
+include $(BUILD_HOST_EXECUTABLE)
+
+include $(CLEAR_VARS)
+LOCAL_CPP_EXTENSION := cc
+LOCAL_SRC_FILES := createtesttrace.cc
+LOCAL_CFLAGS += -O0 -g -Wall
+LOCAL_MODULE := create_test_dmtrace
+include $(BUILD_HOST_EXECUTABLE)
diff --git a/tools/dmtracedump/createtesttrace.cc b/tools/dmtracedump/createtesttrace.cc
new file mode 100644
index 0000000000..444cce4082
--- /dev/null
+++ b/tools/dmtracedump/createtesttrace.cc
@@ -0,0 +1,449 @@
+/*
+ * Copyright 2015, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Create a test file in the format required by dmtrace.
+ */
+#include "profile.h" // from VM header
+
+#include <assert.h>
+#include <ctype.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/time.h>
+#include <time.h>
+#include <unistd.h>
+
+/*
+ * Values from the header of the data file.
+ */
+typedef struct DataHeader {
+ uint32_t magic;
+ int16_t version;
+ int16_t offsetToData;
+ int64_t startWhen;
+} DataHeader;
+
+#define VERSION 2
+int32_t versionNumber = VERSION;
+int32_t verbose = 0;
+
+DataHeader header = {0x574f4c53, VERSION, sizeof(DataHeader), 0LL};
+
+const char* versionHeader = "*version\n";
+const char* clockDef = "clock=thread-cpu\n";
+
+const char* keyThreads =
+ "*threads\n"
+ "1 main\n"
+ "2 foo\n"
+ "3 bar\n"
+ "4 blah\n";
+
+const char* keyEnd = "*end\n";
+
+typedef struct dataRecord {
+ uint32_t time;
+ int32_t threadId;
+ uint32_t action; /* 0=entry, 1=exit, 2=exception exit */
+ char* fullName;
+ char* className;
+ char* methodName;
+ char* signature;
+ uint32_t methodId;
+} dataRecord;
+
+dataRecord* records;
+
+#define BUF_SIZE 1024
+char buf[BUF_SIZE];
+
+typedef struct stack {
+ dataRecord** frames;
+ int32_t indentLevel;
+} stack;
+
+/* Mac OS doesn't have strndup(), so implement it here.
+ */
+char* strndup(const char* src, size_t len) {
+ char* dest = new char[len + 1];
+ strncpy(dest, src, len);
+ dest[len] = 0;
+ return dest;
+}
+
+/*
+ * Parse the input file. It looks something like this:
+ * # This is a comment line
+ * 4 1 A
+ * 6 1 B
+ * 8 1 B
+ * 10 1 A
+ *
+ * where the first column is the time, the second column is the thread id,
+ * and the third column is the method (actually just the class name). The
+ * number of spaces between the 2nd and 3rd columns is the indentation and
+ * determines the call stack. Each called method must be indented by one
+ * more space. In the example above, A is called at time 4, A calls B at
+ * time 6, B returns at time 8, and A returns at time 10. Thread 1 is the
+ * only thread that is running.
+ *
+ * An alternative file format leaves out the first two columns:
+ * A
+ * B
+ * B
+ * A
+ *
+ * In this file format, the thread id is always 1, and the time starts at
+ * 2 and increments by 2 for each line.
+ */
+void parseInputFile(const char* inputFileName) {
+ FILE* inputFp = fopen(inputFileName, "r");
+ if (inputFp == nullptr) {
+ perror(inputFileName);
+ exit(1);
+ }
+
+ /* Count the number of lines in the buffer */
+ int32_t numRecords = 0;
+ int32_t maxThreadId = 1;
+ int32_t maxFrames = 0;
+ char* indentEnd;
+ while (fgets(buf, BUF_SIZE, inputFp)) {
+ char* cp = buf;
+ if (*cp == '#') continue;
+ numRecords += 1;
+ if (isdigit(*cp)) {
+ /* Skip the time column so that the thread id, not the time, is read. */
+ strtoul(cp, &cp, 0);
+ while (isspace(*cp)) cp += 1;
+ int32_t threadId = strtoul(cp, &cp, 0);
+ if (maxThreadId < threadId) maxThreadId = threadId;
+ }
+ indentEnd = cp;
+ while (isspace(*indentEnd)) indentEnd += 1;
+ if (indentEnd - cp + 1 > maxFrames) maxFrames = indentEnd - cp + 1;
+ }
+ int32_t numThreads = maxThreadId + 1;
+
+ /* Add space for a sentinel record at the end */
+ numRecords += 1;
+ records = new dataRecord[numRecords];
+ stack* callStack = new stack[numThreads];
+ for (int32_t ii = 0; ii < numThreads; ++ii) {
+ callStack[ii].frames = nullptr;
+ callStack[ii].indentLevel = 0;
+ }
+
+ rewind(inputFp);
+
+ uint32_t time = 0;
+ int32_t linenum = 0;
+ int32_t nextRecord = 0;
+ int32_t indentLevel = 0;
+ while (fgets(buf, BUF_SIZE, inputFp)) {
+ uint32_t threadId;
+ int32_t len;
+ int32_t indent;
+ int32_t action;
+ char* save_cp;
+
+ linenum += 1;
+ char* cp = buf;
+
+ /* Skip lines that start with '#' */
+ if (*cp == '#') continue;
+
+ /* Get time and thread id */
+ if (!isdigit(*cp)) {
+ /* If the line does not begin with a digit, then fill in
+ * default values for the time and threadId.
+ */
+ time += 2;
+ threadId = 1;
+ } else {
+ time = strtoul(cp, &cp, 0);
+ while (isspace(*cp)) cp += 1;
+ threadId = strtoul(cp, &cp, 0);
+ cp += 1;
+ }
+
+ // Allocate space for the thread stack, if necessary
+ if (callStack[threadId].frames == nullptr) {
+ dataRecord** stk = new dataRecord*[maxFrames];
+ callStack[threadId].frames = stk;
+ }
+ indentLevel = callStack[threadId].indentLevel;
+
+ save_cp = cp;
+ while (isspace(*cp)) {
+ cp += 1;
+ }
+ indent = cp - save_cp + 1;
+ records[nextRecord].time = time;
+ records[nextRecord].threadId = threadId;
+
+ save_cp = cp;
+ while (*cp != '\n') cp += 1;
+
+ /* Remove trailing spaces */
+ cp -= 1;
+ while (isspace(*cp)) cp -= 1;
+ cp += 1;
+ len = cp - save_cp;
+ records[nextRecord].fullName = strndup(save_cp, len);
+
+ /* Parse the name to support "class.method signature" */
+ records[nextRecord].className = nullptr;
+ records[nextRecord].methodName = nullptr;
+ records[nextRecord].signature = nullptr;
+ cp = strchr(save_cp, '.');
+ if (cp) {
+ len = cp - save_cp;
+ if (len > 0) records[nextRecord].className = strndup(save_cp, len);
+ save_cp = cp + 1;
+ cp = strchr(save_cp, ' ');
+ if (cp == nullptr) cp = strchr(save_cp, '\n');
+ if (cp && cp > save_cp) {
+ len = cp - save_cp;
+ records[nextRecord].methodName = strndup(save_cp, len);
+ save_cp = cp + 1;
+ cp = strchr(save_cp, ' ');
+ if (cp == nullptr) cp = strchr(save_cp, '\n');
+ if (cp && cp > save_cp) {
+ len = cp - save_cp;
+ records[nextRecord].signature = strndup(save_cp, len);
+ }
+ }
+ }
+
+ if (verbose) {
+ printf("Indent: %d; IndentLevel: %d; Line: %s", indent, indentLevel, buf);
+ }
+
+ action = 0;
+ if (indent == indentLevel + 1) { // Entering a method
+ if (verbose) printf(" Entering %s\n", records[nextRecord].fullName);
+ callStack[threadId].frames[indentLevel] = &records[nextRecord];
+ } else if (indent == indentLevel) { // Exiting a method
+ // Exiting method must be currently on top of stack (unless stack is
+ // empty)
+ if (callStack[threadId].frames[indentLevel - 1] == nullptr) {
+ if (verbose)
+ printf(" Exiting %s (past bottom of stack)\n",
+ records[nextRecord].fullName);
+ callStack[threadId].frames[indentLevel - 1] = &records[nextRecord];
+ action = 1;
+ } else {
+ if (indentLevel < 1) {
+ fprintf(stderr, "Error: line %d: %s", linenum, buf);
+ fprintf(stderr, " expected positive (>0) indentation, found %d\n",
+ indent);
+ exit(1);
+ }
+ char* name = callStack[threadId].frames[indentLevel - 1]->fullName;
+ if (strcmp(name, records[nextRecord].fullName) == 0) {
+ if (verbose) printf(" Exiting %s\n", name);
+ action = 1;
+ } else { // exiting method doesn't match stack's top method
+ fprintf(stderr, "Error: line %d: %s", linenum, buf);
+ fprintf(stderr, " expected exit from %s\n",
+ callStack[threadId].frames[indentLevel - 1]->fullName);
+ exit(1);
+ }
+ }
+ } else {
+ if (nextRecord != 0) {
+ fprintf(stderr, "Error: line %d: %s", linenum, buf);
+ fprintf(stderr, " expected indentation %d [+1], found %d\n",
+ indentLevel, indent);
+ exit(1);
+ }
+
+ if (verbose) {
+ printf(" Nonzero indent at first record\n");
+ printf(" Entering %s\n", records[nextRecord].fullName);
+ }
+
+ // This is the first line of data, so we allow a larger
+ // initial indent. This allows us to test popping off more
+ // frames than we entered.
+ indentLevel = indent - 1;
+ callStack[threadId].frames[indentLevel] = &records[nextRecord];
+ }
+
+ if (action == 0)
+ indentLevel += 1;
+ else
+ indentLevel -= 1;
+ records[nextRecord].action = action;
+ callStack[threadId].indentLevel = indentLevel;
+
+ nextRecord += 1;
+ }
+
+ /* Mark the last record with a sentinel */
+ memset(&records[nextRecord], 0, sizeof(dataRecord));
+}
+
+/*
+ * Write values to the binary data file.
+ */
+void write2LE(FILE* fp, uint16_t val) {
+ putc(val & 0xff, fp);
+ putc(val >> 8, fp);
+}
+
+void write4LE(FILE* fp, uint32_t val) {
+ putc(val & 0xff, fp);
+ putc((val >> 8) & 0xff, fp);
+ putc((val >> 16) & 0xff, fp);
+ putc((val >> 24) & 0xff, fp);
+}
+
+void write8LE(FILE* fp, uint64_t val) {
+ putc(val & 0xff, fp);
+ putc((val >> 8) & 0xff, fp);
+ putc((val >> 16) & 0xff, fp);
+ putc((val >> 24) & 0xff, fp);
+ putc((val >> 32) & 0xff, fp);
+ putc((val >> 40) & 0xff, fp);
+ putc((val >> 48) & 0xff, fp);
+ putc((val >> 56) & 0xff, fp);
+}
+
+void writeDataRecord(FILE* dataFp, int32_t threadId, uint32_t methodVal, uint32_t elapsedTime) {
+ if (versionNumber == 1)
+ putc(threadId, dataFp);
+ else
+ write2LE(dataFp, threadId);
+ write4LE(dataFp, methodVal);
+ write4LE(dataFp, elapsedTime);
+}
+
+void writeDataHeader(FILE* dataFp) {
+ struct timeval tv;
+ struct timezone tz;
+
+ gettimeofday(&tv, &tz);
+ uint64_t startTime = tv.tv_sec;
+ startTime = (startTime << 32) | tv.tv_usec;
+ header.version = versionNumber;
+ write4LE(dataFp, header.magic);
+ write2LE(dataFp, header.version);
+ write2LE(dataFp, header.offsetToData);
+ write8LE(dataFp, startTime);
+}
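For reference, the same little-endian layout expressed with Python's struct module (a sketch based on the DataHeader fields and writeDataRecord above; not part of the tool):

import struct
import time

def pack_data_header(version=2):
    magic = 0x574f4c53            # 'SLOW' when written in little-endian byte order
    offset_to_data = 16           # sizeof(DataHeader)
    now = time.time()
    start_when = (int(now) << 32) | int((now % 1) * 1_000_000)
    return struct.pack("<IhhQ", magic, version, offset_to_data, start_when)

def pack_data_record(thread_id, method_val, elapsed_time, version=2):
    # Version 1 uses a single byte for the thread id, version 2 uses two.
    fmt = "<BII" if version == 1 else "<HII"
    return struct.pack(fmt, thread_id, method_val, elapsed_time)

assert len(pack_data_header()) == 16
assert len(pack_data_record(1, 0x8, 100)) == 10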
+
+void writeKeyMethods(FILE* keyFp) {
+ const char* methodStr = "*methods\n";
+ fwrite(methodStr, strlen(methodStr), 1, keyFp);
+
+ /* Assign method ids in multiples of 4 */
+ uint32_t methodId = 0;
+ for (dataRecord* pRecord = records; pRecord->fullName; ++pRecord) {
+ if (pRecord->methodId) continue;
+ uint32_t id = ++methodId << 2;
+ pRecord->methodId = id;
+
+ /* Assign this id to all the other records that have the
+ * same name.
+ */
+ for (dataRecord* pNext = pRecord + 1; pNext->fullName; ++pNext) {
+ if (pNext->methodId) continue;
+ if (strcmp(pRecord->fullName, pNext->fullName) == 0) pNext->methodId = id;
+ }
+ if (pRecord->className == nullptr || pRecord->methodName == nullptr) {
+ fprintf(keyFp, "%#x %s m ()\n", pRecord->methodId,
+ pRecord->fullName);
+ } else if (pRecord->signature == nullptr) {
+ fprintf(keyFp, "%#x %s %s ()\n", pRecord->methodId,
+ pRecord->className, pRecord->methodName);
+ } else {
+ fprintf(keyFp, "%#x %s %s %s\n", pRecord->methodId,
+ pRecord->className, pRecord->methodName, pRecord->signature);
+ }
+ }
+}
+
+void writeKeys(FILE* keyFp) {
+ fprintf(keyFp, "%s%d\n%s", versionHeader, versionNumber, clockDef);
+ fwrite(keyThreads, strlen(keyThreads), 1, keyFp);
+ writeKeyMethods(keyFp);
+ fwrite(keyEnd, strlen(keyEnd), 1, keyFp);
+}
+
+void writeDataRecords(FILE* dataFp) {
+ for (dataRecord* pRecord = records; pRecord->fullName; ++pRecord) {
+ uint32_t val = METHOD_COMBINE(pRecord->methodId, pRecord->action);
+ writeDataRecord(dataFp, pRecord->threadId, val, pRecord->time);
+ }
+}
+
+void writeTrace(const char* traceFileName) {
+ FILE* fp = fopen(traceFileName, "w");
+ if (fp == nullptr) {
+ perror(traceFileName);
+ exit(1);
+ }
+ writeKeys(fp);
+ writeDataHeader(fp);
+ writeDataRecords(fp);
+ fclose(fp);
+}
+
+int32_t parseOptions(int32_t argc, char** argv) {
+ int32_t err = 0;
+ while (1) {
+ int32_t opt = getopt(argc, argv, "v:d");
+ if (opt == -1) break;
+ switch (opt) {
+ case 'v':
+ versionNumber = strtoul(optarg, nullptr, 0);
+ if (versionNumber != 1 && versionNumber != 2) {
+ fprintf(stderr, "Error: version number (%d) must be 1 or 2\n", versionNumber);
+ err = 1;
+ }
+ break;
+ case 'd':
+ verbose = 1;
+ break;
+ default:
+ err = 1;
+ break;
+ }
+ }
+ return err;
+}
+
+int main(int argc, char** argv) {
+ char* inputFile;
+ char* traceFileName = nullptr;
+
+ if (parseOptions(argc, argv) || argc - optind != 2) {
+ fprintf(stderr, "Usage: %s [-v version] [-d] input_file trace_prefix\n", argv[0]);
+ exit(1);
+ }
+
+ inputFile = argv[optind++];
+ parseInputFile(inputFile);
+ traceFileName = argv[optind++];
+
+ writeTrace(traceFileName);
+
+ return 0;
+}
diff --git a/tools/dmtracedump/dmtracedump.pl b/tools/dmtracedump/dmtracedump.pl
new file mode 100755
index 0000000000..6e487c60ae
--- /dev/null
+++ b/tools/dmtracedump/dmtracedump.pl
@@ -0,0 +1,18 @@
+#!/usr/bin/perl
+
+opendir(DIR, ".") || die "can't opendir $some_dir: $!";
+@traces = grep { /.*\.dmtrace\.data/ } readdir(DIR);
+
+foreach (@traces)
+{
+ $input = $_;
+ $input =~ s/\.data$//;
+
+ $output = "$input.html";
+
+ print("dmtracedump -h -p $input > $output\n");
+ system("dmtracedump -h -p '$input' > '$output'");
+
+}
+
+closedir DIR;
diff --git a/tools/dmtracedump/dumpdir.sh b/tools/dmtracedump/dumpdir.sh
new file mode 100644
index 0000000000..81992a2a8f
--- /dev/null
+++ b/tools/dmtracedump/dumpdir.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+FILES=`ls $1/*.data | sed "s/^\\(.*\\).data$/\\1/"`
+
+mkdir -p $2
+
+for F in $FILES
+do
+ G=$2/`echo $F | sed "s/.*\\///g"`.html
+ dmtracedump -h -p $F > $G
+done
diff --git a/tools/dmtracedump/profile.h b/tools/dmtracedump/profile.h
new file mode 100644
index 0000000000..8182352a7f
--- /dev/null
+++ b/tools/dmtracedump/profile.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Android's method call profiling goodies.
+ */
+#ifndef ART_TOOLS_DMTRACEDUMP_PROFILE_H_
+#define ART_TOOLS_DMTRACEDUMP_PROFILE_H_
+
+/*
+ * Enumeration for the two "action" bits.
+ */
+enum {
+ METHOD_TRACE_ENTER = 0x00, // method entry
+ METHOD_TRACE_EXIT = 0x01, // method exit
+ METHOD_TRACE_UNROLL = 0x02, // method exited by exception unrolling
+ // 0x03 currently unused
+};
+
+#define TOKEN_CHAR '*'
+
+/*
+ * Common definitions, shared with the dump tool.
+ */
+#define METHOD_ACTION_MASK 0x03 /* two bits */
+#define METHOD_ID(_method) ((_method) & (~METHOD_ACTION_MASK))
+#define METHOD_ACTION(_method) (((unsigned int)(_method)) & METHOD_ACTION_MASK)
+#define METHOD_COMBINE(_method, _action) ((_method) | (_action))
+
+#endif // ART_TOOLS_DMTRACEDUMP_PROFILE_H_
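A small Python illustration of the bit packing these macros perform (mirrors of METHOD_ID, METHOD_ACTION and METHOD_COMBINE; illustrative only):

METHOD_TRACE_ENTER = 0x00
METHOD_TRACE_EXIT = 0x01
METHOD_ACTION_MASK = 0x03

def method_combine(method_id, action):
    # Method ids are assigned in multiples of 4, so the low two bits are
    # free to carry the action.
    return method_id | action

def method_id(value):
    return value & ~METHOD_ACTION_MASK

def method_action(value):
    return value & METHOD_ACTION_MASK

val = method_combine(0x8, METHOD_TRACE_EXIT)
assert method_id(val) == 0x8
assert method_action(val) == METHOD_TRACE_EXIT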
diff --git a/tools/dmtracedump/tracedump.cc b/tools/dmtracedump/tracedump.cc
new file mode 100644
index 0000000000..f70e2c2207
--- /dev/null
+++ b/tools/dmtracedump/tracedump.cc
@@ -0,0 +1,2616 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Process dmtrace output.
+ *
+ * This is the wrong way to go about it -- C is a clumsy language for
+ * shuffling data around. It'll do for a first pass.
+ */
+#include "profile.h" // from VM header
+
+#include <assert.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <unistd.h>
+
+/* Version number in the key file.
+ * Version 1 uses one byte for the thread id.
+ * Version 2 uses two bytes for the thread ids.
+ * Version 3 encodes the record size and adds an optional extra timestamp field.
+ */
+int32_t versionNumber;
+
+/* arbitrarily limit indentation */
+#define MAX_STACK_DEPTH 10000
+
+/* thread list in key file is not reliable, so just max out */
+#define MAX_THREADS 32768
+
+/* Size of temporary buffers for escaping html strings */
+#define HTML_BUFSIZE 10240
+
+const char* htmlHeader =
+ "<html>\n<head>\n<script type=\"text/javascript\" "
+ "src=\"%ssortable.js\"></script>\n"
+ "<script langugage=\"javascript\">\n"
+ "function toggle(item) {\n"
+ " obj=document.getElementById(item);\n"
+ " visible=(obj.style.display!=\"none\" && obj.style.display!=\"\");\n"
+ " key=document.getElementById(\"x\" + item);\n"
+ " if (visible) {\n"
+ " obj.style.display=\"none\";\n"
+ " key.innerHTML=\"+\";\n"
+ " } else {\n"
+ " obj.style.display=\"block\";\n"
+ " key.innerHTML=\"-\";\n"
+ " }\n"
+ "}\n"
+ "function onMouseOver(obj) {\n"
+ " obj.style.background=\"lightblue\";\n"
+ "}\n"
+ "function onMouseOut(obj) {\n"
+ " obj.style.background=\"white\";\n"
+ "}\n"
+ "</script>\n"
+ "<style type=\"text/css\">\n"
+ "div { font-family: courier; font-size: 13 }\n"
+ "div.parent { margin-left: 15; display: none }\n"
+ "div.leaf { margin-left: 10 }\n"
+ "div.header { margin-left: 10 }\n"
+ "div.link { margin-left: 10; cursor: move }\n"
+ "span.parent { padding-right: 10; }\n"
+ "span.leaf { padding-right: 10; }\n"
+ "a img { border: 0;}\n"
+ "table.sortable th { border-width: 0px 1px 1px 1px; background-color: "
+ "#ccc;}\n"
+ "a { text-decoration: none; }\n"
+ "a:hover { text-decoration: underline; }\n"
+ "table.sortable th, table.sortable td { text-align: left;}"
+ "table.sortable tr.odd td { background-color: #ddd; }\n"
+ "table.sortable tr.even td { background-color: #fff; }\n"
+ "</style>\n"
+ "</head><body>\n\n";
+
+const char* htmlFooter = "\n</body>\n</html>\n";
+const char* profileSeparator =
+ "======================================================================";
+
+const char* tableHeader =
+ "<table class='sortable' id='%s'><tr>\n"
+ "<th>Method</th>\n"
+ "<th>Run 1 (us)</th>\n"
+ "<th>Run 2 (us)</th>\n"
+ "<th>Diff (us)</th>\n"
+ "<th>Diff (%%)</th>\n"
+ "<th>1: # calls</th>\n"
+ "<th>2: # calls</th>\n"
+ "</tr>\n";
+
+const char* tableHeaderMissing =
+ "<table class='sortable' id='%s'>\n"
+ "<th>Method</th>\n"
+ "<th>Exclusive</th>\n"
+ "<th>Inclusive</th>\n"
+ "<th># calls</th>\n";
+
+#define GRAPH_LABEL_VISITED 0x0001
+#define GRAPH_NODE_VISITED 0x0002
+
+/*
+ * Values from the header of the data file.
+ */
+typedef struct DataHeader {
+ uint32_t magic;
+ int16_t version;
+ int16_t offsetToData;
+ int64_t startWhen;
+ int16_t recordSize;
+} DataHeader;
+
+/*
+ * Entry from the thread list.
+ */
+typedef struct ThreadEntry {
+ int32_t threadId;
+ const char* threadName;
+} ThreadEntry;
+
+struct MethodEntry;
+typedef struct TimedMethod {
+ struct TimedMethod* next;
+ uint64_t elapsedInclusive;
+ int32_t numCalls;
+ struct MethodEntry* method;
+} TimedMethod;
+
+typedef struct ClassEntry {
+ const char* className;
+ uint64_t elapsedExclusive;
+ int32_t numMethods;
+ struct MethodEntry** methods; /* list of methods in this class */
+ int32_t numCalls[2]; /* 0=normal, 1=recursive */
+} ClassEntry;
+
+typedef struct UniqueMethodEntry {
+ uint64_t elapsedExclusive;
+ int32_t numMethods;
+ struct MethodEntry** methods; /* list of methods with same name */
+ int32_t numCalls[2]; /* 0=normal, 1=recursive */
+} UniqueMethodEntry;
+
+/*
+ * Entry from the method list.
+ */
+typedef struct MethodEntry {
+ int64_t methodId;
+ const char* className;
+ const char* methodName;
+ const char* signature;
+ const char* fileName;
+ int32_t lineNum;
+ uint64_t elapsedExclusive;
+ uint64_t elapsedInclusive;
+ uint64_t topExclusive; /* non-recursive exclusive time */
+ uint64_t recursiveInclusive;
+ struct TimedMethod* parents[2]; /* 0=normal, 1=recursive */
+ struct TimedMethod* children[2]; /* 0=normal, 1=recursive */
+ int32_t numCalls[2]; /* 0=normal, 1=recursive */
+ int32_t index; /* used after sorting to number methods */
+ int32_t recursiveEntries; /* number of entries on the stack */
+ int32_t graphState; /* used when graphing to see if this method has been visited before */
+} MethodEntry;
+
+/*
+ * The parsed contents of the key file.
+ */
+typedef struct DataKeys {
+ char* fileData; /* contents of the entire file */
+ int64_t fileLen;
+ int32_t numThreads;
+ ThreadEntry* threads;
+ int32_t numMethods;
+ MethodEntry* methods; /* 2 extra methods: "toplevel" and "unknown" */
+} DataKeys;
+
+#define TOPLEVEL_INDEX 0
+#define UNKNOWN_INDEX 1
+
+typedef struct StackEntry {
+ MethodEntry* method;
+ uint64_t entryTime;
+} StackEntry;
+
+typedef struct CallStack {
+ int32_t top;
+ StackEntry calls[MAX_STACK_DEPTH];
+ uint64_t lastEventTime;
+ uint64_t threadStartTime;
+} CallStack;
+
+typedef struct DiffEntry {
+ MethodEntry* method1;
+ MethodEntry* method2;
+ int64_t differenceExclusive;
+ int64_t differenceInclusive;
+ double differenceExclusivePercentage;
+ double differenceInclusivePercentage;
+} DiffEntry;
+
+// Global options
+typedef struct Options {
+ const char* traceFileName;
+ const char* diffFileName;
+ const char* graphFileName;
+ int32_t keepDotFile;
+ int32_t dump;
+ int32_t outputHtml;
+ const char* sortableUrl;
+ int32_t threshold;
+} Options;
+
+typedef struct TraceData {
+ int32_t numClasses;
+ ClassEntry* classes;
+ CallStack* stacks[MAX_THREADS];
+ int32_t depth[MAX_THREADS];
+ int32_t numUniqueMethods;
+ UniqueMethodEntry* uniqueMethods;
+} TraceData;
+
+static Options gOptions;
+
+/* Escapes characters in the source string that are html special entities.
+ * The escaped string is written to "dest" which must be large enough to
+ * hold the result. A pointer to "dest" is returned. The characters and
+ * their corresponding escape sequences are:
+ * '<' &lt;
+ * '>' &gt;
+ * '&' &amp;
+ */
+char* htmlEscape(const char* src, char* dest, int32_t len) {
+ char* destStart = dest;
+
+ if (src == nullptr) return nullptr;
+
+ int32_t nbytes = 0;
+ while (*src) {
+ if (*src == '<') {
+ nbytes += 4;
+ if (nbytes >= len) break;
+ *dest++ = '&';
+ *dest++ = 'l';
+ *dest++ = 't';
+ *dest++ = ';';
+ } else if (*src == '>') {
+ nbytes += 4;
+ if (nbytes >= len) break;
+ *dest++ = '&';
+ *dest++ = 'g';
+ *dest++ = 't';
+ *dest++ = ';';
+ } else if (*src == '&') {
+ nbytes += 5;
+ if (nbytes >= len) break;
+ *dest++ = '&';
+ *dest++ = 'a';
+ *dest++ = 'm';
+ *dest++ = 'p';
+ *dest++ = ';';
+ } else {
+ nbytes += 1;
+ if (nbytes >= len) break;
+ *dest++ = *src;
+ }
+ src += 1;
+ }
+ if (nbytes >= len) {
+ fprintf(stderr, "htmlEscape(): buffer overflow\n");
+ exit(1);
+ }
+ *dest = 0;
+
+ return destStart;
+}
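The escaping rule is the same one Python's standard library provides; a one-line check of the mapping described above (for reference only):

import html

# '<' -> '&lt;', '>' -> '&gt;', '&' -> '&amp;' (quotes are left alone here).
assert html.escape("a<b & c>d", quote=False) == "a&lt;b &amp; c&gt;d"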
+
+/* Initializes a MethodEntry
+ */
+void initMethodEntry(MethodEntry* method, int64_t methodId, const char* className,
+ const char* methodName, const char* signature, const char* fileName,
+ const char* lineNumStr) {
+ method->methodId = methodId;
+ method->className = className;
+ method->methodName = methodName;
+ method->signature = signature;
+ method->fileName = fileName;
+ method->lineNum = (lineNumStr != nullptr) ? atoi(lineNumStr) : -1;
+ method->elapsedExclusive = 0;
+ method->elapsedInclusive = 0;
+ method->topExclusive = 0;
+ method->recursiveInclusive = 0;
+ method->parents[0] = nullptr;
+ method->parents[1] = nullptr;
+ method->children[0] = nullptr;
+ method->children[1] = nullptr;
+ method->numCalls[0] = 0;
+ method->numCalls[1] = 0;
+ method->index = 0;
+ method->recursiveEntries = 0;
+}
+
+/*
+ * This comparison function is called from qsort() to sort
+ * methods into decreasing order of exclusive elapsed time.
+ */
+int32_t compareElapsedExclusive(const void* a, const void* b) {
+ const MethodEntry* methodA = *(const MethodEntry**) a;
+ const MethodEntry* methodB = *(const MethodEntry**) b;
+ uint64_t elapsed1 = methodA->elapsedExclusive;
+ uint64_t elapsed2 = methodB->elapsedExclusive;
+ if (elapsed1 < elapsed2) return 1;
+ if (elapsed1 > elapsed2) return -1;
+
+ /* If the elapsed times of two methods are equal, then sort them
+ * into alphabetical order.
+ */
+ int32_t result = strcmp(methodA->className, methodB->className);
+ if (result == 0) {
+ if (methodA->methodName == nullptr || methodB->methodName == nullptr) {
+ int64_t idA = methodA->methodId;
+ int64_t idB = methodB->methodId;
+ if (idA < idB) return -1;
+ if (idA > idB) return 1;
+ return 0;
+ }
+ result = strcmp(methodA->methodName, methodB->methodName);
+ if (result == 0) result = strcmp(methodA->signature, methodB->signature);
+ }
+ return result;
+}
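The same ordering expressed as a Python sort key, slightly simplified (the method-id tie-break for unnamed methods is omitted; purely illustrative):

def exclusive_sort_key(method):
    # Descending exclusive time, then class, method and signature names.
    return (-method["elapsedExclusive"], method["className"],
            method["methodName"] or "", method["signature"] or "")

methods = [
    {"elapsedExclusive": 10, "className": "LB;", "methodName": "f", "signature": "()V"},
    {"elapsedExclusive": 90, "className": "LA;", "methodName": "g", "signature": "()V"},
]
methods.sort(key=exclusive_sort_key)
assert [m["className"] for m in methods] == ["LA;", "LB;"]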
+
+/*
+ * This comparison function is called from qsort() to sort
+ * methods into decreasing order of inclusive elapsed time.
+ */
+int32_t compareElapsedInclusive(const void* a, const void* b) {
+ const MethodEntry* methodA = *(MethodEntry const**) a;
+ const MethodEntry* methodB = *(MethodEntry const**) b;
+ uint64_t elapsed1 = methodA->elapsedInclusive;
+ uint64_t elapsed2 = methodB->elapsedInclusive;
+ if (elapsed1 < elapsed2) return 1;
+ if (elapsed1 > elapsed2) return -1;
+
+ /* If the elapsed times of two methods are equal, then sort them
+ * into alphabetical order.
+ */
+ int32_t result = strcmp(methodA->className, methodB->className);
+ if (result == 0) {
+ if (methodA->methodName == nullptr || methodB->methodName == nullptr) {
+ int64_t idA = methodA->methodId;
+ int64_t idB = methodB->methodId;
+ if (idA < idB) return -1;
+ if (idA > idB) return 1;
+ return 0;
+ }
+ result = strcmp(methodA->methodName, methodB->methodName);
+ if (result == 0) result = strcmp(methodA->signature, methodB->signature);
+ }
+ return result;
+}
+
+/*
+ * This comparison function is called from qsort() to sort
+ * TimedMethods into decreasing order of inclusive elapsed time.
+ */
+int32_t compareTimedMethod(const void* a, const void* b) {
+ const TimedMethod* timedA = (TimedMethod const*) a;
+ const TimedMethod* timedB = (TimedMethod const*) b;
+ uint64_t elapsed1 = timedA->elapsedInclusive;
+ uint64_t elapsed2 = timedB->elapsedInclusive;
+ if (elapsed1 < elapsed2) return 1;
+ if (elapsed1 > elapsed2) return -1;
+
+ /* If the elapsed times of two methods are equal, then sort them
+ * into alphabetical order.
+ */
+ MethodEntry* methodA = timedA->method;
+ MethodEntry* methodB = timedB->method;
+ int32_t result = strcmp(methodA->className, methodB->className);
+ if (result == 0) {
+ if (methodA->methodName == nullptr || methodB->methodName == nullptr) {
+ int64_t idA = methodA->methodId;
+ int64_t idB = methodB->methodId;
+ if (idA < idB) return -1;
+ if (idA > idB) return 1;
+ return 0;
+ }
+ result = strcmp(methodA->methodName, methodB->methodName);
+ if (result == 0) result = strcmp(methodA->signature, methodB->signature);
+ }
+ return result;
+}
+
+/*
+ * This comparison function is called from qsort() to sort
+ * MethodEntry pointers into alphabetical order of class names.
+ */
+int32_t compareClassNames(const void* a, const void* b) {
+ const MethodEntry* methodA = *(const MethodEntry**) a;
+ const MethodEntry* methodB = *(const MethodEntry**) b;
+ int32_t result = strcmp(methodA->className, methodB->className);
+ if (result == 0) {
+ int64_t idA = methodA->methodId;
+ int64_t idB = methodB->methodId;
+ if (idA < idB) return -1;
+ if (idA > idB) return 1;
+ return 0;
+ }
+ return result;
+}
+
+/*
+ * This comparison function is called from qsort() to sort
+ * classes into decreasing order of exclusive elapsed time.
+ */
+int32_t compareClassExclusive(const void* a, const void* b) {
+ const ClassEntry* classA = *(const ClassEntry**) a;
+ const ClassEntry* classB = *(const ClassEntry**) b;
+ uint64_t elapsed1 = classA->elapsedExclusive;
+ uint64_t elapsed2 = classB->elapsedExclusive;
+ if (elapsed1 < elapsed2) return 1;
+ if (elapsed1 > elapsed2) return -1;
+
+ /* If the elapsed times of two classes are equal, then sort them
+ * into alphabetical order.
+ */
+ int32_t result = strcmp(classA->className, classB->className);
+ if (result == 0) {
+ /* Break ties with the first method id. This is probably not
+ * needed.
+ */
+ int64_t idA = classA->methods[0]->methodId;
+ int64_t idB = classB->methods[0]->methodId;
+ if (idA < idB) return -1;
+ if (idA > idB) return 1;
+ return 0;
+ }
+ return result;
+}
+
+/*
+ * This comparison function is called from qsort() to sort
+ * MethodEntry pointers into alphabetical order by method name,
+ * then by class name.
+ */
+int32_t compareMethodNames(const void* a, const void* b) {
+ const MethodEntry* methodA = *(const MethodEntry**) a;
+ const MethodEntry* methodB = *(const MethodEntry**) b;
+ if (methodA->methodName == nullptr || methodB->methodName == nullptr) {
+ return compareClassNames(a, b);
+ }
+ int32_t result = strcmp(methodA->methodName, methodB->methodName);
+ if (result == 0) {
+ result = strcmp(methodA->className, methodB->className);
+ if (result == 0) {
+ int64_t idA = methodA->methodId;
+ int64_t idB = methodB->methodId;
+ if (idA < idB) return -1;
+ if (idA > idB) return 1;
+ return 0;
+ }
+ }
+ return result;
+}
+
+/*
+ * This comparison function is called from qsort() to sort
+ * unique methods into decreasing order of exclusive elapsed time.
+ */
+int32_t compareUniqueExclusive(const void* a, const void* b) {
+ const UniqueMethodEntry* uniqueA = *(const UniqueMethodEntry**) a;
+ const UniqueMethodEntry* uniqueB = *(const UniqueMethodEntry**) b;
+ uint64_t elapsed1 = uniqueA->elapsedExclusive;
+ uint64_t elapsed2 = uniqueB->elapsedExclusive;
+ if (elapsed1 < elapsed2) return 1;
+ if (elapsed1 > elapsed2) return -1;
+
+ /* If the elapsed times of two methods are equal, then sort them
+ * into alphabetical order.
+ */
+ int32_t result = strcmp(uniqueA->methods[0]->className, uniqueB->methods[0]->className);
+ if (result == 0) {
+ int64_t idA = uniqueA->methods[0]->methodId;
+ int64_t idB = uniqueB->methods[0]->methodId;
+ if (idA < idB) return -1;
+ if (idA > idB) return 1;
+ return 0;
+ }
+ return result;
+}
+
+/*
+ * Free a DataKeys struct.
+ */
+void freeDataKeys(DataKeys* pKeys) {
+ if (pKeys == nullptr) return;
+
+ free(pKeys->fileData);
+ free(pKeys->threads);
+ free(pKeys->methods);
+ free(pKeys);
+}
+
+/*
+ * Find the offset to the next occurrence of the specified character.
+ *
+ * "data" should point somewhere within the current line. "len" is the
+ * number of bytes left in the buffer.
+ *
+ * Returns -1 if we hit the end of the buffer.
+ */
+int32_t findNextChar(const char* data, int32_t len, char lookFor) {
+ const char* start = data;
+
+ while (len > 0) {
+ if (*data == lookFor) return data - start;
+
+ data++;
+ len--;
+ }
+
+ return -1;
+}
+
+/*
+ * Count the number of lines until the next token.
+ *
+ * Returns -1 if none found before EOF.
+ */
+int32_t countLinesToToken(const char* data, int32_t len) {
+ int32_t count = 0;
+ int32_t next;
+
+ while (*data != TOKEN_CHAR) {
+ next = findNextChar(data, len, '\n');
+ if (next < 0) return -1;
+ count++;
+ data += next + 1;
+ len -= next + 1;
+ }
+
+ return count;
+}
+
+/*
+ * Make sure we're at the start of the right section.
+ *
+ * Returns the length of the token line, or -1 if something is wrong.
+ */
+int32_t checkToken(const char* data, int32_t len, const char* cmpStr) {
+ int32_t cmpLen = strlen(cmpStr);
+ int32_t next;
+
+ if (*data != TOKEN_CHAR) {
+ fprintf(stderr, "ERROR: not at start of %s (found '%.10s')\n", cmpStr, data);
+ return -1;
+ }
+
+ next = findNextChar(data, len, '\n');
+ if (next < cmpLen + 1) return -1;
+
+ if (strncmp(data + 1, cmpStr, cmpLen) != 0) {
+ fprintf(stderr, "ERROR: '%s' not found (got '%.7s')\n", cmpStr, data + 1);
+ return -1;
+ }
+
+ return next + 1;
+}
+
+/*
+ * Parse the "*version" section.
+ */
+int64_t parseVersion(DataKeys* pKeys, int64_t offset, int32_t verbose) {
+ if (offset < 0) return -1;
+
+ char* data = pKeys->fileData + offset;
+ char* dataEnd = pKeys->fileData + pKeys->fileLen;
+ int32_t next = checkToken(data, dataEnd - data, "version");
+ if (next <= 0) return -1;
+
+ data += next;
+
+ /*
+ * Count the number of items in the "version" section.
+ */
+ int32_t count = countLinesToToken(data, dataEnd - data);
+ if (count <= 0) {
+ fprintf(stderr, "ERROR: failed while reading version (found %d)\n", count);
+ return -1;
+ }
+
+ /* find the end of the line */
+ next = findNextChar(data, dataEnd - data, '\n');
+ if (next < 0) return -1;
+
+ data[next] = '\0';
+ versionNumber = strtoul(data, nullptr, 0);
+ if (verbose) printf("VERSION: %d\n", versionNumber);
+
+ data += next + 1;
+
+ /* skip over the rest of the stuff, which is "name=value" lines */
+ for (int32_t i = 1; i < count; i++) {
+ next = findNextChar(data, dataEnd - data, '\n');
+ if (next < 0) return -1;
+ // data[next] = '\0';
+ // printf("IGNORING: '%s'\n", data);
+ data += next + 1;
+ }
+
+ return data - pKeys->fileData;
+}
+
+/*
+ * Parse the "*threads" section.
+ */
+int64_t parseThreads(DataKeys* pKeys, int64_t offset) {
+ if (offset < 0) return -1;
+
+ char* data = pKeys->fileData + offset;
+ char* dataEnd = pKeys->fileData + pKeys->fileLen;
+ int32_t next = checkToken(data, dataEnd - data, "threads");
+
+ data += next;
+
+ /*
+ * Count the number of thread entries (one per line).
+ */
+ int32_t count = countLinesToToken(data, dataEnd - data);
+ if (count <= 0) {
+ fprintf(stderr, "ERROR: failed while reading threads (found %d)\n", count);
+ return -1;
+ }
+
+ // printf("+++ found %d threads\n", count);
+ pKeys->threads = new ThreadEntry[count];
+ if (pKeys->threads == nullptr) return -1;
+
+ /*
+ * Extract all entries.
+ */
+ for (int32_t i = 0; i < count; i++) {
+ next = findNextChar(data, dataEnd - data, '\n');
+ assert(next > 0);
+ data[next] = '\0';
+
+ int32_t tab = findNextChar(data, next, '\t');
+ data[tab] = '\0';
+
+ pKeys->threads[i].threadId = atoi(data);
+ pKeys->threads[i].threadName = data + tab + 1;
+
+ data += next + 1;
+ }
+
+ pKeys->numThreads = count;
+ return data - pKeys->fileData;
+}
+
+/*
+ * Parse the "*methods" section.
+ */
+int64_t parseMethods(DataKeys* pKeys, int64_t offset) {
+ if (offset < 0) return -1;
+
+ char* data = pKeys->fileData + offset;
+ char* dataEnd = pKeys->fileData + pKeys->fileLen;
+ int32_t next = checkToken(data, dataEnd - data, "methods");
+ if (next < 0) return -1;
+
+ data += next;
+
+ /*
+ * Count the number of method entries (one per line).
+ */
+ int32_t count = countLinesToToken(data, dataEnd - data);
+ if (count <= 0) {
+ fprintf(stderr, "ERROR: failed while reading methods (found %d)\n", count);
+ return -1;
+ }
+
+ /* Reserve an extra method at location 0 for the "toplevel" method,
+ * and another extra method for all other "unknown" methods.
+ */
+ count += 2;
+ pKeys->methods = new MethodEntry[count];
+ if (pKeys->methods == nullptr) return -1;
+ initMethodEntry(&pKeys->methods[TOPLEVEL_INDEX], -2, "(toplevel)", nullptr, nullptr,
+ nullptr, nullptr);
+ initMethodEntry(&pKeys->methods[UNKNOWN_INDEX], -1, "(unknown)", nullptr, nullptr,
+ nullptr, nullptr);
+
+ /*
+ * Extract all entries, starting with index 2.
+ */
+ for (int32_t i = UNKNOWN_INDEX + 1; i < count; i++) {
+ next = findNextChar(data, dataEnd - data, '\n');
+ assert(next > 0);
+ data[next] = '\0';
+
+ int32_t tab1 = findNextChar(data, next, '\t');
+ int32_t tab2 = findNextChar(data + (tab1 + 1), next - (tab1 + 1), '\t');
+ int32_t tab3 = findNextChar(data + (tab1 + tab2 + 2), next - (tab1 + tab2 + 2), '\t');
+ int32_t tab4 = findNextChar(data + (tab1 + tab2 + tab3 + 3),
+ next - (tab1 + tab2 + tab3 + 3), '\t');
+ int32_t tab5 = findNextChar(data + (tab1 + tab2 + tab3 + tab4 + 4),
+ next - (tab1 + tab2 + tab3 + tab4 + 4), '\t');
+ if (tab1 < 0) {
+ fprintf(stderr, "ERROR: missing field on method line: '%s'\n", data);
+ return -1;
+ }
+ assert(data[tab1] == '\t');
+ data[tab1] = '\0';
+
+ char* endptr;
+ int64_t id = strtoul(data, &endptr, 0);
+ if (*endptr != '\0') {
+ fprintf(stderr, "ERROR: bad method ID '%s'\n", data);
+ return -1;
+ }
+
+ // Allow files that specify just a function name, instead of requiring
+ // "class \t method \t signature"
+ if (tab2 > 0 && tab3 > 0) {
+ tab2 += tab1 + 1;
+ tab3 += tab2 + 1;
+ assert(data[tab2] == '\t');
+ assert(data[tab3] == '\t');
+ data[tab2] = data[tab3] = '\0';
+
+ // This is starting to get awkward. Allow filename and line #.
+ if (tab4 > 0 && tab5 > 0) {
+ tab4 += tab3 + 1;
+ tab5 += tab4 + 1;
+
+ assert(data[tab4] == '\t');
+ assert(data[tab5] == '\t');
+ data[tab4] = data[tab5] = '\0';
+
+ initMethodEntry(&pKeys->methods[i], id, data + tab1 + 1,
+ data + tab2 + 1, data + tab3 + 1, data + tab4 + 1,
+ data + tab5 + 1);
+ } else {
+ initMethodEntry(&pKeys->methods[i], id, data + tab1 + 1,
+ data + tab2 + 1, data + tab3 + 1, nullptr, nullptr);
+ }
+ } else {
+ initMethodEntry(&pKeys->methods[i], id, data + tab1 + 1, nullptr, nullptr, nullptr,
+ nullptr);
+ }
+
+ data += next + 1;
+ }
+
+ pKeys->numMethods = count;
+ return data - pKeys->fileData;
+}
+
+/*
+ * Parse the "*end" section.
+ */
+int64_t parseEnd(DataKeys* pKeys, int64_t offset) {
+ if (offset < 0) return -1;
+
+ char* data = pKeys->fileData + offset;
+ char* dataEnd = pKeys->fileData + pKeys->fileLen;
+ int32_t next = checkToken(data, dataEnd - data, "end");
+ if (next < 0) return -1;
+
+ data += next;
+
+ return data - pKeys->fileData;
+}
+
+/*
+ * Sort the thread list entries.
+ */
+static int32_t compareThreads(const void* thread1, const void* thread2) {
+ return ((const ThreadEntry*) thread1)->threadId -
+ ((const ThreadEntry*) thread2)->threadId;
+}
+
+void sortThreadList(DataKeys* pKeys) {
+ qsort(pKeys->threads, pKeys->numThreads, sizeof(pKeys->threads[0]), compareThreads);
+}
+
+/*
+ * Sort the method list entries.
+ */
+static int32_t compareMethods(const void* meth1, const void* meth2) {
+ int64_t id1 = ((const MethodEntry*) meth1)->methodId;
+ int64_t id2 = ((const MethodEntry*) meth2)->methodId;
+ if (id1 < id2) return -1;
+ if (id1 > id2) return 1;
+ return 0;
+}
+
+void sortMethodList(DataKeys* pKeys) {
+ qsort(pKeys->methods, pKeys->numMethods, sizeof(MethodEntry), compareMethods);
+}
+
+/*
+ * Parse the key section, and return a copy of the parsed contents.
+ */
+DataKeys* parseKeys(FILE* fp, int32_t verbose) {
+ int64_t offset;
+ DataKeys* pKeys = new DataKeys();
+ if (pKeys == nullptr) return nullptr;
+ memset(pKeys, 0, sizeof(DataKeys));
+
+ /*
+ * We load the entire file into memory. We do this, rather than memory-
+ * mapping it, because we want to change some whitespace to NULs.
+ */
+ if (fseek(fp, 0L, SEEK_END) != 0) {
+ perror("fseek");
+ freeDataKeys(pKeys);
+ return nullptr;
+ }
+ pKeys->fileLen = ftell(fp);
+ if (pKeys->fileLen == 0) {
+ fprintf(stderr, "Key file is empty.\n");
+ freeDataKeys(pKeys);
+ return nullptr;
+ }
+ rewind(fp);
+
+ pKeys->fileData = new char[pKeys->fileLen];
+ if (pKeys->fileData == nullptr) {
+ fprintf(stderr, "ERROR: unable to alloc %" PRIu64 " bytes\n", pKeys->fileLen);
+ freeDataKeys(pKeys);
+ return nullptr;
+ }
+
+ if (fread(pKeys->fileData, 1, pKeys->fileLen, fp) != (size_t)pKeys->fileLen) {
+ fprintf(stderr, "ERROR: unable to read %" PRIu64 " bytes from trace file\n", pKeys->fileLen);
+ freeDataKeys(pKeys);
+ return nullptr;
+ }
+
+ offset = 0;
+ offset = parseVersion(pKeys, offset, verbose);
+ offset = parseThreads(pKeys, offset);
+ offset = parseMethods(pKeys, offset);
+ offset = parseEnd(pKeys, offset);
+ if (offset < 0) {
+ freeDataKeys(pKeys);
+ return nullptr;
+ }
+
+ /* Reduce our allocation now that we know where the end of the key section is. */
+ pKeys->fileData = reinterpret_cast<char*>(realloc(pKeys->fileData, offset));
+ pKeys->fileLen = offset;
+ /* Leave fp pointing to the beginning of the data section. */
+ fseek(fp, offset, SEEK_SET);
+
+ sortThreadList(pKeys);
+ sortMethodList(pKeys);
+
+ /*
+ * Dump list of threads.
+ */
+ if (verbose) {
+ printf("Threads (%d):\n", pKeys->numThreads);
+ for (int32_t i = 0; i < pKeys->numThreads; i++) {
+ printf("%2d %s\n", pKeys->threads[i].threadId, pKeys->threads[i].threadName);
+ }
+ }
+
+#if 0
+ /*
+ * Dump list of methods.
+ */
+ if (verbose) {
+ printf("Methods (%d):\n", pKeys->numMethods);
+ for (int32_t i = 0; i < pKeys->numMethods; i++) {
+ printf("0x%08x %s : %s : %s\n",
+ pKeys->methods[i].methodId, pKeys->methods[i].className,
+ pKeys->methods[i].methodName, pKeys->methods[i].signature);
+ }
+ }
+#endif
+
+ return pKeys;
+}
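To make the key-file layout concrete, a tiny standalone sketch that splits a minimal key section into its token-delimited parts (field separators assumed to be tabs, as the parsers above expect; not the tool's own code):

key_text = (
    "*version\n"
    "2\n"
    "clock=thread-cpu\n"
    "*threads\n"
    "1\tmain\n"
    "*methods\n"
    "0x4\tLFoo;\trun\t()V\n"
    "*end\n"
)

sections = {}
current = None
for line in key_text.splitlines():
    if line.startswith("*"):          # TOKEN_CHAR
        current = line[1:]
        sections[current] = []
    else:
        sections[current].append(line)

assert sections["threads"] == ["1\tmain"]
assert sections["methods"][0].split("\t") == ["0x4", "LFoo;", "run", "()V"]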
+
+/*
+ * Read values from the binary data file.
+ */
+
+/*
+ * Make the return value "uint32_t" instead of "uint16_t" so that we can detect EOF.
+ */
+uint32_t read2LE(FILE* fp) {
+ uint32_t val = getc(fp);
+ val |= getc(fp) << 8;
+ return val;
+}
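+/*
+ * The wider readers below cannot signal EOF through their return value;
+ * readDataRecord() checks feof() after reading a record instead.
+ */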
+uint32_t read4LE(FILE* fp) {
+ uint32_t val = getc(fp);
+ val |= getc(fp) << 8;
+ val |= getc(fp) << 16;
+ val |= getc(fp) << 24;
+ return val;
+}
+uint64_t read8LE(FILE* fp) {
+ uint64_t val = getc(fp);
+ val |= (uint64_t) getc(fp) << 8;
+ val |= (uint64_t) getc(fp) << 16;
+ val |= (uint64_t) getc(fp) << 24;
+ val |= (uint64_t) getc(fp) << 32;
+ val |= (uint64_t) getc(fp) << 40;
+ val |= (uint64_t) getc(fp) << 48;
+ val |= (uint64_t) getc(fp) << 56;
+ return val;
+}
+
+/*
+ * Parse the header of the data section.
+ *
+ * Returns with the file positioned at the start of the record data.
+ */
+int32_t parseDataHeader(FILE* fp, DataHeader* pHeader) {
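+ /* The fixed part of the header is 16 bytes: 4-byte magic, 2-byte version,
+ * 2-byte offset to data, and an 8-byte start time. Version 3 headers add
+ * a 2-byte record size. */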
+ pHeader->magic = read4LE(fp);
+ pHeader->version = read2LE(fp);
+ pHeader->offsetToData = read2LE(fp);
+ pHeader->startWhen = read8LE(fp);
+ int32_t bytesToRead = pHeader->offsetToData - 16;
+ if (pHeader->version == 1) {
+ pHeader->recordSize = 9;
+ } else if (pHeader->version == 2) {
+ pHeader->recordSize = 10;
+ } else if (pHeader->version == 3) {
+ pHeader->recordSize = read2LE(fp);
+ bytesToRead -= 2;
+ } else {
+ fprintf(stderr, "Unsupported trace file version: %d\n", pHeader->version);
+ return -1;
+ }
+
+ if (fseek(fp, bytesToRead, SEEK_CUR) != 0) {
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * Look up a method by its method ID, using a binary search over the
+ * method list (which must already be sorted by sortMethodList()).
+ *
+ * Returns nullptr if no matching method was found.
+ */
+MethodEntry* lookupMethod(DataKeys* pKeys, int64_t methodId) {
+ int32_t lo = 0;
+ int32_t hi = pKeys->numMethods - 1;
+
+ while (hi >= lo) {
+ int32_t mid = (hi + lo) / 2;
+
+ int64_t id = pKeys->methods[mid].methodId;
+ if (id == methodId) /* match */
+ return &pKeys->methods[mid];
+ else if (id < methodId) /* too low */
+ lo = mid + 1;
+ else /* too high */
+ hi = mid - 1;
+ }
+
+ return nullptr;
+}
+
+/*
+ * Reads the next data record, and assigns the data values to threadId,
+ * methodVal and elapsedTime. On end-of-file, the threadId, methodVal,
+ * and elapsedTime are unchanged. Returns 1 on end-of-file, otherwise
+ * returns 0.
+ */
+int32_t readDataRecord(FILE* dataFp, DataHeader* dataHeader, int32_t* threadId,
+ uint32_t* methodVal, uint64_t* elapsedTime) {
+ int32_t id;
+ int32_t bytesToRead = dataHeader->recordSize;
+ if (dataHeader->version == 1) {
+ id = getc(dataFp);
+ bytesToRead -= 1;
+ } else {
+ id = read2LE(dataFp);
+ bytesToRead -= 2;
+ }
+ if (id == EOF) return 1;
+ *threadId = id;
+
+ *methodVal = read4LE(dataFp);
+ *elapsedTime = read4LE(dataFp);
+ bytesToRead -= 8;
+
+ while (bytesToRead-- > 0) {
+ getc(dataFp);
+ }
+
+ if (feof(dataFp)) {
+ fprintf(stderr, "WARNING: hit EOF mid-record\n");
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Read the key file and use it to produce formatted output from the
+ * data file.
+ */
+void dumpTrace() {
+ static const char* actionStr[] = {"ent", "xit", "unr", "???"};
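+ /* Fallback entry used when a record refers to a method id that is not
+ * present in the key file. */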
+ MethodEntry bogusMethod = {
+ 0, "???", "???", "???", "???", -1, 0, 0,
+ 0, 0, {nullptr, nullptr}, {nullptr, nullptr}, {0, 0}, 0, 0, -1};
+ char bogusBuf[80];
+ TraceData traceData;
+
+ // printf("Dumping '%s' '%s'\n", dataFileName, keyFileName);
+
+ char spaces[MAX_STACK_DEPTH + 1];
+ memset(spaces, '.', MAX_STACK_DEPTH);
+ spaces[MAX_STACK_DEPTH] = '\0';
+
+ for (int32_t i = 0; i < MAX_THREADS; i++)
+ traceData.depth[i] = 2; // adjust for return from start function
+
+ FILE* dataFp = fopen(gOptions.traceFileName, "rb");
+ if (dataFp == nullptr) return;
+
+ DataKeys* pKeys = parseKeys(dataFp, 1);
+ if (pKeys == nullptr) {
+ fclose(dataFp);
+ return;
+ }
+
+ DataHeader dataHeader;
+ if (parseDataHeader(dataFp, &dataHeader) < 0) {
+ fclose(dataFp);
+ freeDataKeys(pKeys);
+ return;
+ }
+
+ printf("Trace (threadID action usecs class.method signature):\n");
+
+ /* These are kept across loop iterations so that the quick test for
+ * mismatched adjacent enter/exit records below can see the previous record. */
+ int64_t lastEnter = 0;
+ int32_t mismatch = 0;
+ while (1) {
+ /*
+ * Extract values from file.
+ */
+ int32_t threadId;
+ uint32_t methodVal;
+ uint64_t elapsedTime;
+ if (readDataRecord(dataFp, &dataHeader, &threadId, &methodVal, &elapsedTime))
+ break;
+
+ int32_t action = METHOD_ACTION(methodVal);
+ int64_t methodId = METHOD_ID(methodVal);
+
+ /*
+ * Generate a line of output.
+ */
+ if (action == METHOD_TRACE_ENTER) {
+ traceData.depth[threadId]++;
+ lastEnter = methodId;
+ } else {
+ /* quick test for mismatched adjacent enter/exit */
+ if (lastEnter != 0 && lastEnter != methodId) mismatch = 1;
+ }
+
+ int32_t printDepth = traceData.depth[threadId];
+ char depthNote = ' ';
+ if (printDepth < 0) {
+ printDepth = 0;
+ depthNote = '-';
+ } else if (printDepth > MAX_STACK_DEPTH) {
+ printDepth = MAX_STACK_DEPTH;
+ depthNote = '+';
+ }
+
+ MethodEntry* method = lookupMethod(pKeys, methodId);
+ if (method == nullptr) {
+ method = &bogusMethod;
+ sprintf(bogusBuf, "methodId: %#" PRIx64 "", methodId);
+ method->signature = bogusBuf;
+ }
+
+ if (method->methodName) {
+ printf("%2d %s%c %8" PRIu64 "%c%s%s.%s %s\n", threadId, actionStr[action],
+ mismatch ? '!' : ' ', elapsedTime, depthNote,
+ spaces + (MAX_STACK_DEPTH - printDepth), method->className,
+ method->methodName, method->signature);
+ } else {
+ printf("%2d %s%c %8" PRIu64 "%c%s%s\n", threadId, actionStr[action],
+ mismatch ? '!' : ' ', elapsedTime, depthNote,
+ spaces + (MAX_STACK_DEPTH - printDepth), method->className);
+ }
+
+ if (action != METHOD_TRACE_ENTER) {
+ traceData.depth[threadId]--; /* METHOD_TRACE_EXIT or METHOD_TRACE_UNROLL */
+ lastEnter = 0;
+ }
+
+ mismatch = 0;
+ }
+
+ fclose(dataFp);
+ freeDataKeys(pKeys);
+}
+
+/* This routine adds the given time to the parent and child methods.
+ * This is called when the child routine exits, after the child has
+ * been popped from the stack. The elapsedTime parameter is the
+ * duration of the child routine, including time spent in called routines.
+ */
+void addInclusiveTime(MethodEntry* parent, MethodEntry* child, uint64_t elapsedTime) {
+#if 0
+ bool verbose = false;
+ if (strcmp(child->className, debugClassName) == 0)
+ verbose = true;
+#endif
+
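+ /* These flags index the two-element numCalls/children/parents arrays:
+ * slot 0 accumulates non-recursive calls, slot 1 recursive ones. */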
+ int32_t childIsRecursive = (child->recursiveEntries > 0);
+ int32_t parentIsRecursive = (parent->recursiveEntries > 1);
+
+ if (child->recursiveEntries == 0) {
+ child->elapsedInclusive += elapsedTime;
+ } else if (child->recursiveEntries == 1) {
+ child->recursiveInclusive += elapsedTime;
+ }
+ child->numCalls[childIsRecursive] += 1;
+
+#if 0
+ if (verbose) {
+ fprintf(stderr,
+ "%s %d elapsedTime: %lld eI: %lld, rI: %lld\n",
+ child->className, child->recursiveEntries,
+ elapsedTime, child->elapsedInclusive,
+ child->recursiveInclusive);
+ }
+#endif
+
+ /* Find the child method in the parent */
+ TimedMethod* pTimed;
+ TimedMethod* children = parent->children[parentIsRecursive];
+ for (pTimed = children; pTimed; pTimed = pTimed->next) {
+ if (pTimed->method == child) {
+ pTimed->elapsedInclusive += elapsedTime;
+ pTimed->numCalls += 1;
+ break;
+ }
+ }
+ if (pTimed == nullptr) {
+ /* Allocate a new TimedMethod */
+ pTimed = new TimedMethod();
+ pTimed->elapsedInclusive = elapsedTime;
+ pTimed->numCalls = 1;
+ pTimed->method = child;
+
+ /* Add it to the front of the list */
+ pTimed->next = children;
+ parent->children[parentIsRecursive] = pTimed;
+ }
+
+ /* Find the parent method in the child */
+ TimedMethod* parents = child->parents[childIsRecursive];
+ for (pTimed = parents; pTimed; pTimed = pTimed->next) {
+ if (pTimed->method == parent) {
+ pTimed->elapsedInclusive += elapsedTime;
+ pTimed->numCalls += 1;
+ break;
+ }
+ }
+ if (pTimed == nullptr) {
+ /* Allocate a new TimedMethod */
+ pTimed = new TimedMethod();
+ pTimed->elapsedInclusive = elapsedTime;
+ pTimed->numCalls = 1;
+ pTimed->method = parent;
+
+ /* Add it to the front of the list */
+ pTimed->next = parents;
+ child->parents[childIsRecursive] = pTimed;
+ }
+
+#if 0
+ if (verbose) {
+ fprintf(stderr,
+ " %s %d eI: %lld\n",
+ parent->className, parent->recursiveEntries,
+ pTimed->elapsedInclusive);
+ }
+#endif
+}
+
+/* Sorts a linked list and returns a newly allocated array containing
+ * the sorted entries.
+ */
+TimedMethod* sortTimedMethodList(TimedMethod* list, int32_t* num) {
+ /* Count the elements */
+ TimedMethod* pTimed;
+ int32_t num_entries = 0;
+ for (pTimed = list; pTimed; pTimed = pTimed->next) num_entries += 1;
+ *num = num_entries;
+ if (num_entries == 0) return nullptr;
+
+ /* Copy all the list elements to a new array and sort them */
+ int32_t ii;
+ TimedMethod* sorted = new TimedMethod[num_entries];
+ for (ii = 0, pTimed = list; pTimed; pTimed = pTimed->next, ++ii)
+ memcpy(&sorted[ii], pTimed, sizeof(TimedMethod));
+ qsort(sorted, num_entries, sizeof(TimedMethod), compareTimedMethod);
+
+ /* Fix up the "next" pointers so that they work. */
+ for (ii = 0; ii < num_entries - 1; ++ii) sorted[ii].next = &sorted[ii + 1];
+ sorted[num_entries - 1].next = nullptr;
+
+ return sorted;
+}
+
+/* Define flag values for printInclusiveMethod() */
+static const int32_t kIsRecursive = 1;
+
+/* This prints the inclusive stats for all the parents or children of a
+ * method, depending on the list that is passed in.
+ */
+void printInclusiveMethod(MethodEntry* method, TimedMethod* list, int32_t numCalls, int32_t flags) {
+ char buf[80];
+ const char* anchor_close = "";
+ const char* spaces = "      "; /* 6 spaces */
+ int32_t num_spaces = strlen(spaces);
+ const char* space_ptr = &spaces[num_spaces];
+ char classBuf[HTML_BUFSIZE], methodBuf[HTML_BUFSIZE];
+ char signatureBuf[HTML_BUFSIZE];
+
+ if (gOptions.outputHtml) anchor_close = "</a>";
+
+ int32_t num;
+ TimedMethod* sorted = sortTimedMethodList(list, &num);
+ double methodTotal = method->elapsedInclusive;
+ for (TimedMethod* pTimed = sorted; pTimed; pTimed = pTimed->next) {
+ MethodEntry* relative = pTimed->method;
+ const char* className = relative->className;
+ const char* methodName = relative->methodName;
+ const char* signature = relative->signature;
+ double per = 100.0 * pTimed->elapsedInclusive / methodTotal;
+ sprintf(buf, "[%d]", relative->index);
+ if (gOptions.outputHtml) {
+ int32_t len = strlen(buf);
+ if (len > num_spaces) len = num_spaces;
+ sprintf(buf, "<a href=\"#m%d\">[%d]", relative->index, relative->index);
+ space_ptr = &spaces[len];
+ className = htmlEscape(className, classBuf, HTML_BUFSIZE);
+ methodName = htmlEscape(methodName, methodBuf, HTML_BUFSIZE);
+ signature = htmlEscape(signature, signatureBuf, HTML_BUFSIZE);
+ }
+ int32_t nCalls = numCalls;
+ if (nCalls == 0) nCalls = relative->numCalls[0] + relative->numCalls[1];
+ if (relative->methodName) {
+ if (flags & kIsRecursive) {
+ // Don't display percentages for recursive functions
+ printf("%6s %5s %6s %s%6s%s %6d/%-6d %9" PRIu64 " %s.%s %s\n", "", "",
+ "", space_ptr, buf, anchor_close, pTimed->numCalls, nCalls,
+ pTimed->elapsedInclusive, className, methodName, signature);
+ } else {
+ printf("%6s %5s %5.1f%% %s%6s%s %6d/%-6d %9" PRIu64 " %s.%s %s\n", "",
+ "", per, space_ptr, buf, anchor_close, pTimed->numCalls, nCalls,
+ pTimed->elapsedInclusive, className, methodName, signature);
+ }
+ } else {
+ if (flags & kIsRecursive) {
+ // Don't display percentages for recursive functions
+ printf("%6s %5s %6s %s%6s%s %6d/%-6d %9" PRIu64 " %s\n", "", "", "",
+ space_ptr, buf, anchor_close, pTimed->numCalls, nCalls,
+ pTimed->elapsedInclusive, className);
+ } else {
+ printf("%6s %5s %5.1f%% %s%6s%s %6d/%-6d %9" PRIu64 " %s\n", "", "",
+ per, space_ptr, buf, anchor_close, pTimed->numCalls, nCalls,
+ pTimed->elapsedInclusive, className);
+ }
+ }
+ }
+}
+
+void countRecursiveEntries(CallStack* pStack, int32_t top, MethodEntry* method) {
+ method->recursiveEntries = 0;
+ for (int32_t ii = 0; ii < top; ++ii) {
+ if (pStack->calls[ii].method == method) method->recursiveEntries += 1;
+ }
+}
+
+void stackDump(CallStack* pStack, int32_t top) {
+ for (int32_t ii = 0; ii < top; ++ii) {
+ MethodEntry* method = pStack->calls[ii].method;
+ uint64_t entryTime = pStack->calls[ii].entryTime;
+ if (method->methodName) {
+ fprintf(stderr, " %2d: %8" PRIu64 " %s.%s %s\n", ii, entryTime,
+ method->className, method->methodName, method->signature);
+ } else {
+ fprintf(stderr, " %2d: %8" PRIu64 " %s\n", ii, entryTime, method->className);
+ }
+ }
+}
+
+void outputTableOfContents() {
+ printf("<a name=\"contents\"></a>\n");
+ printf("<h2>Table of Contents</h2>\n");
+ printf("<ul>\n");
+ printf(" <li><a href=\"#exclusive\">Exclusive profile</a></li>\n");
+ printf(" <li><a href=\"#inclusive\">Inclusive profile</a></li>\n");
+ printf(" <li><a href=\"#class\">Class/method profile</a></li>\n");
+ printf(" <li><a href=\"#method\">Method/class profile</a></li>\n");
+ printf("</ul>\n\n");
+}
+
+void outputNavigationBar() {
+ printf("<a href=\"#contents\">[Top]</a>\n");
+ printf("<a href=\"#exclusive\">[Exclusive]</a>\n");
+ printf("<a href=\"#inclusive\">[Inclusive]</a>\n");
+ printf("<a href=\"#class\">[Class]</a>\n");
+ printf("<a href=\"#method\">[Method]</a>\n");
+ printf("<br><br>\n");
+}
+
+void printExclusiveProfile(MethodEntry** pMethods, int32_t numMethods, uint64_t sumThreadTime) {
+ char classBuf[HTML_BUFSIZE], methodBuf[HTML_BUFSIZE];
+ char signatureBuf[HTML_BUFSIZE];
+ const char* anchor_close = "";
+ char anchor_buf[80];
+ anchor_buf[0] = 0;
+ if (gOptions.outputHtml) {
+ anchor_close = "</a>";
+ printf("<a name=\"exclusive\"></a>\n");
+ printf("<hr>\n");
+ outputNavigationBar();
+ } else {
+ printf("\n%s\n", profileSeparator);
+ }
+
+ /* First, sort the methods into decreasing order of inclusive
+ * elapsed time so that we can assign the method indices.
+ */
+ qsort(pMethods, numMethods, sizeof(MethodEntry*), compareElapsedInclusive);
+
+ for (int32_t ii = 0; ii < numMethods; ++ii) pMethods[ii]->index = ii;
+
+ /* Sort the methods into decreasing order of exclusive elapsed time. */
+ qsort(pMethods, numMethods, sizeof(MethodEntry*), compareElapsedExclusive);
+
+ printf("Total cycles: %" PRIu64 "\n\n", sumThreadTime);
+ if (gOptions.outputHtml) {
+ printf("<br><br>\n");
+ }
+ printf("Exclusive elapsed times for each method, not including time spent in\n");
+ printf("children, sorted by exclusive time.\n\n");
+ if (gOptions.outputHtml) {
+ printf("<br><br>\n<pre>\n");
+ }
+
+ printf(" Usecs self %% sum %% Method\n");
+
+ double sum = 0;
+ double total = sumThreadTime;
+ for (int32_t ii = 0; ii < numMethods; ++ii) {
+ MethodEntry* method = pMethods[ii];
+ /* Don't show methods with zero cycles */
+ if (method->elapsedExclusive == 0) break;
+ const char* className = method->className;
+ const char* methodName = method->methodName;
+ const char* signature = method->signature;
+ sum += method->elapsedExclusive;
+ double per = 100.0 * method->elapsedExclusive / total;
+ double sum_per = 100.0 * sum / total;
+ if (gOptions.outputHtml) {
+ sprintf(anchor_buf, "<a href=\"#m%d\">", method->index);
+ className = htmlEscape(className, classBuf, HTML_BUFSIZE);
+ methodName = htmlEscape(methodName, methodBuf, HTML_BUFSIZE);
+ signature = htmlEscape(signature, signatureBuf, HTML_BUFSIZE);
+ }
+ if (method->methodName) {
+ printf("%9" PRIu64 " %6.2f %6.2f %s[%d]%s %s.%s %s\n",
+ method->elapsedExclusive, per, sum_per, anchor_buf, method->index,
+ anchor_close, className, methodName, signature);
+ } else {
+ printf("%9" PRIu64 " %6.2f %6.2f %s[%d]%s %s\n",
+ method->elapsedExclusive, per, sum_per, anchor_buf, method->index,
+ anchor_close, className);
+ }
+ }
+ if (gOptions.outputHtml) {
+ printf("</pre>\n");
+ }
+}
+
+/* check to make sure that the child method meets the threshold of the parent */
+int32_t checkThreshold(MethodEntry* parent, MethodEntry* child) {
+ double parentTime = parent->elapsedInclusive;
+ double childTime = child->elapsedInclusive;
+ int64_t percentage = (childTime / parentTime) * 100.0;
+ return (percentage < gOptions.threshold) ? 0 : 1;
+}
+
+void createLabels(FILE* file, MethodEntry* method) {
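+ /* Emit one dot node for this method, labeled with its index, name,
+ * inclusive and exclusive times (divided by 1000), and call count, then
+ * recurse into unlabeled children that meet the threshold. */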
+ fprintf(file,
+ "node%d[label = \"[%d] %s.%s (%" PRIu64 ", %" PRIu64 ", %d)\"]\n",
+ method->index, method->index, method->className, method->methodName,
+ method->elapsedInclusive / 1000, method->elapsedExclusive / 1000,
+ method->numCalls[0]);
+
+ method->graphState = GRAPH_LABEL_VISITED;
+
+ for (TimedMethod* child = method->children[0]; child; child = child->next) {
+ MethodEntry* childMethod = child->method;
+
+ if ((childMethod->graphState & GRAPH_LABEL_VISITED) == 0 &&
+ checkThreshold(method, childMethod)) {
+ createLabels(file, child->method);
+ }
+ }
+}
+
+void createLinks(FILE* file, MethodEntry* method) {
+ method->graphState |= GRAPH_NODE_VISITED;
+
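+ /* Emit an edge to every child that meets the threshold, but recurse only
+ * into children that have not been visited yet. */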
+ for (TimedMethod* child = method->children[0]; child; child = child->next) {
+ MethodEntry* childMethod = child->method;
+ if (checkThreshold(method, child->method)) {
+ fprintf(file, "node%d -> node%d\n", method->index, child->method->index);
+ // only visit children that haven't been visited before
+ if ((childMethod->graphState & GRAPH_NODE_VISITED) == 0) {
+ createLinks(file, child->method);
+ }
+ }
+ }
+}
+
+void createInclusiveProfileGraphNew(DataKeys* dataKeys) {
+ // create a temporary dot file in the current directory
+ char path[FILENAME_MAX];
+ if (gOptions.keepDotFile) {
+ snprintf(path, FILENAME_MAX, "%s.dot", gOptions.graphFileName);
+ } else {
+ snprintf(path, FILENAME_MAX, "dot-%d-%d.dot", (int32_t)time(nullptr), rand());
+ }
+
+ FILE* file = fopen(path, "w+");
+ if (file == nullptr) return;
+
+ fprintf(file, "digraph g {\nnode [shape = record,height=.1];\n");
+
+ createLabels(file, dataKeys->methods);
+ createLinks(file, dataKeys->methods);
+
+ fprintf(file, "}");
+ fclose(file);
+
+ // now that we have the dot file generate the image
+ char command[1024];
+ snprintf(command, 1024, "dot -Tpng -o \"%s\" \"%s\"", gOptions.graphFileName, path);
+
+ system(command);
+
+ if (!gOptions.keepDotFile) {
+ remove(path);
+ }
+}
+
+void printInclusiveProfile(MethodEntry** pMethods, int32_t numMethods, uint64_t sumThreadTime) {
+ char classBuf[HTML_BUFSIZE], methodBuf[HTML_BUFSIZE];
+ char signatureBuf[HTML_BUFSIZE];
+ char anchor_buf[80];
+ const char* anchor_close = "";
+ anchor_buf[0] = 0;
+ if (gOptions.outputHtml) {
+ anchor_close = "</a>";
+ printf("<a name=\"inclusive\"></a>\n");
+ printf("<hr>\n");
+ outputNavigationBar();
+ } else {
+ printf("\n%s\n", profileSeparator);
+ }
+
+ /* Sort the methods into decreasing order of inclusive elapsed time. */
+ qsort(pMethods, numMethods, sizeof(MethodEntry*), compareElapsedInclusive);
+
+ printf("\nInclusive elapsed times for each method and its parents and children,\n");
+ printf("sorted by inclusive time.\n\n");
+
+ if (gOptions.outputHtml) {
+ printf("<br><br>\n<pre>\n");
+ }
+
+ printf("index %%/total %%/self index calls usecs name\n");
+
+ double total = sumThreadTime;
+ for (int32_t ii = 0; ii < numMethods; ++ii) {
+ char buf[40];
+
+ MethodEntry* method = pMethods[ii];
+ /* Don't show methods with zero cycles */
+ if (method->elapsedInclusive == 0) break;
+
+ const char* className = method->className;
+ const char* methodName = method->methodName;
+ const char* signature = method->signature;
+
+ if (gOptions.outputHtml) {
+ printf("<a name=\"m%d\"></a>", method->index);
+ className = htmlEscape(className, classBuf, HTML_BUFSIZE);
+ methodName = htmlEscape(methodName, methodBuf, HTML_BUFSIZE);
+ signature = htmlEscape(signature, signatureBuf, HTML_BUFSIZE);
+ }
+ printf("----------------------------------------------------\n");
+
+ /* Sort and print the parents */
+ int32_t numCalls = method->numCalls[0] + method->numCalls[1];
+ printInclusiveMethod(method, method->parents[0], numCalls, 0);
+ if (method->parents[1]) {
+ printf(" +++++++++++++++++++++++++\n");
+ printInclusiveMethod(method, method->parents[1], numCalls, kIsRecursive);
+ }
+
+ double per = 100.0 * method->elapsedInclusive / total;
+ sprintf(buf, "[%d]", ii);
+ if (method->methodName) {
+ printf("%-6s %5.1f%% %5s %6s %6d+%-6d %9" PRIu64 " %s.%s %s\n", buf,
+ per, "", "", method->numCalls[0], method->numCalls[1],
+ method->elapsedInclusive, className, methodName, signature);
+ } else {
+ printf("%-6s %5.1f%% %5s %6s %6d+%-6d %9" PRIu64 " %s\n", buf, per, "",
+ "", method->numCalls[0], method->numCalls[1],
+ method->elapsedInclusive, className);
+ }
+ double excl_per = 100.0 * method->topExclusive / method->elapsedInclusive;
+ printf("%6s %5s %5.1f%% %6s %6s %6s %9" PRIu64 "\n", "", "", excl_per,
+ "excl", "", "", method->topExclusive);
+
+ /* Sort and print the children */
+ printInclusiveMethod(method, method->children[0], 0, 0);
+ if (method->children[1]) {
+ printf(" +++++++++++++++++++++++++\n");
+ printInclusiveMethod(method, method->children[1], 0, kIsRecursive);
+ }
+ }
+ if (gOptions.outputHtml) {
+ printf("</pre>\n");
+ }
+}
+
+void createClassList(TraceData* traceData, MethodEntry** pMethods, int32_t numMethods) {
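+ /* Three passes over the method list: count the unique class names, record
+ * how many methods each class has, then fill in each class's array of
+ * MethodEntry pointers. */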
+ /* Sort the methods into alphabetical order to find the unique class
+ * names.
+ */
+ qsort(pMethods, numMethods, sizeof(MethodEntry*), compareClassNames);
+
+ /* Count the number of unique class names. */
+ const char* currentClassName = "";
+ const char* firstClassName = nullptr;
+ traceData->numClasses = 0;
+ for (int32_t ii = 0; ii < numMethods; ++ii) {
+ if (pMethods[ii]->methodName == nullptr) {
+ continue;
+ }
+ if (strcmp(pMethods[ii]->className, currentClassName) != 0) {
+ // Remember the first one
+ if (firstClassName == nullptr) {
+ firstClassName = pMethods[ii]->className;
+ }
+ traceData->numClasses += 1;
+ currentClassName = pMethods[ii]->className;
+ }
+ }
+
+ if (traceData->numClasses == 0) {
+ traceData->classes = nullptr;
+ return;
+ }
+
+ /* Allocate space for all of the unique class names */
+ traceData->classes = new ClassEntry[traceData->numClasses];
+
+ /* Initialize the classes array */
+ memset(traceData->classes, 0, sizeof(ClassEntry) * traceData->numClasses);
+ ClassEntry* pClass = traceData->classes;
+ pClass->className = currentClassName = firstClassName;
+ int32_t prevNumMethods = 0;
+ for (int32_t ii = 0; ii < numMethods; ++ii) {
+ if (pMethods[ii]->methodName == nullptr) {
+ continue;
+ }
+ if (strcmp(pMethods[ii]->className, currentClassName) != 0) {
+ pClass->numMethods = prevNumMethods;
+ (++pClass)->className = currentClassName = pMethods[ii]->className;
+ prevNumMethods = 0;
+ }
+ prevNumMethods += 1;
+ }
+ pClass->numMethods = prevNumMethods;
+
+ /* Create the array of MethodEntry pointers for each class */
+ pClass = nullptr;
+ currentClassName = "";
+ int32_t nextMethod = 0;
+ for (int32_t ii = 0; ii < numMethods; ++ii) {
+ if (pMethods[ii]->methodName == nullptr) {
+ continue;
+ }
+ if (strcmp(pMethods[ii]->className, currentClassName) != 0) {
+ currentClassName = pMethods[ii]->className;
+ if (pClass == nullptr)
+ pClass = traceData->classes;
+ else
+ pClass++;
+ /* Allocate space for the methods array */
+ pClass->methods = new MethodEntry*[pClass->numMethods];
+ nextMethod = 0;
+ }
+ pClass->methods[nextMethod++] = pMethods[ii];
+ }
+}
+
+/* Prints the string "buf" padded with html non-breaking spaces so that its
+ * printed width is at least "width" characters. If width is negative, the
+ * spaces are added after the string instead of before it.
+ */
+void printHtmlField(char* buf, int32_t width) {
+ int32_t leadingSpaces = 1;
+ if (width < 0) {
+ width = -width;
+ leadingSpaces = 0;
+ }
+ int32_t len = strlen(buf);
+ int32_t numSpaces = width - len;
+ if (numSpaces <= 0) {
+ printf("%s", buf);
+ return;
+ }
+ if (leadingSpaces == 0) printf("%s", buf);
+ for (int32_t ii = 0; ii < numSpaces; ++ii) printf("&nbsp;");
+ if (leadingSpaces == 1) printf("%s", buf);
+}
+
+void printClassProfiles(TraceData* traceData, uint64_t sumThreadTime) {
+ char classBuf[HTML_BUFSIZE];
+ char methodBuf[HTML_BUFSIZE];
+ char signatureBuf[HTML_BUFSIZE];
+
+ if (gOptions.outputHtml) {
+ printf("<a name=\"class\"></a>\n");
+ printf("<hr>\n");
+ outputNavigationBar();
+ } else {
+ printf("\n%s\n", profileSeparator);
+ }
+
+ if (traceData->numClasses == 0) {
+ printf("\nNo classes.\n");
+ if (gOptions.outputHtml) {
+ printf("<br><br>\n");
+ }
+ return;
+ }
+
+ printf("\nExclusive elapsed time for each class, summed over all the methods\n");
+ printf("in the class.\n\n");
+ if (gOptions.outputHtml) {
+ printf("<br><br>\n");
+ }
+
+ /* For each class, sum the exclusive times in all of the methods
+ * in that class. Also sum the number of method calls. Also
+ * sort the methods so the most expensive appear at the top.
+ */
+ ClassEntry* pClass = traceData->classes;
+ for (int32_t ii = 0; ii < traceData->numClasses; ++ii, ++pClass) {
+ // printf("%s %d methods\n", pClass->className, pClass->numMethods);
+ int32_t numMethods = pClass->numMethods;
+ for (int32_t jj = 0; jj < numMethods; ++jj) {
+ MethodEntry* method = pClass->methods[jj];
+ pClass->elapsedExclusive += method->elapsedExclusive;
+ pClass->numCalls[0] += method->numCalls[0];
+ pClass->numCalls[1] += method->numCalls[1];
+ }
+
+ /* Sort the methods into decreasing order of exclusive time */
+ qsort(pClass->methods, numMethods, sizeof(MethodEntry*), compareElapsedExclusive);
+ }
+
+ /* Allocate an array of pointers to the classes for more efficient sorting. */
+ ClassEntry** pClasses = new ClassEntry*[traceData->numClasses];
+ for (int32_t ii = 0; ii < traceData->numClasses; ++ii)
+ pClasses[ii] = &traceData->classes[ii];
+
+ /* Sort the classes into decreasing order of exclusive time */
+ qsort(pClasses, traceData->numClasses, sizeof(ClassEntry*), compareClassExclusive);
+
+ if (gOptions.outputHtml) {
+ printf(
+ "<div class=\"header\"><span "
+ "class=\"parent\">&nbsp;</span>&nbsp;&nbsp;&nbsp;");
+ printf("Cycles %%/total Cumul.%% &nbsp;Calls+Recur&nbsp; Class</div>\n");
+ } else {
+ printf(" Cycles %%/total Cumul.%% Calls+Recur Class\n");
+ }
+
+ double sum = 0;
+ double total = sumThreadTime;
+ for (int32_t ii = 0; ii < traceData->numClasses; ++ii) {
+ /* Skip classes with zero cycles */
+ pClass = pClasses[ii];
+ if (pClass->elapsedExclusive == 0) break;
+
+ sum += pClass->elapsedExclusive;
+ double per = 100.0 * pClass->elapsedExclusive / total;
+ double sum_per = 100.0 * sum / total;
+ const char* className = pClass->className;
+ if (gOptions.outputHtml) {
+ char buf[80];
+
+ className = htmlEscape(className, classBuf, HTML_BUFSIZE);
+ printf(
+ "<div class=\"link\" onClick=\"javascript:toggle('d%d')\" "
+ "onMouseOver=\"javascript:onMouseOver(this)\" "
+ "onMouseOut=\"javascript:onMouseOut(this)\"><span class=\"parent\" "
+ "id=\"xd%d\">+</span>",
+ ii, ii);
+ sprintf(buf, "%" PRIu64, pClass->elapsedExclusive);
+ printHtmlField(buf, 9);
+ printf(" ");
+ sprintf(buf, "%.1f", per);
+ printHtmlField(buf, 7);
+ printf(" ");
+ sprintf(buf, "%.1f", sum_per);
+ printHtmlField(buf, 7);
+ printf(" ");
+ sprintf(buf, "%d", pClass->numCalls[0]);
+ printHtmlField(buf, 6);
+ printf("+");
+ sprintf(buf, "%d", pClass->numCalls[1]);
+ printHtmlField(buf, -6);
+ printf(" ");
+ printf("%s", className);
+ printf("</div>\n");
+ printf("<div class=\"parent\" id=\"d%d\">\n", ii);
+ } else {
+ printf("---------------------------------------------\n");
+ printf("%9" PRIu64 " %7.1f %7.1f %6d+%-6d %s\n", pClass->elapsedExclusive,
+ per, sum_per, pClass->numCalls[0], pClass->numCalls[1], className);
+ }
+
+ int32_t numMethods = pClass->numMethods;
+ double classExclusive = pClass->elapsedExclusive;
+ double sumMethods = 0;
+ for (int32_t jj = 0; jj < numMethods; ++jj) {
+ MethodEntry* method = pClass->methods[jj];
+ const char* methodName = method->methodName;
+ const char* signature = method->signature;
+ per = 100.0 * method->elapsedExclusive / classExclusive;
+ sumMethods += method->elapsedExclusive;
+ sum_per = 100.0 * sumMethods / classExclusive;
+ if (gOptions.outputHtml) {
+ char buf[80];
+
+ methodName = htmlEscape(methodName, methodBuf, HTML_BUFSIZE);
+ signature = htmlEscape(signature, signatureBuf, HTML_BUFSIZE);
+ printf("<div class=\"leaf\"><span class=\"leaf\">&nbsp;</span>");
+ sprintf(buf, "%" PRIu64, method->elapsedExclusive);
+ printHtmlField(buf, 9);
+ printf("&nbsp;");
+ sprintf(buf, "%" PRIu64, method->elapsedInclusive);
+ printHtmlField(buf, 9);
+ printf("&nbsp;");
+ sprintf(buf, "%.1f", per);
+ printHtmlField(buf, 7);
+ printf("&nbsp;");
+ sprintf(buf, "%.1f", sum_per);
+ printHtmlField(buf, 7);
+ printf("&nbsp;");
+ sprintf(buf, "%d", method->numCalls[0]);
+ printHtmlField(buf, 6);
+ printf("+");
+ sprintf(buf, "%d", method->numCalls[1]);
+ printHtmlField(buf, -6);
+ printf("&nbsp;");
+ printf("<a href=\"#m%d\">[%d]</a>&nbsp;%s&nbsp;%s", method->index,
+ method->index, methodName, signature);
+ printf("</div>\n");
+ } else {
+ printf("%9" PRIu64 " %9" PRIu64 " %7.1f %7.1f %6d+%-6d [%d] %s %s\n",
+ method->elapsedExclusive, method->elapsedInclusive, per, sum_per,
+ method->numCalls[0], method->numCalls[1], method->index,
+ methodName, signature);
+ }
+ }
+ if (gOptions.outputHtml) {
+ printf("</div>\n");
+ }
+ }
+}
+
+void createUniqueMethodList(TraceData* traceData, MethodEntry** pMethods, int32_t numMethods) {
+ /* Sort the methods into alphabetical order of method names
+ * to find the unique method names.
+ */
+ qsort(pMethods, numMethods, sizeof(MethodEntry*), compareMethodNames);
+
+ /* Count the number of unique method names, ignoring class and signature. */
+ const char* currentMethodName = "";
+ traceData->numUniqueMethods = 0;
+ for (int32_t ii = 0; ii < numMethods; ++ii) {
+ if (pMethods[ii]->methodName == nullptr) continue;
+ if (strcmp(pMethods[ii]->methodName, currentMethodName) != 0) {
+ traceData->numUniqueMethods += 1;
+ currentMethodName = pMethods[ii]->methodName;
+ }
+ }
+ if (traceData->numUniqueMethods == 0) return;
+
+ /* Allocate space for pointers to all of the unique methods */
+ traceData->uniqueMethods = new UniqueMethodEntry[traceData->numUniqueMethods];
+
+ /* Initialize the uniqueMethods array */
+ memset(traceData->uniqueMethods, 0, sizeof(UniqueMethodEntry) * traceData->numUniqueMethods);
+ UniqueMethodEntry* pUnique = traceData->uniqueMethods;
+ currentMethodName = nullptr;
+ int32_t prevNumMethods = 0;
+ for (int32_t ii = 0; ii < numMethods; ++ii) {
+ if (pMethods[ii]->methodName == nullptr) continue;
+ if (currentMethodName == nullptr) currentMethodName = pMethods[ii]->methodName;
+ if (strcmp(pMethods[ii]->methodName, currentMethodName) != 0) {
+ currentMethodName = pMethods[ii]->methodName;
+ pUnique->numMethods = prevNumMethods;
+ pUnique++;
+ prevNumMethods = 0;
+ }
+ prevNumMethods += 1;
+ }
+ pUnique->numMethods = prevNumMethods;
+
+ /* Create the array of MethodEntry pointers for each unique method */
+ pUnique = nullptr;
+ currentMethodName = "";
+ int32_t nextMethod = 0;
+ for (int32_t ii = 0; ii < numMethods; ++ii) {
+ if (pMethods[ii]->methodName == nullptr) continue;
+ if (strcmp(pMethods[ii]->methodName, currentMethodName) != 0) {
+ currentMethodName = pMethods[ii]->methodName;
+ if (pUnique == nullptr)
+ pUnique = traceData->uniqueMethods;
+ else
+ pUnique++;
+ /* Allocate space for the methods array */
+ pUnique->methods = new MethodEntry*[pUnique->numMethods];
+ nextMethod = 0;
+ }
+ pUnique->methods[nextMethod++] = pMethods[ii];
+ }
+}
+
+void printMethodProfiles(TraceData* traceData, uint64_t sumThreadTime) {
+ char classBuf[HTML_BUFSIZE], methodBuf[HTML_BUFSIZE];
+ char signatureBuf[HTML_BUFSIZE];
+
+ if (traceData->numUniqueMethods == 0) return;
+
+ if (gOptions.outputHtml) {
+ printf("<a name=\"method\"></a>\n");
+ printf("<hr>\n");
+ outputNavigationBar();
+ } else {
+ printf("\n%s\n", profileSeparator);
+ }
+
+ printf("\nExclusive elapsed time for each method, summed over all the classes\n");
+ printf("that contain a method with the same name.\n\n");
+ if (gOptions.outputHtml) {
+ printf("<br><br>\n");
+ }
+
+ /* For each unique method, sum the exclusive times in all of the methods
+ * with the same name. Also sum the number of method calls. Also
+ * sort the methods so the most expensive appear at the top.
+ */
+ UniqueMethodEntry* pUnique = traceData->uniqueMethods;
+ for (int32_t ii = 0; ii < traceData->numUniqueMethods; ++ii, ++pUnique) {
+ int32_t numMethods = pUnique->numMethods;
+ for (int32_t jj = 0; jj < numMethods; ++jj) {
+ MethodEntry* method = pUnique->methods[jj];
+ pUnique->elapsedExclusive += method->elapsedExclusive;
+ pUnique->numCalls[0] += method->numCalls[0];
+ pUnique->numCalls[1] += method->numCalls[1];
+ }
+
+ /* Sort the methods into decreasing order of exclusive time */
+ qsort(pUnique->methods, numMethods, sizeof(MethodEntry*), compareElapsedExclusive);
+ }
+
+ /* Allocate an array of pointers to the methods for more efficient sorting. */
+ UniqueMethodEntry** pUniqueMethods = new UniqueMethodEntry*[traceData->numUniqueMethods];
+ for (int32_t ii = 0; ii < traceData->numUniqueMethods; ++ii)
+ pUniqueMethods[ii] = &traceData->uniqueMethods[ii];
+
+ /* Sort the methods into decreasing order of exclusive time */
+ qsort(pUniqueMethods, traceData->numUniqueMethods, sizeof(UniqueMethodEntry*),
+ compareUniqueExclusive);
+
+ if (gOptions.outputHtml) {
+ printf(
+ "<div class=\"header\"><span "
+ "class=\"parent\">&nbsp;</span>&nbsp;&nbsp;&nbsp;");
+ printf("Cycles %%/total Cumul.%% &nbsp;Calls+Recur&nbsp; Method</div>\n");
+ } else {
+ printf(" Cycles %%/total Cumul.%% Calls+Recur Method\n");
+ }
+
+ double sum = 0;
+ double total = sumThreadTime;
+ for (int32_t ii = 0; ii < traceData->numUniqueMethods; ++ii) {
+ /* Skip methods with zero cycles */
+ pUnique = pUniqueMethods[ii];
+ if (pUnique->elapsedExclusive == 0) break;
+
+ sum += pUnique->elapsedExclusive;
+ double per = 100.0 * pUnique->elapsedExclusive / total;
+ double sum_per = 100.0 * sum / total;
+ const char* methodName = pUnique->methods[0]->methodName;
+ if (gOptions.outputHtml) {
+ char buf[80];
+
+ methodName = htmlEscape(methodName, methodBuf, HTML_BUFSIZE);
+ printf(
+ "<div class=\"link\" onClick=\"javascript:toggle('e%d')\" "
+ "onMouseOver=\"javascript:onMouseOver(this)\" "
+ "onMouseOut=\"javascript:onMouseOut(this)\"><span class=\"parent\" "
+ "id=\"xe%d\">+</span>",
+ ii, ii);
+ sprintf(buf, "%" PRIu64, pUnique->elapsedExclusive);
+ printHtmlField(buf, 9);
+ printf(" ");
+ sprintf(buf, "%.1f", per);
+ printHtmlField(buf, 7);
+ printf(" ");
+ sprintf(buf, "%.1f", sum_per);
+ printHtmlField(buf, 7);
+ printf(" ");
+ sprintf(buf, "%d", pUnique->numCalls[0]);
+ printHtmlField(buf, 6);
+ printf("+");
+ sprintf(buf, "%d", pUnique->numCalls[1]);
+ printHtmlField(buf, -6);
+ printf(" ");
+ printf("%s", methodName);
+ printf("</div>\n");
+ printf("<div class=\"parent\" id=\"e%d\">\n", ii);
+ } else {
+ printf("---------------------------------------------\n");
+ printf("%9" PRIu64 " %7.1f %7.1f %6d+%-6d %s\n",
+ pUnique->elapsedExclusive, per, sum_per, pUnique->numCalls[0],
+ pUnique->numCalls[1], methodName);
+ }
+ int32_t numMethods = pUnique->numMethods;
+ double methodExclusive = pUnique->elapsedExclusive;
+ double sumMethods = 0;
+ for (int32_t jj = 0; jj < numMethods; ++jj) {
+ MethodEntry* method = pUnique->methods[jj];
+ const char* className = method->className;
+ const char* signature = method->signature;
+ per = 100.0 * method->elapsedExclusive / methodExclusive;
+ sumMethods += method->elapsedExclusive;
+ sum_per = 100.0 * sumMethods / methodExclusive;
+ if (gOptions.outputHtml) {
+ char buf[80];
+
+ className = htmlEscape(className, classBuf, HTML_BUFSIZE);
+ signature = htmlEscape(signature, signatureBuf, HTML_BUFSIZE);
+ printf("<div class=\"leaf\"><span class=\"leaf\">&nbsp;</span>");
+ sprintf(buf, "%" PRIu64, method->elapsedExclusive);
+ printHtmlField(buf, 9);
+ printf("&nbsp;");
+ sprintf(buf, "%" PRIu64, method->elapsedInclusive);
+ printHtmlField(buf, 9);
+ printf("&nbsp;");
+ sprintf(buf, "%.1f", per);
+ printHtmlField(buf, 7);
+ printf("&nbsp;");
+ sprintf(buf, "%.1f", sum_per);
+ printHtmlField(buf, 7);
+ printf("&nbsp;");
+ sprintf(buf, "%d", method->numCalls[0]);
+ printHtmlField(buf, 6);
+ printf("+");
+ sprintf(buf, "%d", method->numCalls[1]);
+ printHtmlField(buf, -6);
+ printf("&nbsp;");
+ printf("<a href=\"#m%d\">[%d]</a>&nbsp;%s.%s&nbsp;%s", method->index,
+ method->index, className, methodName, signature);
+ printf("</div>\n");
+ } else {
+ printf("%9" PRIu64 " %9" PRIu64 " %7.1f %7.1f %6d+%-6d [%d] %s.%s %s\n",
+ method->elapsedExclusive, method->elapsedInclusive, per, sum_per,
+ method->numCalls[0], method->numCalls[1], method->index,
+ className, methodName, signature);
+ }
+ }
+ if (gOptions.outputHtml) {
+ printf("</div>\n");
+ }
+ }
+}
+
+/*
+ * Read the key and data files and return the MethodEntries for those files
+ */
+DataKeys* parseDataKeys(TraceData* traceData, const char* traceFileName, uint64_t* threadTime) {
+ MethodEntry* caller;
+
+ FILE* dataFp = fopen(traceFileName, "rb");
+ if (dataFp == nullptr) return nullptr;
+
+ DataKeys* dataKeys = parseKeys(dataFp, 0);
+ if (dataKeys == nullptr) {
+ fclose(dataFp);
+ return nullptr;
+ }
+
+ DataHeader dataHeader;
+ if (parseDataHeader(dataFp, &dataHeader) < 0) {
+ fclose(dataFp);
+ return dataKeys;
+ }
+
+#if 0
+ FILE* dumpStream = fopen("debug", "w");
+#endif
+ while (1) {
+ /*
+ * Extract values from file.
+ */
+ int32_t threadId;
+ uint32_t methodVal;
+ uint64_t currentTime;
+ if (readDataRecord(dataFp, &dataHeader, &threadId, &methodVal, &currentTime))
+ break;
+
+ int32_t action = METHOD_ACTION(methodVal);
+ int64_t methodId = METHOD_ID(methodVal);
+
+ /* Get the call stack for this thread */
+ CallStack* pStack = traceData->stacks[threadId];
+
+ /* If there is no call stack yet for this thread, then allocate one */
+ if (pStack == nullptr) {
+ pStack = new CallStack();
+ pStack->top = 0;
+ pStack->lastEventTime = currentTime;
+ pStack->threadStartTime = currentTime;
+ traceData->stacks[threadId] = pStack;
+ }
+
+ /* Lookup the current method */
+ MethodEntry* method = lookupMethod(dataKeys, methodId);
+ if (method == nullptr) method = &dataKeys->methods[UNKNOWN_INDEX];
+
+#if 0
+ if (method->methodName) {
+ fprintf(dumpStream, "%2d %-8llu %d %8llu r %d c %d %s.%s %s\n",
+ threadId, currentTime, action, pStack->threadStartTime,
+ method->recursiveEntries,
+ pStack->top, method->className, method->methodName,
+ method->signature);
+ } else {
+ fprintf(dumpStream, "%2d %-8llu %d %8llu r %d c %d %s\n",
+ threadId, currentTime, action, pStack->threadStartTime,
+ method->recursiveEntries,
+ pStack->top, method->className);
+ }
+#endif
+
+ if (action == METHOD_TRACE_ENTER) {
+ /* This is a method entry */
+ if (pStack->top >= MAX_STACK_DEPTH) {
+ fprintf(stderr, "Stack overflow (exceeded %d frames)\n",
+ MAX_STACK_DEPTH);
+ exit(1);
+ }
+
+ /* Get the caller method */
+ if (pStack->top >= 1)
+ caller = pStack->calls[pStack->top - 1].method;
+ else
+ caller = &dataKeys->methods[TOPLEVEL_INDEX];
+ countRecursiveEntries(pStack, pStack->top, caller);
+ caller->elapsedExclusive += currentTime - pStack->lastEventTime;
+#if 0
+ if (caller->elapsedExclusive > 10000000)
+ fprintf(dumpStream, "%llu current %llu last %llu diff %llu\n",
+ caller->elapsedExclusive, currentTime,
+ pStack->lastEventTime,
+ currentTime - pStack->lastEventTime);
+#endif
+ if (caller->recursiveEntries <= 1) {
+ caller->topExclusive += currentTime - pStack->lastEventTime;
+ }
+
+ /* Push the method on the stack for this thread */
+ pStack->calls[pStack->top].method = method;
+ pStack->calls[pStack->top++].entryTime = currentTime;
+ } else {
+ /* This is a method exit */
+ uint64_t entryTime = 0;
+
+ /* Pop the method off the stack for this thread */
+ if (pStack->top > 0) {
+ pStack->top -= 1;
+ entryTime = pStack->calls[pStack->top].entryTime;
+ if (method != pStack->calls[pStack->top].method) {
+ if (method->methodName) {
+ fprintf(stderr, "Exit from method %s.%s %s does not match stack:\n",
+ method->className, method->methodName, method->signature);
+ } else {
+ fprintf(stderr, "Exit from method %s does not match stack:\n",
+ method->className);
+ }
+ stackDump(pStack, pStack->top + 1);
+ exit(1);
+ }
+ }
+
+ /* Get the caller method */
+ if (pStack->top >= 1)
+ caller = pStack->calls[pStack->top - 1].method;
+ else
+ caller = &dataKeys->methods[TOPLEVEL_INDEX];
+ countRecursiveEntries(pStack, pStack->top, caller);
+ countRecursiveEntries(pStack, pStack->top, method);
+ uint64_t elapsed = currentTime - entryTime;
+ addInclusiveTime(caller, method, elapsed);
+ method->elapsedExclusive += currentTime - pStack->lastEventTime;
+ if (method->recursiveEntries == 0) {
+ method->topExclusive += currentTime - pStack->lastEventTime;
+ }
+ }
+ /* Remember the time of the last entry or exit event */
+ pStack->lastEventTime = currentTime;
+ }
+
+ /* If we have calls on the stack when the trace ends, then clean
+ * up the stack and add time to the callers by pretending that we
+ * are exiting from their methods now.
+ */
+ uint64_t sumThreadTime = 0;
+ for (int32_t threadId = 0; threadId < MAX_THREADS; ++threadId) {
+ CallStack* pStack = traceData->stacks[threadId];
+
+ /* If this thread never existed, then continue with next thread */
+ if (pStack == nullptr) continue;
+
+ /* Also, add up the time taken by all of the threads */
+ sumThreadTime += pStack->lastEventTime - pStack->threadStartTime;
+
+ for (int32_t ii = 0; ii < pStack->top; ++ii) {
+ if (ii == 0)
+ caller = &dataKeys->methods[TOPLEVEL_INDEX];
+ else
+ caller = pStack->calls[ii - 1].method;
+ MethodEntry* method = pStack->calls[ii].method;
+ countRecursiveEntries(pStack, ii, caller);
+ countRecursiveEntries(pStack, ii, method);
+
+ uint64_t entryTime = pStack->calls[ii].entryTime;
+ uint64_t elapsed = pStack->lastEventTime - entryTime;
+ addInclusiveTime(caller, method, elapsed);
+ }
+ }
+ caller = &dataKeys->methods[TOPLEVEL_INDEX];
+ caller->elapsedInclusive = sumThreadTime;
+
+#if 0
+ fclose(dumpStream);
+#endif
+
+ if (threadTime != nullptr) {
+ *threadTime = sumThreadTime;
+ }
+
+ fclose(dataFp);
+ return dataKeys;
+}
+
+MethodEntry** parseMethodEntries(DataKeys* dataKeys) {
+ /* Create a new array of pointers to the methods and sort the pointers
+ * instead of the actual MethodEntry structs. We need to do this
+ * because there are other lists that contain pointers to the
+ * MethodEntry structs.
+ */
+ MethodEntry** pMethods = new MethodEntry*[dataKeys->numMethods];
+ for (int32_t ii = 0; ii < dataKeys->numMethods; ++ii) {
+ MethodEntry* entry = &dataKeys->methods[ii];
+ pMethods[ii] = entry;
+ }
+
+ return pMethods;
+}
+
+/*
+ * Produce a function profile from the given list of methods.
+ */
+void profileTrace(TraceData* traceData, MethodEntry** pMethods, int32_t numMethods,
+ uint64_t sumThreadTime) {
+ /* Print the html header, if necessary */
+ if (gOptions.outputHtml) {
+ printf(htmlHeader, gOptions.sortableUrl);
+ outputTableOfContents();
+ }
+
+ printExclusiveProfile(pMethods, numMethods, sumThreadTime);
+ printInclusiveProfile(pMethods, numMethods, sumThreadTime);
+
+ createClassList(traceData, pMethods, numMethods);
+ printClassProfiles(traceData, sumThreadTime);
+
+ createUniqueMethodList(traceData, pMethods, numMethods);
+ printMethodProfiles(traceData, sumThreadTime);
+
+ if (gOptions.outputHtml) {
+ printf("%s", htmlFooter);
+ }
+}
+
+int32_t compareMethodNamesForDiff(const void* a, const void* b) {
+ const MethodEntry* methodA = *(const MethodEntry**) a;
+ const MethodEntry* methodB = *(const MethodEntry**) b;
+ if (methodA->methodName == nullptr || methodB->methodName == nullptr) {
+ return compareClassNames(a, b);
+ }
+ int32_t result = strcmp(methodA->methodName, methodB->methodName);
+ if (result == 0) {
+ result = strcmp(methodA->signature, methodB->signature);
+ if (result == 0) {
+ return strcmp(methodA->className, methodB->className);
+ }
+ }
+ return result;
+}
+
+int32_t findMatch(MethodEntry** methods, int32_t size, MethodEntry* matchThis) {
+ for (int32_t i = 0; i < size; i++) {
+ MethodEntry* method = methods[i];
+
+ if (method != nullptr && !compareMethodNamesForDiff(&method, &matchThis)) {
+ // printf("%s.%s == %s.%s<br>\n", matchThis->className, matchThis->methodName,
+ // method->className, method->methodName);
+
+ return i;
+ // if (!compareMethodNames(&method, &matchThis)) return i;
+ }
+ }
+
+ return -1;
+}
+
+int32_t compareDiffEntriesExclusive(const void* a, const void* b) {
+ const DiffEntry* entryA = (const DiffEntry*) a;
+ const DiffEntry* entryB = (const DiffEntry*) b;
+
+ if (entryA->differenceExclusive < entryB->differenceExclusive) {
+ return 1;
+ } else if (entryA->differenceExclusive > entryB->differenceExclusive) {
+ return -1;
+ }
+
+ return 0;
+}
+
+int32_t compareDiffEntriesInclusive(const void* a, const void* b) {
+ const DiffEntry* entryA = (const DiffEntry*) a;
+ const DiffEntry* entryB = (const DiffEntry*) b;
+
+ if (entryA->differenceInclusive < entryB->differenceInclusive) {
+ return 1;
+ } else if (entryA->differenceInclusive > entryB->differenceInclusive) {
+ return -1;
+ }
+
+ return 0;
+}
+
+void printMissingMethod(MethodEntry* method) {
+ char classBuf[HTML_BUFSIZE];
+ char methodBuf[HTML_BUFSIZE];
+
+ char* className = htmlEscape(method->className, classBuf, HTML_BUFSIZE);
+ char* methodName = htmlEscape(method->methodName, methodBuf, HTML_BUFSIZE);
+
+ if (gOptions.outputHtml) printf("<tr><td>\n");
+
+ printf("%s.%s ", className, methodName);
+ if (gOptions.outputHtml) printf("</td><td>");
+
+ printf("%" PRIu64 " ", method->elapsedExclusive);
+ if (gOptions.outputHtml) printf("</td><td>");
+
+ printf("%" PRIu64 " ", method->elapsedInclusive);
+ if (gOptions.outputHtml) printf("</td><td>");
+
+ printf("%d\n", method->numCalls[0]);
+ if (gOptions.outputHtml) printf("</td><td>\n");
+}
+
+void createDiff(DataKeys* d1, DataKeys* d2) {
+ MethodEntry** methods1 = parseMethodEntries(d1);
+ MethodEntry** methods2 = parseMethodEntries(d2);
+
+ // sort and assign the indices
+ qsort(methods1, d1->numMethods, sizeof(MethodEntry*), compareElapsedInclusive);
+ for (int32_t i = 0; i < d1->numMethods; ++i) {
+ methods1[i]->index = i;
+ }
+
+ qsort(methods2, d2->numMethods, sizeof(MethodEntry*), compareElapsedInclusive);
+ for (int32_t i = 0; i < d2->numMethods; ++i) {
+ methods2[i]->index = i;
+ }
+
+ int32_t max = (d1->numMethods < d2->numMethods) ? d2->numMethods : d1->numMethods;
+ max++;
+ DiffEntry* diffs = new DiffEntry[max];
+ memset(diffs, 0, max * sizeof(DiffEntry));
+ DiffEntry* ptr = diffs;
+
+ // printf("<br>d1->numMethods: %d d2->numMethods: %d<br>\n",
+ // d1->numMethods, d2->numMethods);
+
+ int32_t matches = 0;
+
+ for (int32_t i = 0; i < d1->numMethods; i++) {
+ int32_t match = findMatch(methods2, d2->numMethods, methods1[i]);
+ if (match >= 0) {
+ ptr->method1 = methods1[i];
+ ptr->method2 = methods2[match];
+
+ uint64_t e1 = ptr->method1->elapsedExclusive;
+ uint64_t e2 = ptr->method2->elapsedExclusive;
+ if (e1 > 0) {
+ ptr->differenceExclusive = e2 - e1;
+ ptr->differenceExclusivePercentage = (static_cast<double>(e2) /
+ static_cast<double>(e1)) * 100.0;
+ }
+
+ uint64_t i1 = ptr->method1->elapsedInclusive;
+ uint64_t i2 = ptr->method2->elapsedInclusive;
+ if (i1 > 0) {
+ ptr->differenceInclusive = i2 - i1;
+ ptr->differenceInclusivePercentage = (static_cast<double>(i2) /
+ static_cast<double>(i1)) * 100.0;
+ }
+
+ // clear these out so we don't find them again and we know which ones
+ // we have left over
+ methods1[i] = nullptr;
+ methods2[match] = nullptr;
+ ptr++;
+
+ matches++;
+ }
+ }
+ ptr->method1 = nullptr;
+ ptr->method2 = nullptr;
+
+ qsort(diffs, matches, sizeof(DiffEntry), compareDiffEntriesExclusive);
+ ptr = diffs;
+
+ if (gOptions.outputHtml) {
+ printf(htmlHeader, gOptions.sortableUrl);
+ printf("<h3>Table of Contents</h3>\n");
+ printf("<ul>\n");
+ printf("<li><a href='#exclusive'>Exclusive</a>\n");
+ printf("<li><a href='#inclusive'>Inclusive</a>\n");
+ printf("</ul>\n");
+ printf("Run 1: %s<br>\n", gOptions.diffFileName);
+ printf("Run 2: %s<br>\n", gOptions.traceFileName);
+ printf("<a name=\"exclusive\"></a><h3 id=\"exclusive\">Exclusive</h3>\n");
+ printf(tableHeader, "exclusive_table");
+ }
+
+ char classBuf[HTML_BUFSIZE];
+ char methodBuf[HTML_BUFSIZE];
+ while (ptr->method1 != nullptr && ptr->method2 != nullptr) {
+ if (gOptions.outputHtml) printf("<tr><td>\n");
+
+ char* className = htmlEscape(ptr->method1->className, classBuf, HTML_BUFSIZE);
+ char* methodName = htmlEscape(ptr->method1->methodName, methodBuf, HTML_BUFSIZE);
+
+ printf("%s.%s ", className, methodName);
+ if (gOptions.outputHtml) printf("</td><td>");
+
+ printf("%" PRIu64 " ", ptr->method1->elapsedExclusive);
+ if (gOptions.outputHtml) printf("</td><td>");
+
+ printf("%" PRIu64 " ", ptr->method2->elapsedExclusive);
+ if (gOptions.outputHtml) printf("</td><td>");
+
+ printf("%" PRIu64 " ", ptr->differenceExclusive);
+ if (gOptions.outputHtml) printf("</td><td>");
+
+ printf("%.2f\n", ptr->differenceExclusivePercentage);
+ if (gOptions.outputHtml) printf("</td><td>\n");
+
+ printf("%d\n", ptr->method1->numCalls[0]);
+ if (gOptions.outputHtml) printf("</td><td>\n");
+
+ printf("%d\n", ptr->method2->numCalls[0]);
+ if (gOptions.outputHtml) printf("</td></tr>\n");
+
+ ptr++;
+ }
+
+ if (gOptions.outputHtml) printf("</table>\n");
+
+ if (gOptions.outputHtml) {
+ printf(htmlHeader, gOptions.sortableUrl);
+ printf("Run 1: %s<br>\n", gOptions.diffFileName);
+ printf("Run 2: %s<br>\n", gOptions.traceFileName);
+ printf("<a name=\"inclusive\"></a><h3 id=\"inclusive\">Inclusive</h3>\n");
+ printf(tableHeader, "inclusive_table");
+ }
+
+ qsort(diffs, matches, sizeof(DiffEntry), compareDiffEntriesInclusive);
+ ptr = diffs;
+
+ while (ptr->method1 != nullptr && ptr->method2 != nullptr) {
+ if (gOptions.outputHtml) printf("<tr><td>\n");
+
+ char* className = htmlEscape(ptr->method1->className, classBuf, HTML_BUFSIZE);
+ char* methodName = htmlEscape(ptr->method1->methodName, methodBuf, HTML_BUFSIZE);
+
+ printf("%s.%s ", className, methodName);
+ if (gOptions.outputHtml) printf("</td><td>");
+
+ printf("%" PRIu64 " ", ptr->method1->elapsedInclusive);
+ if (gOptions.outputHtml) printf("</td><td>");
+
+ printf("%" PRIu64 " ", ptr->method2->elapsedInclusive);
+ if (gOptions.outputHtml) printf("</td><td>");
+
+ printf("%" PRIu64 " ", ptr->differenceInclusive);
+ if (gOptions.outputHtml) printf("</td><td>");
+
+ printf("%.2f\n", ptr->differenceInclusivePercentage);
+ if (gOptions.outputHtml) printf("</td><td>\n");
+
+ printf("%d\n", ptr->method1->numCalls[0]);
+ if (gOptions.outputHtml) printf("</td><td>\n");
+
+ printf("%d\n", ptr->method2->numCalls[0]);
+ if (gOptions.outputHtml) printf("</td></tr>\n");
+
+ ptr++;
+ }
+
+ if (gOptions.outputHtml) {
+ printf("</table>\n");
+ printf("<h3>Run 1 methods not found in Run 2</h3>");
+ printf(tableHeaderMissing, "?");
+ }
+
+ for (int32_t i = 0; i < d1->numMethods; ++i) {
+ if (methods1[i] != nullptr) {
+ printMissingMethod(methods1[i]);
+ }
+ }
+
+ if (gOptions.outputHtml) {
+ printf("</table>\n");
+ printf("<h3>Run 2 methods not found in Run 1</h3>");
+ printf(tableHeaderMissing, "?");
+ }
+
+ for (int32_t i = 0; i < d2->numMethods; ++i) {
+ if (methods2[i] != nullptr) {
+ printMissingMethod(methods2[i]);
+ }
+ }
+
+ if (gOptions.outputHtml) printf("</body></html>\n");
+}
+
+int32_t usage(const char* program) {
+ fprintf(stderr, "Copyright (C) 2006 The Android Open Source Project\n\n");
+ fprintf(stderr,
+ "usage: %s [-ho] [-k] [-t threshold] [-s sortable] [-d trace-file-name] "
+ "[-g outfile] trace-file-name\n",
+ program);
+ fprintf(stderr, " -d trace-file-name - Diff with this trace\n");
+ fprintf(stderr, " -g outfile - Write graph to 'outfile'\n");
+ fprintf(stderr,
+ " -k - When writing a graph, keep the intermediate "
+ "DOT file\n");
+ fprintf(stderr, " -h - Turn on HTML output\n");
+ fprintf(
+ stderr,
+ " -o - Dump the dmtrace file instead of profiling\n");
+ fprintf(stderr,
+ " -s sortable - URL base for the sortable javascript "
+ "file\n");
+ fprintf(stderr,
+ " -t threshold - Threshold percentage for including nodes in "
+ "the graph\n");
+ return 2;
+}
+
+// Returns non-zero if there was an error.
+int32_t parseOptions(int32_t argc, char** argv) {
+ while (1) {
+ int32_t opt = getopt(argc, argv, "d:hg:kos:t:");
+ if (opt == -1) break;
+ switch (opt) {
+ case 'd':
+ gOptions.diffFileName = optarg;
+ break;
+ case 'g':
+ gOptions.graphFileName = optarg;
+ break;
+ case 'k':
+ gOptions.keepDotFile = 1;
+ break;
+ case 'h':
+ gOptions.outputHtml = 1;
+ break;
+ case 'o':
+ gOptions.dump = 1;
+ break;
+ case 's':
+ gOptions.sortableUrl = optarg;
+ break;
+ case 't':
+ gOptions.threshold = atoi(optarg);
+ break;
+ default:
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Parse args.
+ */
+int32_t main(int32_t argc, char** argv) {
+ gOptions.threshold = -1;
+
+ // Parse the options
+ if (parseOptions(argc, argv) || argc - optind != 1) return usage(argv[0]);
+
+ gOptions.traceFileName = argv[optind];
+
+ if (gOptions.threshold < 0 || 100 <= gOptions.threshold) {
+ gOptions.threshold = 20;
+ }
+
+ if (gOptions.dump) {
+ dumpTrace();
+ return 0;
+ }
+
+ uint64_t sumThreadTime = 0;
+
+ TraceData data1;
+ DataKeys* dataKeys = parseDataKeys(&data1, gOptions.traceFileName, &sumThreadTime);
+ if (dataKeys == nullptr) {
+ fprintf(stderr, "Cannot read \"%s\".\n", gOptions.traceFileName);
+ exit(1);
+ }
+
+ if (gOptions.diffFileName != nullptr) {
+ uint64_t sum2;
+ TraceData data2;
+ DataKeys* d2 = parseDataKeys(&data2, gOptions.diffFileName, &sum2);
+ if (d2 == nullptr) {
+ fprintf(stderr, "Cannot read \"%s\".\n", gOptions.diffFileName);
+ exit(1);
+ }
+
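+ /* createDiff() reports its first argument as "Run 1" and its second as
+ * "Run 2", so the diff file is run 1 and the profiled trace is run 2. */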
+ createDiff(d2, dataKeys);
+
+ freeDataKeys(d2);
+ } else {
+ MethodEntry** methods = parseMethodEntries(dataKeys);
+ profileTrace(&data1, methods, dataKeys->numMethods, sumThreadTime);
+ if (gOptions.graphFileName != nullptr) {
+ createInclusiveProfileGraphNew(dataKeys);
+ }
+ free(methods);
+ }
+
+ freeDataKeys(dataKeys);
+
+ return 0;
+}
diff --git a/tools/extract-embedded-java b/tools/extract-embedded-java
new file mode 100755
index 0000000000..e966552af0
--- /dev/null
+++ b/tools/extract-embedded-java
@@ -0,0 +1,35 @@
+#!/bin/bash
+#
+# Copyright 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [ "$#" -ne "2" ]; then
+ echo "Usage: ./extract-embedded-java smali_dir java_dir"
+ exit 1
+fi
+
+# Check the input and output are directories
+[[ -d "$1" ]] || exit 1
+[[ -d "$2" ]] || exit 1
+
+# For every file which has the file extension smali, set $f to be the name without
+# .smali and then:
+for f in `find "$1" -type f -name "*.smali" | xargs -n 1 -P 0 -i basename -s .smali \{\}`; do
+ # remove all lines except those starting with '# ', remove the '#' then print
+ # it to a file ${name}.java. Do this concurrently.
+ grep "^# " "$1/${f}.smali" | sed "s:# ::" > "${2}/${f}.java" &
+done
+
+# wait for all the files to be written
+wait